text
stringlengths
14
5.77M
meta
dict
__index_level_0__
int64
0
9.97k
package com.impetus.kundera.property.accessor;

import junit.framework.Assert;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import com.impetus.kundera.property.PropertyAccessor;

/**
 * Unit tests for {@link DoubleAccessor}: byte-array and String round-trips,
 * copying, and instance creation for {@link Double} values.
 *
 * @author amresh.singh
 */
public class DoubleAccessorTest
{
    /** Accessor under test; recreated before and cleared after each test. */
    PropertyAccessor<Double> accessor;

    /**
     * @throws java.lang.Exception
     */
    @Before
    public void setUp() throws Exception
    {
        accessor = new DoubleAccessor();
    }

    /**
     * @throws java.lang.Exception
     */
    @After
    public void tearDown() throws Exception
    {
        accessor = null;
    }

    /**
     * Test method for {@link com.impetus.kundera.property.accessor.DoubleAccessor#fromBytes(java.lang.Class, byte[])}.
     */
    @Test
    public void testFromBytes()
    {
        // A null byte array is expected to decode to 0.0 rather than null.
        Assert.assertEquals(Double.valueOf(0.0), accessor.fromBytes(Double.class, null));

        // Round-trip: toBytes followed by fromBytes must restore the original value.
        // Double.valueOf avoids the deprecated Double(double) constructor.
        Double original = Double.valueOf(4.555);
        byte[] bytes = accessor.toBytes(original);
        Double restored = accessor.fromBytes(Double.class, bytes);
        Assert.assertEquals(original, restored);
    }

    /**
     * Test method for {@link com.impetus.kundera.property.accessor.DoubleAccessor#toBytes(java.lang.Object)}.
     */
    @Test
    public void testToBytes()
    {
        // Encoding null must yield null, not an empty array.
        Assert.assertNull(accessor.toBytes(null));

        // Round-trip through the byte representation.
        Double original = Double.valueOf(4.555);
        byte[] bytes = accessor.toBytes(original);
        Double restored = accessor.fromBytes(Double.class, bytes);
        Assert.assertEquals(original, restored);
    }

    /**
     * Test method for {@link com.impetus.kundera.property.accessor.DoubleAccessor#toString(java.lang.Object)}.
     */
    @Test
    public void testToStringObject()
    {
        // Converting null must yield null.
        Assert.assertNull(accessor.toString(null));

        // The accessor's string form must match Double#toString.
        Double value = Double.valueOf(4.555);
        Assert.assertEquals(value.toString(), accessor.toString(value));
    }

    /**
     * Test method for {@link com.impetus.kundera.property.accessor.DoubleAccessor#fromString(java.lang.Class, java.lang.String)}.
     */
    @Test
    public void testFromString()
    {
        // Parsing null must yield null.
        Assert.assertNull(accessor.fromString(Double.class, null));

        // Round-trip through the string representation.
        Double original = Double.valueOf(4.555);
        Double parsed = accessor.fromString(Double.class, original.toString());
        Assert.assertEquals(original, parsed);
    }

    /**
     * Test method for {@link com.impetus.kundera.property.accessor.DoubleAccessor#getCopy(java.lang.Object)}.
     */
    @Test
    public void testGetCopy()
    {
        Double original = Double.valueOf(4.555);
        Double copy = accessor.getCopy(original);
        Assert.assertEquals(original, copy);
    }

    /**
     * Test method for {@link com.impetus.kundera.property.accessor.DoubleAccessor#getInstance(java.lang.Class)}.
     */
    @Test
    public void testGetInstance()
    {
        Object instance = accessor.getInstance(Double.class);
        Assert.assertNotNull(instance);
        // Per the original assertion, DoubleAccessor#getInstance is expected to
        // return Double.MAX_VALUE as its placeholder instance.
        Assert.assertEquals(Double.valueOf(Double.MAX_VALUE), (Double) instance);
    }
}
{ "redpajama_set_name": "RedPajamaGithub" }
568
Apple TV+ Subscribers Get Another Free Extension Till July 2021: Report Apple TV+ users may get another free extension of services. This means that subscribers might get another 9 months of free Apple TV+ according to some reports. The first extension happened in November 2020 Ubisoft Making New Open World Star Wars Game with LucasFilm Games Lucasfilm Games and Massive Entertainment have confirmed that the latest Star Wars game being developed by them will be open-world. However, they haven't given any release dates, yet. An open-world Star Wars game has been long awaited NewsLifestyle Headphones Might Become Obsolete As New Tech Rolls In Dia Majumder This may be the last year people buy headphones. 2019 will bring with it an audio technology that might make our favorite headphones obsolete. Noveto, an Israel based startup, promises to provide the sensation of sound right in your ears, without the need for an actual gadget in your ears. They use focused sound waves. The audio might be playing on a standalone device, which the company calls "Sowlo" that looks like a small Bluetooth speaker, or from any device that can play audio – a computer, TV, smartphone, tablet, or any other device with their technology built in. Focused sound waves sounds similar to "directional" audio, which means the audio is emitted directly in front of the device. But if you're not directly in front of the directional audio speaker, you'll not be able to hear it. Noveto's focused audio is different from directional audio. It's "steerable", according to Noveto co-founder and lead engineer Tomer Shani said in an interview with Business Insider. This means that focused audio can follow you around as you move, using 3D tracking technology to determine the position of your ears. "I need to know the position of your ears in space so I know where to build my sound bubbles," Shani told Business Insider, referring to himself as the technology. The other co-founder of Noveto is Noam Babayoff. 
Their CEO is now Brian Wallace, who has managed to raise funds for Noveto from some of the biggest names in the tech industry. At Magic Leap, a mixed reality company, about $1.4 million was raised. Noveto's technology also reminds us of VR. Although VR is an amazing aspect of technology, it hasn't enjoyed mainstream popularity, partly because of its high prices and also because of the technical requirements like powerful computers and a whole lot of cables. Adding to this is the fact that there's relatively less content available for VR. Noveto's viability has quite a few positive points, like being in the same price range as midrange headphones. The source of the audio also doesn't need to be modified in any way. Wallace believes that Noveto's technology can provide better sound quality than popular headphones in the future. "Are we going to replace a $10,000 home surround system? No. But can we replace 80% of what's out there? Yes, 100%" Wallace said. Currently the focus is on giving users private audio listening experience, without wearing anything on the head or ears, and being aware of the surroundings at the same time. The only major issue right now is what happens if you position yourself at exactly 90 degrees to the device. The sound reaches only one ear, the ear that is facing the device. The system can be positioned above the user, which might take care of this problem. The technology is expected to hit the markets in fall 2019. "The tech is real, ready, and everything set to go," Wallace said. Previous articleTop Ten Smartphones: Mid-2018 Edition! (The #1 Smartphone may surprise you!) Next articleThe Rise of PC Gaming and The Shrinking Lifespans of Consoles Avoid these things when you choose an online casino Playing casinos online is becoming even more popular than brick and mortar gambling. This is due to the convenience provided...
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
4,871
Q: Writing functions for a command line in C. Errors I'm writing a program in C that calls functions from the command line and it is coming up with errors all in the first line of code (the for statement) and I'm not sure why or what they are. It says "syntax error found, expecting ;" "syntax error found, expecting )" "undeclared indentifier i" and "illegal statement termination." int main(int argc, char *argv[]) { for(int i = 0; i < argc; i++ ) { if(0 == stricmp("ParameterA", argv[i])) { exec1 = TRUE; } else if(0 == stricmp("ParameterB", argv[i])) { exec2 = FALSE; } else if(0 == stricmp("ParameterC", argv[i])) { exec2 = TRUE; } else { fprintf(stderr, "Unknown parameter: %s", argv[i]); } } } A: In C (prior to C99), variables must be declared before any executable code. You can change the code to int main(int argc, char *argv[]) { int i = 0; for(i = 0; i < argc; i++ ) ... A: In C (before C99) you can't declare int i inside a for loop; declare int i before the loop
{ "redpajama_set_name": "RedPajamaStackExchange" }
3,286
\section{Introduction} \subsection{Generating strong cutting planes through lifting} \textit{Lifting} is a technique that is used to derive or strengthen classes of cutting planes. It was first introduced to optimization in the context of mixed integer linear programming (MILP); see~\cite{richard2010lifting} for a review. The lifting process has two steps: \begin{itemize} \item \textit{Fixing} and generation of a \textit{seed inequality}: In the first step, the set $S$ of interest is restricted by fixing a subset of variables, say $x^F$, to specific values (typically to one of their bounds), say $\tilde{x}^F$. A valid inequality $h(x) \geq h_0$, which we call \textit{seed inequality}, is then generated for the restriction $S|_{x^F = \tilde{x}^F}$. \item \textit{Lifting} the seed inequality: The seed inequality $h(x) \geq h_0$, when viewed with ``zero coefficients'' for the fixed variables $h(x) + 0\cdot x^F\geq h_0$ is typically not valid for the original set $S$. The task in the lifting step is to generate an inequality $h(x) + g(x^F) \geq h_0 + g_0$, which (i) is valid for $S$ and (ii) satisfies $g(\tilde{x}^F) = g_0$. Under condition (ii), inequality $h(x) + g(x^F) \geq h_0 + g_0$ reduces to inequality $h(x) \geq h_0$ when $x^F$ is set to $\tilde{x}^F$. The process of lifting is often accomplished by rotating or tilting the seed inequality~\cite{espinoza2010lifting}. \end{itemize} Though condition (ii) is not strictly necessary to impose, we require it in the remainder of the paper as otherwise $h(x) + g(x^F) \geq h_0 + g_0$ is weak on the face $x^F = \tilde{x}^F$. Lifting, as a technique for generating cutting-planes in MILP, has been extensively researched. 
Originally devised for node packing and knapsack sets ~\cite{padberg1973facial,padberg1975note,balas1975facets,hammer1975facet,balas1978facets}, lifting was extended to general settings \cite{wolsey1976facets,wolsey1977valid,gu1999lifted,gu2000sequence,richard2003liftedbta,richard2003liftedsl,atamturk2004sequence} and used to derive families of valid inequalities for many sets including \cite{ceria1998cutting,martin1998intersection,atamturk2003facets,agra2007lifting,kaparis2008local,dey2009linear,zeng2007framework,zeng2011polyhedral,zeng2011polyhedrala,gomez2018submodularity}. Many of the classes of cutting planes that have yielded significant computational gains can be obtained through lifting. This includes \textit{lifted cover inequalities}~\cite{gu1999lifted}, lifted tableaux cuts~\cite{dey2009linear,narisetty2011lifted}, and even the \textit{Gomory mixed integer cut}~\cite{dey2010two}; see \cite{balas1980strengthening,richard2009valid,dey2010constrained,conforti2011geometric,basu2012unique,basu2013unique,basu2019nonunique,averkov2015lifting,basu2015operations,dey2010composite,basu2019nonunique} for papers related to lifting in the infinite group problem model. Similarly, \textit{mixing inequalities}~\cite{gunluk2001mixing} can be viewed as an outcome of lifting~\cite{dey2010composite}. Significantly fewer articles have focused on studying how lifting can be applied to nonlinear programs and mixed integer nonlinear programs. Exceptions include~\cite{richard2010liftingframework}, which develops a general theory for lifting linear inequalities in nonlinear programming, \cite{nguyen2018deriving} which applies lifting to derive the convex hull of a nonlinear set, \cite{gupte2012mixed} which studies lifting for the pooling problem, \cite{atamturk2011lifting} which uses lifting for conic integer programs, and \cite{chung2014lifted} which develops strong inequalities for mixed integer bilinear programs. 
\subsection{Goal of this paper} The goal of this paper is to derive new classes of valid convex inequalities for \textit{quadratically constrained quadratic programs} (QCQPs) through the technique of lifting. Generating valid inequalities for single row relaxations (together with bounds and integrality restrictions), \textit{i.e.}, for knapsack constraints, was the first, and arguably the most important step in the development of computationally useful cutting-planes in MILP. Motivated by this observation, various cutting-planes and convexification techniques for sets defined by a single non-convex quadratic constraint together with bounds have recently been investigated; see ~\cite{chung2014lifted,tawarmalani2010strong,anstreicher2020convex} for classes of valid inequalities for single constraint QCQPs and~\cite{dey2019new,santana2020convex} for convex hull results for such sets. The paper~\cite{rahman2019facets} studies a set similar to the one we study, albeit with integer variables. Further, \cite{dey2019new} demonstrates that cuts obtained from one-row relaxations of QCQPs can be useful computationally. The paradigm of intersection cuts has also been explored to generate cuts for single-constraint QCQPs~\cite{munoz2020maximal,bienstock2020outer}. Due to lack of space, we refrain from describing here the vast literature on convexification techniques for QCQPs and instead refer interested readers to~\cite{burer2015gentle,santana2020convex} and the references therein. In this paper, we investigate the lifting of a convex seed inequality for a feasible region defined by a single (non-convex) quadratic constraint together with bound constraints. Apart from \cite{atamturk2011lifting}, we are not aware of any paper that attempts to study or employ lifting of convex nonlinear inequalities. To the best of our knowledge, this is the first study that derives lifted valid inequalities for general non-convex quadratic constraints with arbitrary number of variables. 
An extended abstract of this paper was accepted for publication in IPCO 2021~\cite{GuIPCO2021}. \subsection{Main contributions} \begin{itemize} \item \textit{Can we always lift?} We present an example in two variables that illustrates that, even when a set is defined by a convex quadratic constraint, it might not always be possible to lift a linear seed inequality, valid for the restriction obtained by fixing a variable at lower bound, when we assume $g(\cdot) - g_0$ is an affine function of the fixed variable. Our main result, by contrast, establishes that there exists a large class of sets, described by a single \textit{bipartite bilinear} constraint~\cite{dey2019new} together with bounds, for which it is always possible to lift when variables are fixed at their bounds. Note that any quadratic constraint can be relaxed to a bipartite bilinear constraint. \item \textit{Sequence-independent lifting.} The lifting of a fixed variable requires the solution of a non-convex nonlinear optimization problem. When multiple variables must be lifted one at a time, this process (referred to as \textit{sequential lifting}) can be computationally prohibitive. Further, the form of the lifted inequality obtained will differ depending on the order in which variables are lifted. For MILPs, it was shown in \cite{wolsey1977valid} that when the so-called \textit{lifting function} is subadditive, lifting is far more computationally tractable in part because the form of the lifted inequality is independent of the order in which variables are lifted. We develop a similar general result for sequence-independent lifting of seed inequalities for \textit{separable bipartite bilinear} constraints. \item \textit{Bilinear covering set and bilinear cover inequality.} We next study a \textit{separable bipartite bilinear} set whose coefficients form a minimal cover with respect to the right-hand-side. For this set, we derive a \textit{bilinear cover inequality}. 
This \textit{second-order cone representable valid inequality} yields a constant-factor approximation of the convex hull of the original set. \item \textit{Sequence-independent lifting of bilinear cover inequality.} We construct a \textit{two-slope} subadditive upper bound of the lifting function corresponding to the bilinear cover inequality. This function is reminiscent of the two-slope subadditive functions studied in the context of cutting-planes for the infinite group relaxation~\cite{gomory1972some,richard2010group,koppe2015electronic}, although there is no apparent connection. Using this subadditive function, we lift fixed variable pairs in closed-form, thus describing a family of \textit{lifted bilinear cover inequalities}, which are valid for general separable bipartite bilinear constraints. \end{itemize} \paragraph{Notation and organization of the paper} Given a positive integer $n$, we denote the set $\{1, \dots, n\}$ by $[n]$. Given a set $S \subseteq \mathbb{R}^n$ and $\theta > 0$, we use $\theta\cdot S$ to denote the set $\{\theta x\, |\, x \in S\}$. We also use $\textup{conv}(S)$ to denote the convex hull of set $S$. The rest of the paper is organized as follows. In Section~\ref{sec:main} we present our main results. In Section~\ref{sec:future} we discuss some key directions for future research. Sections~\ref{section:existence}-\ref{section:lifted} give the proofs of the results described in Section~\ref{sec:main}. \section{Main results}\label{sec:main} Before we discuss our results, we first present two examples that illustrate how lifting can be performed for a set defined by a quadratic constraint and what challenges can arise during such procedure. \begin{example}\label{example:one} Consider the set $S:= \{\ (x_1, x_2, x_3) \in [0,1]^3 \ | \ x_1x_2 + 2x_1x_3 \geq 1\ \}.$ First, we fix $x_3 = 0$ to obtain the restriction $S|_{x_3 = 0}: =\{ (x_1, x_2) \in [0,1]^2 \, |\, x_1x_2 \geq 1\}$. 
The seed inequality $\sqrt{x_1x_2} \geq 1,$ is a valid convex inequality for $S|_{x_3 = 0}$. We next show how it can be lifted into a valid inequality for $S$. Observe that, although valid for $S|_{x_3 = 0}$, the seed inequality is not valid for $S$, since $(x_1, x_2, x_3) = (1,0,\sfrac{1}{2})$ violates it while belonging to $S$. We therefore must introduce variable $x_3$ into the seed inequality so as to make it valid. In particular we seek $\alpha \in \mathbb{R}$ for which \begin{eqnarray} \sqrt{x_1x_2} + \alpha x_3 \geq 1, \label{ex:liftedsimple} \end{eqnarray} is valid for $S$. This question can be answered by solving the problem \begin{equation}\label{example:lifting} \begin{split} \alpha^*:= \textup{sup}\ & \frac{ 1 - \sqrt{x_1x_2}}{x_3} \\ \textup{s.t.}\ & x_1x_2 + 2x_1x_3 \geq 1 ,\ x_3 \in (0, 1],\ (x_1, x_2) \in [0, 1]^2, \end{split} \end{equation} where a key challenge is to first ascertain that the supremum is finite. When $\alpha^*$ is finite, it is clear that choosing any $\alpha \ge \alpha^*$ in (\ref{ex:liftedsimple}) yields a valid inequality for $S$. Problem (\ref{example:lifting}) can be analyzed using the following facts: (1) for any fixed value of $x_3$, we can always assume that an extreme point is the optimal solution, as the objective is to maximize a convex function, and (2) the extreme points of the set where $x_3$ is fixed to a value within its bounds are well-understood~\cite{santana2020convex}. This suggests that one can inspect all different values of $x_3$ to establish that the supremum is finite. We illustrate these calculations next. Observe that $\alpha^*$ can be obtained by computing the supremum $\alpha_1^*$ of (\ref{example:lifting}) for $x_3 \in [\sfrac{1}{2},1]$ and then computing the supremum $\alpha_2^*$ of (\ref{example:lifting}) for $x_3 \in (0,\sfrac{1}{2}]$. 
When $x_3 \in [\sfrac{1}{2},1]$, one optimal solution is $x_1 = \frac{1}{2x_3}$ and $x_2 = 0$, thus $\alpha^*_1 = \textup{sup}_{x_3 \in [\sfrac{1}{2}, 1] }\frac{1}{x_3} = 2.$ When $x_3 \in (0,\sfrac{1}{2}]$, one optimal solution is $x_1 = 1$ and $x_2 = 1 - 2x_3$, thus $$ \alpha^*_2 = \sup_{x_3 \in (0, \sfrac{1}{2}] }\frac{1 - \sqrt{1 - 2x_3}}{x_3} = \sup_{x_3 \in (0, \sfrac{1}{2}] }\frac{2}{1 + \sqrt{1 - 2x_3}} = 2. $$ Choosing any $\alpha \ge \alpha^*=\max\{\alpha_1^*,\alpha_2^*\}=2$ yields a valid inequality for $S$. The strongest such valid inequality is $\sqrt{x_1x_2} + 2 x_3 \geq 1.$ \end{example} Example~\ref{example:one} might suggest that lifting can always be performed when seeking to derive a linear valid inequality. Example~\ref{example:nolifting} shows that it is not so. \begin{example}\label{example:nolifting} Consider the set $ S=\left\{ \ (x_1,x_2)\in[0,1]^2 \ \bigm| \ -x_1^2 - (x_2-0.5)^2\geq -0.5^2 \ \right\}.$ The inequality $-x_1 \geq 0$ is valid for the set $S|_{x_2=0}$ obtained from $S$ by fixing $x_2=0$. By setting up an optimization problem similar to (\ref{example:lifting}), it is easy to verify that there is no $\alpha\in \mathbb{R}$ for which $-x_1+\alpha x_2\geq 0$ is valid for $S$. \end{example} In Example~\ref{example:nolifting}, ($i$) set $S$ is convex, ($ii$) we are trying to lift a linear inequality, and ($iii$) $x_2$ is fixed to a bound. Even then, it is not possible to lift the seed inequality when we insist that lifting should be accomplished using an affine function of the fixed variable; see Example~\ref{ex:twocont} in Section~\ref{sec:future} for further discussion. \subsection{Sufficient conditions under which seed inequalities can be lifted} In Theorem~\ref{thm:existence}, we identify a large class of single row QCQPs where lifting can be accomplished using affine functions of the fixed variables. 
\begin{definition} \label{defn:bbs} A set $S$ is a \textit{bipartite bilinear set}\footnote{We use the term bipartite, perhaps redundantly, to highlight that variables can be divided into two groups, such that any degree two term comes from product of variables one each from these two groups~\cite{dey2019new}.} if it is of the form $$ S = \left\{ \ (x,y) \in [0,1]^m \times [0,1]^n \ \Bigm| \ x^{\intercal} Q y + a ^{\intercal} x + b^{\intercal} y \geq c \ \right\}, $$ where $Q\in\mathbb{R}^{m\times n}$, $a\in \mathbb{R}^m$, $b\in \mathbb{R}^n$, and $c\in\mathbb{R}$. \end{definition} \begin{restatable}{theorem}{ThmExistence} \label{thm:existence} Let $S$ be a bipartite bilinear set. Given $C\times D\subset [m]\times[n]$ and {$\tilde{x}_i,\tilde{y}_j\in\{0,1\}$} for $i\in [m]\backslash C$, $j\in [n]\backslash D$, assume that inequality $ h(x_C, y_D) \geq r $ is valid for $\{ (x,y)\in S \ |\ x_{[m]\backslash C}=\tilde{x}_{[m]\backslash C}, \ y_{[n]\backslash D}=\tilde{y}_{[n]\backslash D} \}\neq\emptyset$, where $h$ is a concave function defined on {$[0,1]^{|C|+|D|}$}. Then, for any $k\in [m]\backslash C$, there exists a finite $f_k\in(-\infty,\infty)$ for which $h(x_C, y_D) + f_kx_k \geq r + f_k\tilde{x}_k$ is valid for {$\{ (x,y)\in S \ |\ x_{([m]\backslash C)\backslash\{k\}}=\tilde{x}_{([m]\backslash C)\backslash\{k\}}, \ y_{[n]\backslash D}=\tilde{y}_{[n]\backslash D} \}$}. \label{THM_EXISTENCE} \end{restatable} \begin{remark} The result of Theorem~\ref{THM_EXISTENCE} can be applied iteratively to all the fixed variables one at a time to obtain a valid inequality for $S$. Theorem~\ref{THM_EXISTENCE} holds even when the bounds on variables are not $[0,1]$, since we can always rescale and translate variables. \end{remark} The proof of Theorem~\ref{THM_EXISTENCE} is presented in Section~\ref{section:existence} and uses calculations similar to those presented in Example~\ref{example:one}. 
In particular, using a characterization of extreme points of the bipartite bilinear set $S$~\cite{dey2019new}, the proof reduces to establishing the result for three-variable problems where one of the variables is fixed. For a three-variable problem, a number of cases have to be analyzed to verify that the optimal value of an optimization problem similar to (\ref{example:lifting}) is finite. The proof can be turned into an algorithm to compute the lifting coefficients, although not necessarily an efficient or practical one. Theorem~\ref{THM_EXISTENCE} assumes that, when variables $x$ and $y$ are fixed, they are fixed at their bounds (either $0$ or $1$). When this assumption is not imposed, we show next through an example that lifting may not be possible. \begin{restatable}{example}{EgNonexists} Consider the bipartite bilinear set $ S= \{ (x,y,\hat{x})\in[0,1]^3 | \left(x-\sfrac{1}{4}\right)\left(y-\sfrac{1}{2}\right)$ $\geq \sfrac{\hat{x}}{4}+\sfrac{1}{8} \}.$ First, we argue that the seed inequality $x \ge \sfrac{3}{4}$ is valid for the restriction of $S$ where $\hat{x}=\sfrac{1}{2}$. This is clear as $|y-\sfrac{1}{2}|\le \sfrac{1}{2}$ when $y \in [0,1]$ and $|x-\sfrac{1}{4}|<\sfrac{1}{2}$ when $x < \sfrac{3}{4}$. Next, we claim that there is no $\alpha\in \mathbb{R}$ such that $x+\alpha(\hat{x}-\sfrac{1}{2}) \geq \sfrac{3}{4}$ is valid for $S$. Assume by contradiction that $x+\alpha (\hat{x}-\sfrac{1}{2}) \ge \sfrac{3}{4}$ is valid for $S$ for some $\alpha \in \mathbb{R}$. Since $(x,y,\hat{x})=(0,0,0) \in S$, we must have $-\sfrac{\alpha}{2} \ge \sfrac{3}{4}$. Since $(x,y,\hat{x})=(1,1,1) \in S$, we must have $1+\sfrac{\alpha}{2} \ge \sfrac{3}{4}$. This is the desired contradiction as the former expression requires that $\alpha \le -\sfrac{3}{2}$ while the latter requires that $\alpha \ge -\sfrac{1}{2}$. 
\end{restatable} \subsection{A framework for sequence-independent lifting} Given a set of variables fixed at their bounds and a seed inequality for the corresponding restriction, a valid inequality for the original problem can be obtained by lifting each fixed variable one at the time. This computationally demanding process requires the solution of a non-convex nonlinear optimization problem, similar to (\ref{example:lifting}), to lift each variable. It results in a lifted inequality whose form depends on the order in which variables are lifted. Next, we study situations where the lifting inequality obtained does not depend on the order in which variables are lifted. In particular, we develop a subadditive theory for lifting in QCQPs that is inspired by that originally developed in MILP in \cite{wolsey1977valid}. We consider the special case of separable bipartite bilinear constraints. \begin{restatable}{definition}{DefSeparable} \label{def:separable} A set $Q$ is a \textit{separable bipartite bilinear set} if it is of the form \begin{eqnarray*} Q:= \left\{ \ (x , y) \in [0, 1]^n \times [0, 1]^n \ \Bigm|\ \sum_{i = 1}^n a_i x_iy_i \geq d \ \right\}, \end{eqnarray*} for some $d$ and $a_i \in \mathbb{R}$ for $i \in [n]$, \textit{i.e.}, variables $x_i$ and $y_i$, for $i \in [n]$, appear in only one term. \end{restatable} In the separable case, it is natural to lift each pair of variables $x_i$ and $y_i$ together. Next, we derive conditions that guarantee that the form of the lifted inequality obtained is independent of the order in which these pairs are lifted. This result is obtained, as is common in MILP, by deriving a subadditive upper bound on the lifting function of the seed inequality, from which all lifting coefficients can be derived. \begin{restatable}{definition}{liftingfunction}\label{def:liftingfunction} Let $Q$ be a separable bipartite bilinear set. 
Assume that $\Lambda = \{I, J_0, J_1\}$ is a partition of $[n]$ (\textit{i.e.}, $I\cup J_0\cup J_1 = [n]$ with $I\cap J_0=I\cap J_1=J_0\cap J_1=\emptyset$) and that $h(x_I, y_I) \geq r$, is a valid inequality for $\{(x,y)\in Q \ |\ x_{J_0}=y_{J_0}=0,\ x_{J_1}=y_{J_1}=1\}$. For $\delta \in \mathbb{R}$, we define the lifting function of the seed inequality as $$ \textstyle \phi(\delta):= \max\left\{ \ r-h(x_I, y_I) \ \Bigm| \ \sum_{i\in I} a_ix_iy_i\geq \left(d-\sum_{i\in J_1}a_i\right) -\delta, \ (x_I,y_I) \in [0,1]^{2|I|} \ \right\}. $$ \end{restatable} Structured approximations of lifting functions allow for simple lifting of inequalities as described next in Proposition~\ref{lm:seqind}, whose proof can be found in Section~\ref{section:seqind}. \begin{restatable}{proposition}{LmSeqInd} \label{lm:seqind} Let $Q$ be a separable bipartite bilinear set and let $\Lambda = \{I, J_0, J_1\}$ be a partition of $[n]$. Let $\phi$ be the lifting function of seed inequality $h(x_I, y_I) \geq r$ for $\{(x,y)\in Q \ |\ x_{J_0}=y_{J_0}=0,\ x_{J_1}=y_{J_1}=1\}$ where $h$ is a concave function. Assume there exists $\psi: \mathbb{R} \mapsto \mathbb{R}$ and concave functions $\gamma_i:\mathbb{R}^2 \mapsto \mathbb{R}$ for $i \in J_0 \cup J_1$ such that \begin{enumerate}[label={(\roman*)},align=left] \item \label{item:seqind:1} $\psi(\delta)\geq \phi(\delta)$, $\forall \delta \in \mathbb{R}$; \item \label{item:seqind:2} $\psi$ subadditive, (\textit{i.e.}, $\psi(\delta_1)+\psi(\delta_2)\geq \psi(\delta_1+\delta_2)$, $\forall \delta_1,\delta_2\in\mathbb{R}$) with $\psi(0) = 0$; \item \label{item:seqind:3} for $i\in J_0$, $\gamma_i(x,y)\geq \psi(a_ixy), \forall (x,y)\in [0,1]^2,$ \item \label{item:seqind:4} for $i\in J_1$, $\gamma_i(x,y)\geq \psi(a_ixy-a_i), \forall (x,y)\in [0,1]^2.$ \end{enumerate} Then, the lifted inequality $h(x_I, y_I)+\sum_{i\in J_0\cup J_1} \gamma_i(x_i,y_i)\geq r$ is a valid convex inequality for $Q$. 
\end{restatable} The statement of Proposition~\ref{lm:seqind} does not specify the type of functional forms $\gamma_i(x_i,y_i)$ to use in ensuring that conditions \ref{item:seqind:3} and \ref{item:seqind:4} are satisfied. It is however clear from the definition that choosing $\gamma_i(x_i,y_i)$ to be the concave envelope of $\psi(a_ix_iy_i)$ over $[0,1]^2$ when $i \in J_0$, and the concave envelope of $\psi(a_ix_iy_i - a_i)$ over $[0,1]^2$ when $i \in J_1$ is the preferred choice for $\gamma_i$. \begin{remark} While we state the result of Proposition~\ref{lm:seqind} for a set $Q$ defined by a single separable bipartite bilinear constraint, a similar result would also hold for sets defined by multiple separable bipartite bilinear constraints. \end{remark} \subsection{A seed inequality from a minimal covering set} To generate lifted inequalities for separable bipartite bilinear sets, we focus next on a family of restrictions we refer to as \textit{minimal covering sets}. For such minimal covering sets, we introduce a provably strong convex, second-order cone representable valid inequality. We use this inequality as the seed in our lifting procedures. \begin{restatable}{definition}{DefMinimal} \label{def:minimal} Let $k \in \mathbb{Z}_{+}$ be a positive integer. We say that $a_i\in \mathbb{R}$ for $i \in [k]$ form a \textit{minimal cover} of $d\in \mathbb{R}$, if (i) $a_i > 0$ for all $i \in [k]$, $d >0$, (ii) $\sum_{i = 1}^k a_i > d$, (iii) $\sum_{i \in K} a_i \leq d$, $\forall K \subsetneq [k]$. For a separable bipartite bilinear set $Q$, we say that a partition $\Lambda = \{I, J_0, J_1\}$ of $[n]$, where $I\neq \emptyset$, is a \textit{minimal cover yielding partition} if: $a_i$ for $i \in I$ form a minimal cover of $d^\Lambda:= d - \sum_{i\in J_1} a_i$. For a minimal cover yielding partition, we let $J_0^+ := \{ i\in J_0\ |\ a_i>0 \}$, $J_0^- := \{ i\in J_0\ |\ a_i < 0 \}$; we define $J_1^+$ and $J_1^-$ similarly. 
\end{restatable} \begin{remark} When $k \geq 2$, conditions (ii) and (iii) in the definition of minimal cover imply condition (i). For example, if $a_i \leq 0$ for some $i\in [k]$, then (ii) implies $\sum_{j \in [k] \setminus \{i\}} a_j > d$, contradicting (iii). Now (iii) together with $a_i >0$ for $i \in [k]$ implies $d >0$. \end{remark} \begin{notation}\label{not:1} Assuming that $a_i$ for $i \in [n]$ form a minimal cover of $d$, we use (i) $\Delta:= \sum_{i=1}^n a_i - d$, (ii) $d_i:= d - \sum_{j\in [n]\backslash \{i\}} a_j$, (iii) $I^> := \{i \in [n] \ |\ a_i > \Delta\}$, (iv) when $I^> \neq \emptyset$, $i_0$ to be any index in $I^>$ such that $a_{i_0} = \min \{ a_i \ |\ i \in I^> \}$. \end{notation} For a minimal cover, conditions (ii) and (iii) in Definition~\ref{def:minimal} imply that $\Delta>0$ and $a_i \ge \Delta$ for all $i \in [n]$, respectively. Simple computations show that $d_i = a_i - \Delta$. Our overall plan is the following. We will fix $x_i = y_i=0$ for $i\in J_0$ and $x_iy_i=1$ for $i\in J_1$. Then, we will find a valid seed inequality for the set where the coefficients form a minimal cover. Finally, we will lift this seed inequality. One key reason to generate cuts from a seed inequality corresponding to a minimal cover is the following result. \begin{restatable}{theorem}{ThmMinimal} \label{thm:minimal} For a nonempty separable bilinear set $Q$, either there exists at least one minimal cover yielding partition or $\mathrm{conv}(Q)$ is polyhedral. \end{restatable} Loosely speaking, the proof of Theorem~\ref{thm:minimal}, which is given in Section~\ref{section:minimal}, is based on showing that if there is no minimal cover yielding partition, then $Q$ is ``almost'' a packing-type set, \textit{i.e.}, a set of the form $\{ (x,y) \in [0, 1]^{2n} \ |\ \sum_{i = 1}^n a_ix_iy_i \leq d\}$ where $a_i$s are non-negative. 
For packing sets $Q$, \cite{richard2010liftingframework} shows that $\textup{conv}(Q)=\textup{proj}_{x,y}(G)$ where \begin{eqnarray*} G = \left\{(x,y,w) \in [0, 1]^{3n} \ \Bigm|\ \sum_{i = 1}^n a_iw_i \leq d, \ x_i + y_i -1 \leq w_i,\ \forall i \in [n]\right\}. \end{eqnarray*} We say ``almost", since there are non-packing sets such as $S:= \{ (x,y) \in [0, 1]^4\ |\ x_1y_1 - 100x_2y_2 \geq -98\}$, where there is no partition that yields a minimal cover. Such sets are ``overwhelmingly" like a packing set; in the case of the example, it is a perturbation of the packing set $\{ (x_2, y_2) \in [0, 1]^2\ |\ 100x_2y_2\leq 98\}$. For such sets it is not difficult to show that $\mathrm{conv}(S)$ is polyhedral. Since the main focus of this paper is the study of lifted convex (nonlinear) inequalities and since in the packing case the convex hull is trivially obtained using McCormick inequalities~\cite{mccormick1976computability}, the remainder of the paper will concentrate on the case where there exists a minimal cover yielding partition. Associated with a minimal cover is a convex valid inequality that we present next. \begin{restatable}{theorem}{ThmValid} \label{thm:valid} Consider the separable bipartite bilinear minimal covering set as presented in Definition~\ref{def:separable} where $a_i$, $i \in [n]$ form a minimal cover of $d$. Then, the \textit{bilinear cover inequality} is valid for $Q$: \begin{eqnarray} \label{eq:bilincoverineq} \sum_{i = 1}^n \frac{\sqrt{a_i}}{ \sqrt{a_i}-\sqrt{d_i}}\left( \sqrt{x_iy_i} - 1\right) \geq -1. \end{eqnarray} \end{restatable} Our proof of Theorem~\ref{thm:valid}, which is presented in Section~\ref{section:valid}, uses techniques from disjunctive programming~\cite{balas1998disjunctive} and an ``approximate version" of Fourier-Motzkin projection. 
In particular, using the minimal covering property of the coefficients and a characterization of the extreme points of bipartite bilinear sets~\cite{dey2019new}, we obtain $n$ second-order cone representable sets whose union contains all the extreme points of the separable bipartite bilinear set.
\begin{proposition}\label{prop:comparedtoCRT} Inequality (\ref{eq:oldieq}) is dominated by (\ref{eq:bilincoverineq}) over the $0-1$ box, \textit{i.e.}, $$\{\ (x,y) \in [0, 1]^{2n} \ |\ (\ref{eq:oldieq})\ \} \supseteq \{\ (x,y) \in [0, 1]^{2n} \ |\ (\ref{eq:bilincoverineq}) \ \}.$$ \end{proposition} Even though Proposition~\ref{prop:comparedtoCRT} hints at the strength of the bilinear cover inequality, it can be easily verified that (\ref{eq:bilincoverineq}) does not produce the convex hull of $Q$. However there are a number of reasons to use this inequality as a seed for lifting. The first reason is that, not only is inequality (\ref{eq:bilincoverineq}) second-order cone representable, we only need to introduce one extra variable representing $\sqrt{x_iy_i}$ for each $i \in [n]$, to write it as a second-order cone representable set. Apart from the convenience of using this inequality within modern conic solvers, the main reason for considering it as a seed inequality is its strength. In particular, we prove next that (\ref{eq:bilincoverineq}) provides a constant factor approximation of the convex hull of the original set. \begin{restatable}{theorem}{ThmStrength} \label{thm:strcovercon} Let $Q$ be a bipartite bilinear minimal covering set Let $R:= \{ (x,y) \in \mathbb{R}^{2n}_{+} \ |\ (\ref{eq:bilincoverineq})\}$. Then $ (4\cdot R) \cap [0, \ 1]^{2n} \subseteq \textup{conv}(Q) \subseteq R \cap [0, \ 1]^{2n}.$ \end{restatable} Since $R$ is a set of the covering type (that is, its recession cone is the non-negative orthant), we have that $4\cdot R \subseteq R$. The proof of Theorem~\ref{thm:strcovercon}, which is given in Section~\ref{section:strcovercon}, is based on optimizing linear functions with non-negative coefficients on $R$ and $Q$ and proving a bound of $4$ on the ratio of their optimal objective function values. 
\subsection{Lifting the bilinear cover inequality} We now follow the framework of Proposition~\ref{lm:seqind} to perform sequence-independent lifting of the bilinear cover inequality. The first step is to study the lifting function. \begin{restatable}{theorem}{ThmBound} \label{thm:upperbound} Let $\phi$ be the lifting function for valid inequality~(\ref{eq:bilincoverineq}). Define \begin{equation} \psi(\delta) := \left\{ \begin{array}{lrrll} l_+(\delta+\Delta) - 1\ & &\delta&\leq -\Delta \\ l_-\delta & -\Delta\leq & \delta & \leq 0 \\ l_+\delta & 0 \leq & \delta,& & \end{array}\right. \label{eq:upperbound} \end{equation} where $l_- = \frac{1}{\Delta}$ and where $l_+=\frac{\sqrt{a_{i_0}}+\sqrt{d_{i_0}}}{\Delta \sqrt{d_{i_0}}}$ if $a_{i_0}$ exists and $l_+=\frac{1}{\Delta}$ otherwise. Then \begin{enumerate}[label={(\roman*)},align=left] \item $l_+\geq l_-> 0$, \item $\psi(\delta)$ is subadditive over $\mathbb{R}$ with $\psi(0) = 0$, and \item $\phi(\delta) \le \psi(\delta)$ for $\delta \in \mathbb{R}$. \end{enumerate} \end{restatable} Although computing the lifting function for an arbitrary valid inequality, in general, appears to be a difficult task, the bilinear cover inequality (\ref{eq:bilincoverineq}) has sufficient structure that we can derive a strong subadditive upper bound in Theorem~\ref{thm:upperbound}. The key to proving Theorem~\ref{thm:upperbound}, as we show in Section~\ref{section:upperbound}, is to first obtain the lifting function exactly in a region around the origin, and to argue that the linear upper bound of the lifting function for this region upper bounds the lifting function globally. Figure~\ref{fig:sub} presents examples of the lifting function $\phi$, and the upper bound $\psi$ we derived in Theorem~\ref{thm:upperbound} for the cases when $a_{i_0}$ exists and for the case when it does not. 
\begin{figure} \centering \begin{subfigure}[b]{0.45\textwidth} \centering \includegraphics[width=\textwidth]{liftfunplot5_1n.pdf} \caption{$a_i=2$, $\Delta = 1$} \end{subfigure} \begin{subfigure}[b]{0.45\textwidth} \centering \includegraphics[width=\textwidth]{liftfunplot5_2n.pdf} \caption{$a_i=1$, $\Delta = 1$} \end{subfigure} \caption{Lifting function $\phi(\delta)$ in red and subadditive upper bound $\psi(\delta)$ in blue} \label{fig:sub} \end{figure} We observe in Figure~\ref{fig:sub} that the lifting function is not subadditive since it is convex in a neighborhood of the origin. Therefore, building a subadditive approximation is required to achieve sequence-independent lifting. Building on the subadditive upper bound obtained in Theorem~\ref{thm:upperbound}, we are now able to lift the bilinear cover inequality in a sequence-independent manner. \begin{restatable}{theorem}{ThmLifted} \label{thm:lifted} Consider the separable bipartite bilinear set presented in Definition~\ref{def:separable}. Let $\Lambda = \{I, J_0, J_1\}$ be a minimal cover yielding partition and let $\Delta, a_{i_0}, d_i, l_{+}, l_{-}$ be defined as in Theorems~\ref{thm:valid} and~\ref{thm:upperbound} (We clarify that they are calculated using $d^\Lambda$ instead of $d$). Let $J_0^+$, $J_0^-$, $J_1^+$, and, $J_1^-$ be as in Definition~\ref{def:minimal}. 
Then inequality \begin{eqnarray} \label{eq:liftedbilinearcoverinequality} \sum_{i \in I} \frac{\sqrt{a_i}}{ \sqrt{a_i}-\sqrt{d_i}}\left( \sqrt{x_iy_i} - 1\right) + \sum_{i\notin I} \gamma_i (x_i, y_i) \geq -1, \end{eqnarray} is valid for $Q$ where $\gamma_i: \mathbb{R}^2 \rightarrow \mathbb{R}$ for $i \in [n]\setminus I$ are the concave functions: \begin{enumerate}[label={(\roman*)},align=left] \item $\gamma_i(x,y) = l_+a_i\min\{x,y\}$ for $i\in J_0^+$; \item $\gamma_i(x,y) = -l_+a_i\min\{2-x-y, 1\}$ for $i\in J_1^-$; \item $\gamma_i(x,y) = \min\{l_-a_i(x+y-1), l_+a_i(x+y-1)+l_+\Delta-1, 0\}$ for $i\in J_0^-$; \item $\gamma_i(x,y) = \min\{\tilde{g}_i(x,y),\tilde{h}_i(x,y), g_i(x,y), h_i(x,y)\},$ for $i\in J_1^+$ with $a_i\geq a_{i_0}$ when $I^>\neq\emptyset$, and $\gamma_i(x,y) = \min\{\tilde{g}_i(x,y),\tilde{h}_i(x,y)\}$ in all other cases where $i\in J_1^+$, with \begin{align*} \tilde{g}_i(x,y)&=l_+a_i(\min\{x, y\}-1)+l_+\Delta-1\\ \tilde{h}_i(x,y)&=l_-a_i(\min\{x, y\}-1)\\ g_i(x,y)&= \sqrt{a_i - \Delta}\sqrt{a_i}l_+\sqrt{xy} -l_+(a_i-\Delta) -1 \\ h_i(x,y)&= \frac{\sqrt{a_i}}{\sqrt{a_i} - \sqrt{d_i}}(\sqrt{xy} - 1). \end{align*} \end{enumerate} \end{restatable} We refer to inequality (\ref{eq:liftedbilinearcoverinequality}) as \textit{lifted bilinear cover inequality}. {This inequality is second-order cone representable.} The proof of Theorem~\ref{thm:lifted} can be found in Section~\ref{section:lifted}. \section{Future directions}\label{sec:future} The results presented in this paper open up new avenues for generating cutting-planes for QCQPs. They also raise new theoretical and computational questions that can be investigated. To illustrate this assertion, we revisit Example~\ref{example:nolifting} next. \begin{example}\label{ex:twocont} Consider $S:= \{ (x_1,x_2) \in [0,1]^2 \,|\, - x_1^2 - (x_2-0.5)^2\geq -0.5^2\}$ with the same fixing as in Example~\ref{example:nolifting}, \textit{i.e.}, $x_2 =0$. 
For the associated restriction $S|_{x_2=0}$, consider the seed inequality $-x_1 \geq 0$. In contrast to our earlier discussion, consider now the problem of lifting this seed inequality into an inequality of the form $-x_1 + \alpha \sqrt{x_2} \geq 0$. Finding the values of $\alpha$ that generate a valid inequality is equivalent to solving the problem \begin{align*} \alpha^*:= \sup\ \left\{ \frac{x_1}{\sqrt{x_2}} \ \Bigm| \ -x_1^2 - (x_2-0.5)^2\geq -0.5^2, \ x_1 \in [0,1],\ x_2 \in (0, 1] \right\}. \end{align*} Using constraint $-x_1^2 - (x_2-0.5)^2\geq -0.5^2$ we can bound the objective function as: $ \frac{x_1}{\sqrt{x_2}} \leq \frac{\sqrt{0.5^2 - (x_2-0.5)^2}}{\sqrt{x_2}} = \frac{\sqrt{(1 - x_2)(x_2)}}{\sqrt{x_2}} = \sqrt{1 - x_2}. $ It follows that selecting $\alpha \ge \alpha^* = 1$ yields a valid inequality for $S$. Note first that $\alpha <0$ leads to an invalid inequality since $(x_1,x_2)=(0, 0.5)$ is a feasible point. Moreover, any $\alpha \in [0,1)$ yields an invalid inequality, since the point $(x_1,x_2)$ where $x_1 = \sqrt{x_2(1-x_2)}$ and $x_2 = 1-(\sfrac{(1+\alpha)}{2})^2$ is feasible. Therefore, the inequality $-x_1 + \sqrt{x_2} \geq 0$ is the strongest such lifted inequality. \end{example} The above example raises the question of obtaining a complete characterization of when one can accomplish lifting, \textit{i.e.}, of generalizing Theorem~\ref{THM_EXISTENCE} to situations where the functional form of the lifted variable is not necessarily linear. It would also be valuable to develop a theory to accomplish sequence-independent lifting in the more general case of bipartite bilinear programs, instead of just the separable case. On the computational side, one key question is to understand the complexity of separating the lifted bilinear cover inequality presented in Theorem~\ref{thm:lifted} and to design efficient computational schemes to perform separation. 
Finally, extensive numerical experiments should be conducted to understand the practical strength of these inequalities and to determine how useful they can be in the solution of QCQPs. Given the strength of the seed inequality, we are hopeful that these lifted inequalities could yield nontrivial dual bound improvements. \section{Proof of Theorem~\ref{thm:existence}} \label{section:existence} \begin{proof}[Theorem~\ref{thm:existence}] Without loss of generality, we assume that we lift a component of the variable $x$, say $x_k$ with $k\in [m]\backslash C$. In addition, we assume $\tilde{x}_k=0$; if not we may perform the operation $x_k\leftarrow 1-x_k$ and $f_k\leftarrow -f_k$. In order to find a lifting coefficient, We examine the following optimization problem \begin{eqnarray*} u_k(x_k):=\frac{1}{x_k}&\max &r - h(x_C, y_D)\\ &\mathrm{s.t.} & x^{\intercal} Q y + a^{\intercal} x + b^{\intercal} y \geq c,\\ && x_C, y_D\in [0,1],\ x_{[m]\backslash C\backslash\{k\}}=\tilde{x}_{[m]\backslash C\backslash\{k\}}, y_{[n]\backslash D}=\tilde{y}_{[n]\backslash D}. \end{eqnarray*} Now note that $u_k^* = \sup_{x_k\in(0,1]} u_k(x_k)$, assuming it exists, is a valid the coefficient for lifting, \textit{i.e.}, $ h(x_C, y_D) + u_k^* x_k \geq r $ is a valid lifted inequality. Any coefficient larger than $u_k^*$ is also valid for lifting. From the concavity of $h$ (\textit{i.e.}, convexity of $r-h$), for any specific $x_k$ the optimal solution must be an extreme point. According to~\cite{dey2019new}, all extreme points satisfy the following property: except one pair of $(x_i, y_j)$, all other $x_{i'}, y_{j'}$ pairs will be equal to either $0$ or $1$. 
Thus, for any pair of partitions $\{i\}\cup I_0\cup I_1 = C$ (denoted by $I$) and $\{j\}\cup J_0\cup J_1 = D$ (denoted by $J$), define \begin{eqnarray*} u_{I,J}(x_k):=\frac{1}{x_k}&\max &r - h(x_C, y_D)\\ &\mathrm{s.t.} & x^{\intercal} Q y + a^{\intercal} x + b^{\intercal} y \geq c,\\ && x_i,y_j\in [0,1],\ x_{I_0} = 0, x_{I_1} = 1, y_{J_0} = 0, y_{J_1} =1,\\ && x_{[m]\backslash C\backslash\{k\}}=\tilde{x}_{[m]\backslash C\backslash\{k\}}, y_{[n]\backslash D}=\tilde{y}_{[n]\backslash D}. \end{eqnarray*} We clearly have $u_k(x_k)=\max_{I,J} u_{I,J}(x_k)$. In addition, observe that $u_k^*=\max_{I,J} u_{I,J}^*$ where $u_{I,J}^* = \sup_{x_k\in(0,1]} u_{I,J}(x_k)$. Therefore in order to prove that $u_k^*<\infty$, it is sufficient to show that for any partition $I,J$, $u_{I,J}^*<\infty$. Therefore, we now focus on one instance of such partitions. { We define $\tilde{x} \in \mathbb{R}^m$ and $\tilde{y} \in \mathbb{R}^n$ as: $(\tilde{x}_{I,J})_{I_0\cup\{i\}} = 0$, $ (\tilde{x}_{I,J})_{I_1}=1$, $ (\tilde{y}_{I,J})_{J_0\cup\{j\}} = 0$, $(\tilde{y}_{I,J})_{J_1} = 1.$ In addition, define $r_{I,J} := r-p_{I_1}^{\intercal} \mathbf{1}_{I_1}-q_{J_1}^{\intercal} \mathbf{1}_{J_1}$, $c_{I,J}:= c-a^{\intercal} \tilde{x}_{I,J}-b^{\intercal} \tilde{y}_{I,J}-\tilde{x}_{I,J}^{\intercal} Q \tilde{y}_{I,J}$, $a_{I,J}:=a_i+ Q_{i, *} \tilde{y}_{I,J} $, $b_{I,J} := b_j + \tilde{x}_{I,J}^{\intercal} Q_{*, j}$, and $a_{I,J,k} := a_k + Q_{k, *}\tilde{y}_{I,J}$ so that we have equivalently} \begin{eqnarray*} u_{I,J}(x_k)=\frac{1}{x_k}&\max &r_{I,J} - h_{I,J}(x_i, y_j)\\ &\mathrm{s.t.} & q_{ij}x_iy_j + a_{I,J} x_i + b_{I,J}y_j + a_{I,J,k}x_k + q_{kj} x_k y_j \geq c_{I,J},\\ && (x_i,y_j) \in [0,1]^2, \end{eqnarray*} where $h_{I,J}$ is $h$ after the appropriate restriction. Note that $h_{I,J}$ is concave. 
As we are focusing on the pair of partitions $I,J$, for simplicity we rewrite the problem as \begin{eqnarray*} u(\hat{x}):=\frac{1}{\hat{x}}&\max_{x,y} &r - h(x, y)\\ &\mathrm{s.t.} & qxy + ax+by+\hat{a}\hat{x} + \hat{q} \hat{x}y \geq c,\ (x,y)\in[0,1]^2, \end{eqnarray*} and $u^*:=\sup_{\hat{x}\in(0,1]} u(\hat{x})$. It remains to prove $u^* < \infty$. For any $\epsilon \in (0,1]$ and $\hat{x}\in [\epsilon,1]$, we have \begin{eqnarray*} u(\hat{x})= &\max_{x,y} &\{ \ \frac{1}{\hat{x}}(r - h(x, y))\ \bigm| \ qxy + ax+by+\hat{a}\hat{x} + \hat{q} \hat{x}y \geq c, \ (x,y)\in[0,1]^2 \ \},\\ \leq &\max_{x,y} &\{ \ \frac{1}{\hat{x}}(r - h(x, y)) \ \bigm| \ (x,y)\in[0,1]^2 \ \} \\ \leq &\max_{x,y} &\left\{\ \max\{\frac{1}{\epsilon}(r - h(x, y)),(r - h(x, y))\}\ \bigm| \ (x,y)\in [0,1]^2 \ \right\}\\ :=& w < \infty. \end{eqnarray*} It is clear that $u(\hat{x})\leq w<\infty$ for any $\hat{x}\in [\epsilon,1]$. Therefore, to show that $u^*<\infty$, it is sufficient to show that $\limsup_{\hat{x}\downarrow 0}u(\hat{x})<\infty$. We define \begin{subequations}\label{sub*} \renewcommand{\theequation}{\arabic{parentequation}.\arabic{equation}} \begin{align} v(\hat{x}) = \max\ & r - h(x, y) \nonumber \\ \mathrm{s.t.}\ & x\geq 0, \label{sub1*} \\ & x\leq 1, \label{sub2*}\\ & y\geq 0, \label{sub3*}\\ & y\leq 1, \label{sub4*}\\ &qxy + ax+by+\hat{a}\hat{x} + \hat{q} \hat{x}y \geq c. \label{sub5*} \end{align} \end{subequations} Denote the feasible region of \eqref{sub*} as $S(\hat{x})$. Since $v(0) \leq 0$ {(because the seed inequality is assumed to be valid for the restriction)}, one can prove that $\limsup_{\hat{x}\downarrow 0}u(\hat{x}) <\infty$ by showing that there exists $l<\infty$ such that $$v(\hat{x})-v(0)\leq l \hat{x}+o(\hat{x}) \textup{ for }\hat{x}\downarrow 0. \footnote{This is equivalent to saying $\limsup_{\hat{x}\downarrow 0} \frac{v(\hat{x}) - v(0)}{\hat{x}} \leq l$ }$$ We denote the feasible region of the above problem as $S(\hat{x})$. 
For $i \in \{1,\ldots,5\}$, we define $v_i(\hat{x})$ to be the optimal value of \eqref{sub*} where constraint $(\ref{sub*}.i)$ is set to equality.
\begin{enumerate}[label={(\roman*)}] \item \label{whatwascase2} We have $v_1(\hat{x})\leq v_1(0)\leq v(0)\leq 0$ (this holds even in the case when $S_1(\hat{x}) = \emptyset$ or $S_1(\hat{x}) = S_1(0) = \emptyset$), \textit{i.e.}, $v_1(\hat{x})-v(0)\leq 0\cdot \hat{x}$. \item \label{whatwascase1} We consider two sub-cases: \begin{enumerate}[label={(\alph*)}] \item if $b=0$, the feasibility of $\hat{x}=0$ yields $c\leq 0$. Thus, we have $S_1(0) = [0,1]\supseteq S_1(\hat{x})$ for $\hat{x}\in (0,\hat{x}_0)$ (actually in case \ref{whatwascase2}.). \item if $b\neq 0$, assume first that $b<0$. Then $S_1(\hat{x}) = \{y\in [0,1]: y \leq (c-\hat{a}\hat{x})/(b+\hat{q}\hat{x})\}$. We also denote $\Delta(\hat{x}):= (c-\hat{a}\hat{x})/(b+\hat{q}\hat{x}) - c/b$ and since $b < 0$, we have that $|\mathrm{d}\Delta(\hat{x})/\mathrm{d} \hat{x}| < \infty$ for $\hat{x}=0$. Since $S_1(\hat{x})\backslash S_1(0)\neq \emptyset$, we have $0 \leq c/b<1$ as well as $\Delta(\hat{x})\geq 0$. Utilizing the fact that $\Delta (\hat{x})\in[0,1-c/b]$ (the upper bound from the fact that $\hat{x}$ is assumed to be sufficiently small) and the concavity of $h$, we obtain \begin{equation} \label{eq:concavelinear} h\left(0,\frac{c}{b}+\Delta(\hat{x})\right)\geq \frac{\Delta(\hat{x})}{1-c/b} h(0,1)+\left(1-\frac{\Delta(\hat{x})}{1-c/b}\right) h\left(0, \frac{c}{b}\right). 
\end{equation} We now have \begin{eqnarray*} v_1(\hat{x})-v(0)&\leq& v_1(\hat{x})-v_1(0)\\ & \leq& \max\left\{(r - h(0, 0)) - ( r - h(0,0)),\right.\\ &&\left.(r - h(0, \Delta(\hat{x})+c/b) - (r - h(0, c/b) )\right\}\\ & \leq& \max\left\{0,\frac{h(0,c/b)-h(0,1)}{1-c/b} \Delta(\hat{x}) \right\}\\ & =& \max\left\{0,\left.\frac{h(0,c/b)-h(0,1)}{1-c/b}\frac{\mathrm{d}\Delta(\hat{x})}{\mathrm{d} \hat{x}}\right|_{\hat{x}= 0}\hat{x}+o(\hat{x})\right\},\ \ \ \ (\hat{x}\downarrow 0) \end{eqnarray*} where the second inequality comes from the fact that $v_1(\hat{x}) = \max \{ r- h(0,0), r - h(0, \Delta + c/b)\}$ and $v_1(0) = \max \{ r- h(0,0), r - h(0, c/b)\}$, and the third inequality follows from (\ref{eq:concavelinear}), and the last equality follows from Taylor's series expansion of $\frac{h(0,c/b)-h(0,1)}{1-c/b}\Delta(\hat{x})$ around $\hat{x} = 0$ . Thus, there exists $l_1<\infty$ such that $v_1(\hat{x})-v(0)\leq l_1 \hat{x}+o(\hat{x})$ for $\hat{x}\downarrow 0$. A similar argument holds for the case of $b>0$. \end{enumerate} \end{enumerate} \paragraph{The case of $v_5$:} If $q=0$, then it is easy to see that there always exists an optimal solution to the optimization problem corresponding to computing $v(\hat{x})$ such that one of \eqref{sub1*}-\eqref{sub4*} is active. Therefore if $q = 0$, it is sufficient to verify (\ref{eq:toproveforthem1}) for $v_1$, $v_2$, $v_3$, and $v_4$ as $v_5(\hat{x}) \leq \max \left\{v_1(\hat{x}), v_2(\hat{x}), v_3(\hat{x}), v_4(\hat{x})\right\}$. Therefore, we consider the case of $v_5$ for $q\neq 0$. Without loss of generality assume that $q>0$ or perform the transformation $x\leftarrow 1-x$. We in addition assume that $q=1$ or we can scale all parameters by $1/q$. The problem can now be rewritten as \begin{eqnarray*} v_5(\hat{x}) := &\max & r - h(x,y)\\ &\mathrm{s.t.} & (x+b+\hat{q}\hat{x})(y+a)= c+ab +(\hat{q}a-\hat{a})\hat{x},\ (x,y)\in[0,1]^2. \end{eqnarray*} We denote its feasible region by $S_5(\hat{x})$. 
The feasible region is the boundary of a hyperbola intersected with the $[0, 1]^2$ box. If both the connected components of the hyperbola intersect the $[0, 1]^2$ box, or $c+ab+(\hat{q}a-\hat{a})\hat{x}\leq 0$, then it is easy to see that there exists an optimal solution of the optimization problem corresponding to computation of $v(\hat{x})$ where at least one of \eqref{sub1*}-\eqref{sub4*} is active, \textit{i.e.}, $v_5(\hat{x}) \leq \max \left\{v_1(\hat{x}), v_2(\hat{x}), v_3(\hat{x}), v_4(\hat{x})\right\}$. So we can disregard this case as well and assume that only one of the connected components of the hyperbola is feasible, as well as $c+ab+(\hat{q}a-\hat{a})\hat{x}\geq 0$. Note again that if $S(0) = \emptyset$, then there exists $\hat{x}_0 > 0$ such that $S(\hat{x}) = \emptyset$ for all $0 \leq \hat{x} < \hat{x}_0$ and thus $v_5(\hat{x}) \leq v(\hat{x}) \leq 0$. Therefore we may assume that $S(0) \neq \emptyset.$ Let $h^M := \max\{\max_{(x,y)\in [0,1]^2} h(x,y), 0\}$ and $h^m = \min\{\min_{(x,y)\in [0,1]^2} h(x,y), 0\}$. \begin{enumerate}[label={(\roman*)}] \item We first consider the case when $c+ab = 0$. Feasibility of $S(0)$ requires $-b\in[0,1]$ or $-a\in[0,1]$. In addition, as only one part of the hyperbola is feasible for $S(\hat{x})$ for $\hat{x} >0$ (and sufficiently small), we obtain that either $-a\not\in (0,1)$ or $-b\not\in(0,1)$, and in addition $\hat{q}a-\hat{a}\geq 0$. 
\label{casei} \begin{enumerate}[label={(\alph*)}] \item \label{caseia} If $-a \in[0,1]$ and $-b\notin[0,1]$ (see Fig~\ref{fig:exist_1a}), \begin{figure} \centering \begin{subfigure}[b]{0.43\textwidth} \centering \includegraphics[width=\textwidth]{exist_1a.pdf} \caption{Case \ref{casei}\ref{caseia}} \label{fig:exist_1a} \end{subfigure} \begin{subfigure}[b]{0.43\textwidth} \centering \includegraphics[width=\textwidth]{exist_1d1.pdf} \caption{Case \ref{casei}\ref{caseid1}1} \label{fig:exist_1d1} \end{subfigure} \\ \begin{subfigure}[b]{0.43\textwidth} \centering \includegraphics[width=\textwidth]{exist_1e1.pdf} \caption{Case \ref{casei}\ref{caseie1}1} \label{fig:exist_1e1} \end{subfigure} \begin{subfigure}[b]{0.43\textwidth} \centering \includegraphics[width=\textwidth]{exist_2a1.pdf} \caption{Case \ref{caseii}\ref{caseiia}1} \label{fig:exist_2a1} \end{subfigure} \\ \begin{subfigure}[b]{0.43\textwidth} \centering \includegraphics[width=\textwidth]{exist_2a2.pdf} \caption{Case \ref{caseii}\ref{caseiia}2} \label{fig:exist_2a2} \end{subfigure} \begin{subfigure}[b]{0.43\textwidth} \centering \includegraphics[width=\textwidth]{exist_2a3.pdf} \caption{Case \ref{caseii}\ref{caseiia}3} \label{fig:exist_2a3} \end{subfigure} \\ \begin{subfigure}[b]{0.43\textwidth} \centering \includegraphics[width=\textwidth]{exist_2a4.pdf} \caption{Case \ref{caseii}\ref{caseiia}4} \label{fig:exist_2a4} \end{subfigure} \begin{subfigure}[b]{0.43\textwidth} \centering \includegraphics[width=\textwidth]{exist_2a5.pdf} \caption{Case \ref{caseii}\ref{caseiia}5} \label{fig:exist_2a5} \end{subfigure} \end{figure} as clearly $(x,-a)\in S(0)$, we have \begin{eqnarray*} && v_5(\hat{x})-v(0)\\ &\leq& \max\left\{\begin{array}{ll} \max & r - h(x, y)- v(0) \\ \mathrm{s.t.}& (x,y)\in S(0) \\ \textrm{ } \end{array},\right.\left.\begin{array}{ll} \max & (r - h(x, y)) - (r - h(x,-a) ) \\ \mathrm{s.t.} & xy + ax+by+\hat{a}\hat{x} + \hat{q} \hat{x}y = c\\ & (x,y)\in[0,1]^2\backslash S(0)\end{array} \right\}\\ 
&\leq& \max\left\{0,\ \begin{array}{ll}\max & h(x, -a) - h(x, y)\\ \mathrm{s.t.} & y = \frac{(\hat{q}a-\hat{a})\hat{x}}{x+b+\hat{q}\hat{x}}-a,\\ & (x,y)\in[0,1]^2\backslash S(0)\end{array} \right\}. \end{eqnarray*} Thus, depending on the sign of { $b$ (i.e. sign of $x+b+\hat{q}\hat{x}$ for sufficiently small $\hat{x}$)}, for $(x,y)\in[0,1]^2\backslash S(0)$ we have, using concavity of $h$, either $$ h(x, y) \geq \frac{\Delta(x, \hat{x})}{1+a} h(x, 1) + (1-\frac{\Delta(x, \hat{x})}{1+a})h(x, -a)\geq \frac{\Delta(x, \hat{x})}{1+a} (h^m-h^M) + h(x, -a) $$ or $$ h(x, y) \geq \frac{-\Delta(x, \hat{x})}{a} h(x, 0) + (1-\frac{-\Delta(x, \hat{x})}{a})h(x, -a)\geq \frac{-\Delta(x, \hat{x})}{a} (h^m-h^M) + h(x, -a) $$ where $ \Delta(x,\hat{x}) := \frac{(\hat{q}a-\hat{a})\hat{x}}{x+b+\hat{q}\hat{x}} $ is greater than $0$ in the first case and is less than $0$ in the second. From the continuity of $h$ we can get $\xi$ independent of $x$ and $\hat{x}$ such that $ h(x, y) \geq h(x, -a) + \xi\Delta(x, \hat{x}) . $ Therefore, we conclude that \begin{eqnarray*} v_5(\hat{x})-v(0) &\leq& \max\left\{0, \max_{x\in [0,1]}-\xi\Delta(x, \hat{x})\right\} =\max\left\{0, \max_{x\in\{0,1\}}-\xi\Delta(x, \hat{x})\right\}\\ &=& \max\left\{0, \max_{x\in\{0,1\}}\left.\frac{-\xi \partial \Delta(x, \hat{x})}{\partial \hat{x}}\right|_{\hat{x} =0}\hat{x}+o(\hat{x})\right\},\ \ \ \ (\hat{x}\downarrow 0) \end{eqnarray*} where the second equation comes from the monotonicity of $\Delta(x, \hat{x})$ on $x\in[0,1]$ for sufficiently small $\hat{x}$, due to the fact that $-b\notin[0,1]$. Since $x + b \neq 0$, we have that $\left|\max_{x\in\{0,1\}}\left.\frac{-\xi \partial \Delta(x, \hat{x})}{\partial \hat{x}}\right|_{\hat{x} =0}\right| < \infty$. Thus, there exists $l<\infty$ such that $v_5(\hat{x})-v(0)\leq l \hat{x}+o(\hat{x})$ for $\hat{x}\downarrow 0$. 
\item If $-b \in[0,1]$ and $-a\notin[0,1]$, a similar analysis can be conducted to obtain $l < \infty$ such that $v_5(\hat{x})-v(0)\leq l \hat{x}+o(\hat{x})$ for $\hat{x}\downarrow 0$. \item If $-a= -b=0$ or $-a=-b = 1$, then $S(0) = [0,1]^2$ so that $v_5(\hat{x})-v(0)\leq 0$. \item \textit{(subcase 1)} If $-a = 0$ with $-b\in (0,1]$ (see Fig~\ref{fig:exist_1d1}), we have $S(0)\supseteq [-b,1]\times [0, 1]$ and $S_5(\hat{x})\subset [-b-\hat{q}\hat{x},1]\times [0,1]$ (since $(\hat{q}a-\hat{a})\hat{x} \geq 0$). Therefore, for $\hat{q}\leq 0$, $v_5(\hat{x})-v(0)\leq 0$ and for $\hat{q}>0$, \begin{align*} &v_5(\hat{x})-v(0) \\ \leq &\max\left\{ \begin{array}{ll} \max & r - h(x, y)- v(0) \\ \mathrm{s.t.}& (x,y)\in S(0) \\ \textrm{ } \\ \end{array}\ , \ \begin{array}{ll} \max & (r - h(x, y)) - (r - h(-b,y) ) \\ \mathrm{s.t.} & xy + ax+by+\hat{a}\hat{x} + \hat{q} \hat{x}y = c\\ & (x,y)\in[0,1]^2\backslash S(0) \end{array} \right\}\\ \leq &\max\left\{0\ , \ \begin{array}{ll} \max & h(-b, y) - h(x, y) \\ \mathrm{s.t.} & (x,y)\in[-b-\hat{q}\hat{x},-b]\times [0,1] \end{array} \right\}. \end{align*} Note that for $x\in [-b-\hat{q}\hat{x},-b]$, using concavity of $h$, we have that \begin{align*} h(x,y) &\geq \frac{x}{-b} h(-b, y) + \frac{-b-x}{-b} h(0,y) \\ &\geq \frac{-b-x}{-b} (h^m-h^M) + h(-b,y) \geq \frac{\hat{q}\hat{x}}{-b} (h^m-h^M) + h(-b,y). \end{align*} Thus $ v_5(\hat{x})-v(0) \leq \max \left\{0, \frac{\hat{q}(h^m-h^M)}{b}\hat{x}\right\} $ and we obtain $l<\infty$ such that $v_5(\hat{x}) -v(0)\leq l\hat{x}$. \label{caseid1} \textit{(subcase 2)} If $-a = 1$ with $-b\in [0,1)$, this is the same as \ref{casei}\ref{caseid1}1 as we might perform $x\leftarrow 1-x$ together with $y\leftarrow 1-y$. \item \textit{(subcase 1)} If $-b = 0$ with $-a\in (0,1)$ (see Fig~\ref{fig:exist_1e1}), we have $S(0)\supseteq ([0,1]\times [-a,1]) \cup (\{0\}\times [0,1])$ and $S_5(\hat{x})\subset ([0,1]\times [-a,1]) \cup ([0, -\hat{q}\hat{x}]\times [0,1])$. 
For $\hat{q}\geq 0$ we have $S_5(\hat{x})\subset S(0)$ so $v_5(\hat{x})-v(0)\leq 0$. For $\hat{q}<0$, we have \begin{align*} & v_5(\hat{x})-v(0)\\ &\leq \max\left\{ \begin{array}{ll} \max & r - h(x, y)- v(0) \\ \mathrm{s.t.}& (x,y)\in S(0) \\ \textrm{ } \\ \end{array} \ , \ \begin{array}{ll} \max & (r - h(x, y)) - (r - h(0,y) ) \\ \mathrm{s.t.} & xy + ax+by+\hat{a}\hat{x} + \hat{q} \hat{x}y = c\\ & (x,y)\in[0,1]^2\backslash S(0)\end{array}\right\}\\ &\leq \max\left \{0 \ , \ \begin{array}{ll} \max & h(0, y) - h(x, y) \\ \mathrm{s.t.} & (x,y)\in[0,-\hat{q}\hat{x}]\times [0,1] \end{array} \right\}. \end{align*} For $(x,y)\in[0,-\hat{q}\hat{x}]\times [0,1]$, using concavity of $h$, we write \begin{eqnarray*} h(x,y)\geq& (1-x)h(0,y)+ xh(1,y) &\geq h(0,y) - x(h(0,y) - h(1,y)) \\ \geq& h(0,y) - x(h^M - h^m) &\geq h(0,y) + \hat{q}\hat{x}(h^M - h^m). \end{eqnarray*} Thus $ v_5(\hat{x})-v(0) \leq \max\left\{0, \hat{q} (h^m-h^M)\hat{x}\right\} $ and we get $l<\infty$ such that $v_5(\hat{x}) -v(0)\leq l\hat{x}$. \label{caseie1} \textit{(subcase 2)} If $-b=1$ with $-a\in (0,1)$, the argument is the same as for \ref{casei}\ref{caseie1}1 after performing $x\leftarrow 1-x$ and $y\leftarrow 1-y$. \end{enumerate} \item We next consider the case when $c+ab\neq 0$. As discussed above, we assume $c + ab + (\hat{q}a - \hat{a})\hat{x} \geq 0$ for all $\hat{x} >0$ and sufficiently small, and thus $c + ab >0$. In addition, if $(x,y)\in S(\hat{x})$ for $\hat{x} >0$ and sufficiently small, we have $x>-b-\hat{q}\hat{x}, y>-a$ or $x<-b-\hat{q}\hat{x}, y<-a$ but not both. \label{caseii} \begin{enumerate}[label={(\alph*)}] \item In the case $x<-b-\hat{q}\hat{x}$, $y<-a$ for $(x,y) \in S(\hat{x})$, we denote $S'(0)=S(0)\cap \{(x,y)\,|\,y<-a, x<-b\}\subseteq S(0)$. Since $(0, 0)\in S'(0)$, we may assume $(1, 1)\notin S'(0)$, or $S'(0)\supseteq[0,1]^2$. 
Then clearly $S(\hat{x}) \subseteq S(0)$ and therefore $v_5(\hat{x}) - v(0) \leq 0.$ \label{caseiia}
From the convex nature of one part of the hyperbola, it can be verified that $\tilde{y}>\sfrac{c}{2b}$, $\tilde{x}>\sfrac{c}{2a}.$ Consider $x\in [0, \tilde{x}]$ and denote $y'(x) = (c-ax)/(x+b)$, noting that $(x,y'(x))\in S(0)$ and \begin{eqnarray*} &&w_x(x, \hat{x}) := \max_y \{\ (r-h(x,y)) - v(0) \ | \ (x,y)\in S_5(\hat{x})\ \}, \\ &\leq&\max\left\{ \begin{array}{ll} \max_y & (r-h(x,y)) - v(0))\\ \mathrm{s.t.}& (x,y)\in S(0) \\ \textrm{ } \\ \end{array} \ , \ \begin{array}{ll} \max_y & (r- h(x, y)) - (r-h(x, y'))\\ \mathrm{s.t.}& xy + ax+by+\hat{a}\hat{x} + \hat{q} \hat{x}y = c,\\ & (x,y)\in[0,1]^2\backslash S(0) \end{array}\right\}\\ &= & \left\{ \begin{array}{lc} \max\{0, h(x, y'(x)) - h(x, y'(x) +\Delta(x, \hat{x}))\}\ & \Delta(x, \hat{x}) \geq 0\\ 0 & \Delta(x, \hat{x})<0 \end{array} \right. \end{eqnarray*} where $$ \Delta(x, \hat{x}) := \frac{c-ax-\hat{a}\hat{x}}{x+b+\hat{q}\hat{x}}-\frac{c-ax}{x+b}=\frac{ax\hat{q} - \hat{a}x - \hat{a}b - c\hat{q}}{(x+b+\hat{q}\hat{x})(x+b)}\hat{x}, $$ while noting that if $\Delta(x, \hat{x}) <0$, then $(x, y'(x) + \Delta(x, \hat{x}))\in S(0)$. Note that $[0,\tilde{x}]\subseteq \textup{Proj}_x S'(0)$ and thus $x + b < 0$ for $x \in [0, \tilde{x}]$ and therefore for $0<\hat{x}<\hat{x}_0$ sufficiently small, we obtain $ \frac{\hat{q}\hat{x}}{x+b}\geq -\frac{1}{2}$ or $2\frac{x+b+\hat{q}\hat{x}}{x+b}\geq 1 $ for any $x\in [0,\tilde{x}]$. Thus for $\Delta(x, \hat{x}) \geq 0 $, $$ \Delta(x,\hat{x})\leq 2\frac{ax\hat{q} - \hat{a}x - \hat{a}b - c\hat{q}}{(x+b)^2}\hat{x}\leq \max_{x\in[0,\tilde{x}]}\left\{2\frac{ax\hat{q} - \hat{a}x - \hat{a}b - c\hat{q}}{(x+b)^2}\right\} \hat{x}:=l'_{5,x}\hat{x} $$ while noting that continuity gives $l'_{5,x}<\infty$ independent of $x$, $\hat{x}$. 
Now for the case of $\Delta(x, \hat{x}) \geq 0 $ we have that \begin{eqnarray*} &&h(x,y'(x)) - h(x,y'(x)+\Delta(x, \hat{x})) \\ &\leq & \frac{\Delta(x, \hat{x})}{1-y'(x)}(h(x, y'(x))-h(x, 1)) \leq \frac{\Delta(x, \hat{x})}{1-y'(x)}(h^M-h^m)\\ &\leq & (h^M-h^m)\max_{x\in[0,\tilde{x}]}\left\{\frac{1}{1-y'(x)}\right\} \max_{x\in[0,\tilde{x}]}\Delta(x, \hat{x})\\ &\leq& (h^M-h^m)\max_{x\in[0,\tilde{x}]}\left\{\frac{1}{1-y'(x)}\right\} l'_{5,x}\hat{x}, \end{eqnarray*} Therefore, there exists $l_{5,x}<\infty$ independent of $x, \hat{x}$ such that $w_x(x, \hat{x}) \leq l_{5,x}\hat{x}+o(\hat{x})$. A similar analysis of $ w_y(y, \hat{x}) := \max_x \{r-h(x,y) - v(0)|x,y\in S(0) \} $ provides $ w_y(y,\hat{x})\leq l_{5,y}\hat{x}+o(\hat{x})$ for $\hat{x}\downarrow 0 $, where $l_{5,y}$ is a constant independent of $y\in[0,\tilde{y}]$ and $\hat{x}$. Finally, we combine the results for $w_x$ and $w_y$. Since they cover the whole curve with overlapping, we have $v_5(\hat{x})-v(0)\leq \max\{l_{5,x}, l_{5,y}\}\hat{x}+o(\hat{x})$ for $\hat{x}\downarrow 0$. \textit{(Subcase 3)} If $(0,1)\in S'(0)$ but $(1,0)\notin S'(0)$ (see Fig~\ref{fig:exist_2a3}), we apply a similar analysis for $w_y(y, \hat{x})$ with $y \in [0,1]$ and obtain a constant $l_{5,y}$ independent of $y$ and $\hat{x}$. We thus get $v_5(\hat{x})-v(0)\leq l_{5,y}\hat{x}+o(\hat{x})$ for $\hat{x}\downarrow 0$. \textit{(Subcase 4)} If $(1,0)\in S'(0)$ but $(0,1)\notin S'(0)$ (see Fig~\ref{fig:exist_2a4}), we apply a similar analysis for $w_x(x, \hat{x})$ with $x \in [0,1]$ and similarly obtain a constant $l_{5,x}$ independent of $x$ and $\hat{x}$. \textit{(Subcase 5)} If $(1,0),(0,1)\in S'(0)$ (see Fig~\ref{fig:exist_2a5}). Then the curve for $\hat{x}=0$ is between $(1, \frac{c-a}{1+b})$ and $(\frac{c-b}{1+a},1)$. Similar to \ref{caseii}\ref{caseiia}2, we find $\tilde{x},\tilde{y}$ such that $(\tilde{x}, \frac{c-a+b+1}{2+2b})$, $(\frac{c-b+a+1}{2+2a}, \tilde{y})$ are within the curve. 
From the convex nature of one part of the hyperbola, we have $ \tilde{y}>\frac{c-b+a+1}{2+2a}$ and $ \tilde{x}>\frac{c-a+b+1}{2+2b}. $ Similar to \ref{caseii}\ref{caseiia}2, we consider $w_x(x, \hat{x})$ for $x\in [\frac{c-a+b+1}{2+2b}, 1]$ and $w_y(y, \hat{x})$ for $y\in [\frac{c-b+a+1}{2+2a}, 1]$. We obtain $l_{5,x}$ and $l_{5,y}$. Therefore, we write $v_5(\hat{x})-v(0)\leq \max\{l_{5,x},l_{5,y}\}\hat{x}+o(\hat{x})$ for $\hat{x}\downarrow 0$. \item If for $\hat{x}\in (0,\hat{x}_0)$, and for $(x,y) \in S(\hat{x})$ $x>-b-\hat{q}\hat{x}, y>-a$, we denote $S'(0)=S(0)\cap \{y>-a, x>-b\}\subseteq S(0)$. If $(1,1)\in S'(0)$ while $(0,0)\notin S'(0)$, the proof is the same as \ref{caseii}\ref{caseiia} as we can perform $x\leftarrow 1-x$ with $y\leftarrow 1-y$. \end{enumerate} Combining the discussions for \ref{casei} and \ref{caseii} shows that there exists $l_5<\infty$ such that $v_5(\hat{x})-v(0)\leq l_5\hat{x}+o(\hat{x})$ for $\hat{x}\downarrow 0$. \qed \end{enumerate} \end{proof} \section{Proof of Proposition~\ref{lm:seqind}} \label{section:seqind} \begin{proof}[Proposition~\ref{lm:seqind}] Consider any feasible solution $(x',y')\in Q$. 
Then \begin{align*} & \sum_{i\in J_0\cup J_1} \gamma_i (x'_i, y'_i) \geq\ \sum_{i\in J_0} \psi(a_ix'_iy'_i)+\sum_{i\in J_1} \psi(a_ix'_iy'_i-a_i)\\ \geq\ & \psi\left(\sum_{i\in J_0}a_ix'_iy'_i + \sum_{i\in J_1}(a_ix'_iy'_i-a_i)\right) \geq\ \phi\left(\sum_{i\in J_0}a_ix'_iy'_i + \sum_{i\in J_1}a_ix'_iy'_i-\sum_{i\in J_1}a_i\right)\\ =\ & \max_{(x_I,y_I)\in[0,1]^{2|I|}}\left\{r-h(x_I,y_I)\ \,\Bigm|\,\sum_{i\in I} a_ix_iy_i\geq d-\sum_{i\in J_0\cup J_1}a_ix'_iy'_i \right\} \geq\ r-h(x'_I, y'_I), \end{align*} where the first inequality holds because of assumptions~\ref{item:seqind:3} and \ref{item:seqind:4}, the second inequality holds because assumption~\ref{item:seqind:2} requires $\psi(\cdot)$ to be subadditive over its range, the third inequality holds because assumption~\ref{item:seqind:1} requires $\psi(\cdot)$ to be an upper bound on $\phi(\cdot)$, the equality holds from the definition of $\phi(\cdot)$, and the last inequality is satisfied because $(x_I',y_I')$ is a feasible solution to the preceding optimization problem. \qed \end{proof} \section{Proof of Theorem~\ref{thm:minimal}} \label{section:minimal} \begin{proof}[Theorem~\ref{thm:minimal}] Suppose $Q\neq\emptyset$. It follows from \cite{dey2019new} that the extreme points of $Q$ are such that $(x^*_j,y^*_j)\in\{ 0,1\}^2$ for all $j\in [n]\backslash\{i\}$ for some $i \in [n]$. Assume first that $Q$ has an extreme point $(x^*,y^*)$ where $x^*_iy^*_i\notin \{0,1\}$ for some $i \in [n]$ with $a_i>0$. Define the partition $\Lambda$ with $I=\{i\}$, $J_0 = \{j|x^*_jy^*_j = 0\}$, and $J_1 = \{j|x^*_jy^*_j = 1\}$. Since $\sum_{j=1}^n a_jx^*_jy^*_j = d$, we have $a_i>a_ix^*_iy^*_i = d-\sum_{i\in J_1}a_j = d^\Lambda>0$. Since $d^\Lambda>0$, we conclude that $\Lambda$ is a minimal cover yielding partition. Assume second that all extreme points $(x^*,y^*)$ are such that $(x^*_i, y^*_i)\in\{0,1\}^2$ for all $i \in [n]$ with $a_i>0$. 
Denote $I_+ = \{i \in [n] \,|\, a_i>0\}$ and, for $K_1, K_2\subseteq I_+$, define $$ Q_{K_1, K_2}:=\mathrm{conv} \left\{\ (x,y)\in[0,1]^{2n} \ \left|\ \begin{array}{l} \ \sum_{i=1}^n a_ix_iy_i\geq d\\ \ x_i = 0,\ i\in K_1\\ \ y_i = 0,\ i\in K_2\\ \ x_i = 1,\ i\in I_+\backslash K_1\\ \ y_i = 1,\ i\in I_+\backslash K_2 \end{array} \ \right.\right\}. $$ It is clear that $\mathrm{conv}(Q) = \mathrm{conv}(\bigcup_{K_1, K_2\subseteq I_+} Q_{K_1, K_2})$. Because, for any $K_1, K_2 \subseteq I_+$, $Q_{K_1, K_2}$ is a polytope~\cite[Proposition 17]{richard2010liftingframework}, we conclude that $\mathrm{conv}(Q)$ is a polytope. \qed \end{proof} \section{Proof of Theorem~\ref{thm:valid}} \label{section:valid} \begin{proof}[Theorem~\ref{thm:valid}] For $i \in [n]$, define \begin{eqnarray*} Q_i = \left\{ \ (x,y) \in [0,1]^{2n} \ \Biggm| \ \begin{array}{l} (x_j,y_j)=(1,1), \,\forall j \in [n]\backslash i \\ \sqrt{a_i} \sqrt{x_iy_i} \ge \sqrt{d_i} \end{array} \ \right\}. \end{eqnarray*} First observe that, because $a_i$ for $i \in [n]$ form a minimal cover, we have that $a_i>d_i:=d-\sum_{j \neq i} a_j$ for each $i$. This implies that sets $Q_i$ are nonempty. We next argue that $\mathrm{conv}(Q)=\mathrm{conv}(\bar{Q})$ where $\bar{Q}:=\bigcup_{i=1}^n Q_i$. To this end, consider any extreme point $(x,y)$ of $Q$. Then, \cite{dey2019new} shows that there exists a partition $(I_0,I_1,\{i\})$ of $[n]$ such that $x_jy_j=0$ for $j \in I_0$, $x_jy_j=1$ for $j \in I_1$ and $x_iy_i \in [0,1]$. Because $a_i$ for $i \in [n]$ form a minimal cover, it must be that $|I_0|=0$ as otherwise $\sum_{j=1}^n a_jx_jy_j \le \sum_{j \in I_1} a_j < d$. We conclude that $(x,y) \in Q_i$. Since $Q$ is compact, it follows that $\mathrm{conv}(Q) \subseteq \mathrm{conv}(\bar{Q})$. Further, since $Q_i \subseteq \bar{Q} \subseteq Q$, it is clear that $\mathrm{conv}(\bar{Q}) \subseteq \mathrm{conv}(Q)$. We now use disjunctive programming to obtain an extended formulation of $\mathrm{conv}(\bar{Q})$. 
This formulation introduces convex multipliers $\lambda_i$ and copies $(x^i,y^i)$ of variables $(x,y)$ for each disjunct $Q_i$. Because disjunct $Q_i$ yields constraints $y^i_j=x^i_j=\lambda_i$ for $j \neq i$, variables $x^i_j$ and $y^i_j$ for $j \neq i$ can be eliminated from the formulation in favor of $\lambda_i$. Renaming variables $x^i_i$ as $\hat{x}_i$, we obtain \begin{eqnarray*} \begin{array}{rcll} x_j &=& \hat{x}_j + \sum_{i \neq j} \lambda_i &\ \forall j \in [n] \\ y_j &=& \hat{y}_j + \sum_{i \neq j} \lambda_i &\ \forall j \in [n] \\ \sqrt{a_i}\sqrt{\hat{x}_i \hat{y}_i } &\geq& \sqrt{d_i} \lambda_i &\ \forall i \in [n]\\ \lambda_i &\geq& \hat{x}_i,\hat{y}_i \geq 0 &\ \forall i \in [n]\\ \sum_{i = 1}^n \lambda_i &=& 1&\\ \end{array} \end{eqnarray*} because the constraint functions of each $Q_i$ are positively homogeneous. Using the fact that $\sum_{i \neq j} \lambda_i = 1 - \lambda_ j$, we obtain $\hat{x}_j = x_j -(1-\lambda_j)$ and $\hat{y}_j = y_j -(1-\lambda_j)$. Eliminating these variables from the formulation, we obtain \begin{eqnarray}\label{eq:nomoreFM} \begin{array}{rcll} \sqrt{a_i}\sqrt{(x_i - (1 - \lambda_i))\cdot (y_i - (1 - \lambda_i)) } &\geq& \sqrt{d_i} \lambda_i &\ \forall i \in [n]\\ 1-\lambda_i \le x_i,y_i &\leq &1 &\ \forall i \in [n]\\ \sum_{i = 1}^n \lambda_i &=& 1.&\\ \end{array} \end{eqnarray} Because projecting variables $\lambda_i$ from the above formulation seems difficult, we relax the above set by using, for each $i \in [n]$ the following inequality \begin{eqnarray} \sqrt{a_i} \left( \sqrt{x_i y_i} - (1 - \lambda_i) \right) \geq \sqrt{a_i} \sqrt{(x_i - (1 - \lambda_i))\cdot (y_i - (1 - \lambda_i)) }, \label{eq:ag} \end{eqnarray} which holds as $\left(\sqrt{x_iy_i}-(1-\lambda_i)\right)^2 \ge x_iy_i - (x_i+y_i)(1-\lambda_i) + (1-\lambda_i)^2 = (x_i-(1-\lambda_i))(y_i-(1-\lambda_i))$ where the first inequality is obtained by expanding the square and using the arithmetic-geometry mean inequality $-2\sqrt{x_iy_i} \ge -(x_i+y_i)$. 
Substituting (\ref{eq:ag}) in (\ref{eq:nomoreFM}), we obtain: \begin{eqnarray} \begin{array}{rcll} \lambda_i &\geq & \frac{\sqrt{a_i}}{\sqrt{a_i} - \sqrt{d_i}}\left( 1 - \sqrt{x_iy_i}\right)&\ \forall i \in [n]\\ \lambda_i &\geq& 1 - x_i &\ \forall i \in [n]\\ \lambda_i &\geq& 1 - y_i &\ \forall i \in [n]\\ x_i,y_i &\leq &1 &\ \forall i \in [n]\\ \sum_{i = 1}^n \lambda_i &=& 1.&\\ \end{array} \end{eqnarray} Using Fourier-Motzkin to project variables $\lambda_i$, we obtain $(x, y) \in [0, 1]^{2n}$ together with \begin{eqnarray*} \sum_{i = 1}^n \textup{max}\left\{ \frac{\sqrt{a_i}}{\sqrt{a_i} - \sqrt{d_i}}\left( 1 - \sqrt{x_iy_i}\right), 1 - x_i, 1 - y_i \right\} \leq 1, \end{eqnarray*} which is a convex inequality. Retaining only the first term in the maximum for each pair $(x_i,y_i)$ and multiplying through by $-1$ yields the weaker convex inequality (\ref{eq:bilincoverineq}). \qed \end{proof} \section{Proof of Theorem~\ref{thm:strcovercon} } \label{section:strcovercon} In this section, we provide a proof of Theorem~\ref{thm:strcovercon}. We say that $G\in\mathbb{R}^n$ is a \textit{set of the covering type} if whenever $\hat{x}\in G$, then $\tilde{x}\in G$ for all $\tilde{x} \in \mathbb{R}^n$ such that $\tilde{x}\geq \hat{x}$. Due to lack of space we skip the proof of the next proposition; see~\cite{bodur2018aggregation} for a similar result. \begin{proposition} \label{prop:eqposcone} Let $B=[0,1]^n$ and let $G$ and $H$ be sets of the covering type, such that $\mathrm{conv}(G\cap B)\subseteq H$. 
If there exists $\theta\geq 1$, such that for any $c \geq 0$, $ z^l\leq z^*\leq \theta z^l$, where $z^*:=\min\{c^{\intercal} x|x\in G\cap B\}$ and $z^l:=\min\{c^{\intercal} x|x\in H\cap B\}$, then $ (\theta H) \cap B \subseteq \mathrm{conv}(G\cap B).$ \end{proposition} Following Proposition~\ref{prop:eqposcone}, Theorem~\ref{thm:strcovercon} will be proven if, for all $(p, q) \in \mathbb{R}^{2n}_+$, \begin{align*} z^*&:=\min\left\{\ \sum_{i=1}^n (p_ix_i+q_iy_i) \ \Bigm| \ \sum_{i=1}^n a_ix_iy_i\geq d,\ (x,y) \in[0,1]^{2n} \ \right\}\\ z^l&:=\min\left\{\ \sum_{i=1}^n (p_ix_i+q_iy_i) \ \Bigm| \ \sum_{i=1}^n \frac{\sqrt{a_i}}{\sqrt{a_i}-\sqrt{d_i}}(\sqrt{x_iy_i}-1)\geq -1,\ (x,y)\in[0,1]^{2n} \ \right\} \end{align*} satisfy $z^l\leq z^*\leq 4z^l$. To this end, we prove first four ancillary results in Lemmas~\ref{lem:thetacompute}-\ref{lem:rangealpha}. \begin{assumption}\label{assp:1} $p_i \geq q_i$, $\forall i \in [n]$. \end{assumption} Assumption~\ref{assp:1} is without loss of generality as it can always be achieved by renaming variables $x_i$ as $y_i$, if necessary. \begin{lemma}\label{lem:thetacompute} For $\alpha \in [0, 1]$ and $i \in [n]$, define \begin{eqnarray*} \theta_i(\alpha) = &\textup{min} \left\{ p_i x_i + q_i y_i \Bigm| \begin{array}{l} \sqrt{x_i y_i } = \alpha\\ (x_i, y_i) \in [0,1]^2 \\ \end{array} \right\}. \end{eqnarray*} Then, $\theta_i(\alpha)=0$ when $p_i=0$. Further, when $p_i>0$, \begin{eqnarray*} \theta_i(\alpha) = \left\{\begin{array}{lcl} 2\sqrt{p_iq_i}\cdot\alpha\ &\textup{when}& \alpha \leq \sqrt{\frac{q_i}{p_i}}\\ p_i\cdot\alpha^2 + q_i\ &\textup{when}& \alpha \geq \sqrt{\frac{q_i}{p_i}}. \end{array}\right. \end{eqnarray*} \end{lemma} \begin{proof} When $p_i=0$, it follows from Assumption~\ref{assp:1} that $q_i=0$. The result holds trivially. 
For $p_i>0$, setting $x_i = \alpha^2/y_i$, we write $\theta_i(\alpha)=\min \{ p_i \sfrac{\alpha^2}{y_i} +q_iy_i \,|\, \alpha^2 \le y_i \le 1 \}$, a problem with linear constraints and a convex objective over $\mathbb{R}_+$. When $q_i=0$, $y^*_i=1$ is optimal and the result follows as $x^*_i=\alpha^2$. When $q_i>0$, the problem has $y^*_i = \sqrt{\sfrac{p_i}{q_i}}\alpha \ge \alpha \ge \alpha^2$ as unique stationary point over $\mathbb{R}_+$. We conclude that $\bar{y}_i = \min\{y^*_i,1\}$ is optimal for the constrained problem. \qed \end{proof} \begin{lemma}\label{lem:zoptstruc} Let $\alpha^*_i := \sqrt{\frac{d_i}{a_i}}$. Then, $z^{*} = \textup{min}_{i \in [n]} \left\{ \sum_{j \in [n]\setminus \{i\}}(p_j + q_j) + \theta_i(\alpha^*_i) \right\}$. \end{lemma} \begin{proof} Since an optimal solution to the problem defining $z^*$ can always be chosen among the extreme points of $Q$ and since the proof of Theorem~\ref{thm:valid} in Section~\ref{section:valid} establishes that extreme points of $Q$ belong to $\bigcup_{i=1}^n Q_i$, we write that $z^*=\min_{i \in [n]} \min\{ p^{\intercal} x+q^{\intercal} y \,|\, (x,y) \in Q_i\}$. Points of $Q_i$ satisfy $x_j=y_j=1$ for $j \neq i$ and $a_ix_iy_i \ge d_i$. Since $p_i \ge q_i \ge 0$, it suffices to consider solutions that satisfy $\sqrt{x_iy_i}= \sqrt{\sfrac{d_i}{a_i}}=\alpha_i^*$ in the above problem, yielding the result. \qed \end{proof} Rearranging the variables if necessary, assume from now on that $ z^* = \sum_{i \in [n-1]}(p_i + q_i) + \theta_n(\alpha^*_n). $ As a consequence of this assumption and Lemma~\ref{lem:zoptstruc}, we obtain that \begin{eqnarray}\label{eq:uselaterpairwise} \theta_j(\alpha^*_j) + p_n + q_n \geq \theta_n(\alpha^*_n) + p_j + q_j, \quad \forall j \in [n]. \end{eqnarray} \begin{lemma}\label{lem:termwiselower} Let $\tau_i(\alpha) = (p_i + q_i)\cdot\alpha^2$. Then $\tau_i(\alpha) \leq \theta_i(\alpha)$ for $\alpha \in [0, 1]$. \end{lemma} \begin{proof} When $p_i=0$, the result is clear. 
Assume therefore that $p_i>0$. When $\alpha \ge \sqrt{\sfrac{q_i}{p_i}}$, we write that $\theta_i(\alpha)=p_i \alpha^2 + q_i \ge p_i \alpha^2 + q_i \alpha^2 = \tau_i(\alpha)$, where the inequality holds because $\alpha \in [0,1]$. When $\alpha \le \sqrt{\sfrac{q_i}{p_i}}$ (or equivalently $\sqrt{q_i} \ge \sqrt{p_i} \alpha)$, we write that $\theta_i(\alpha)= 2\sqrt{q_i}\sqrt{p_i} \alpha \ge 2 p_i \alpha^2 \ge (p_i+q_i) \alpha^2$, where the last inequality holds because $p_i\ge q_i \ge 0$. \end{proof} \begin{lemma}\label{lem:rangealpha} Assume that $(x,y)\in[0,1]^{2n}$ satisfies (\ref{eq:bilincoverineq}), \textit{i.e.,} $\sum_{i = 1}^n \frac{\sqrt{a_i}}{\sqrt{a_i} - \sqrt{d_i} } (\sqrt{x_iy_i} - 1 )\geq -1 $. Define $\alpha_i = \sqrt{x_iy_i}$ for $i \in [n]$. Then (i) $\alpha^*_i \leq \alpha_i$ for all $i\in [n]$, (ii) $\alpha_i < \frac{1}{2}$ for at most one $i \in [n]$. \end{lemma} \begin{proof} Statement~(i) trivially holds, as any $\sqrt{x_iy_i}<\alpha_i^*=\sqrt{\sfrac{d_i}{a_i}}$ invalidates (\ref{eq:bilincoverineq}), even if we set $x_j=y_j=1$ for $j\in[n]\backslash\{i\}$. For (ii), assume by contradiction there exists distinct indices $i_1$ and $i_2$ in $[n]$ such that $\alpha_{i_1} \le \alpha_{i_2}<\frac{1}{2}$. Then $ \sum_{i=1}^n \frac{\sqrt{a_i}}{\sqrt{a_i} - \sqrt{d_i} } (\sqrt{x_iy_i} - 1 ) < \sum_{i \in \{i_1,i_2\}} \frac{\sqrt{a_i}}{\sqrt{a_i} - \sqrt{d_i} } \left(-\frac{1}{2} \right) \leq \sum_{i \in \{i_1,i_2\}} \left(-\frac{1}{2} \right) = -1, $ which violates (\ref{eq:bilincoverineq}). \qed \end{proof} We are now ready to give a proof of Theorem~\ref{thm:strcovercon} that inequality (\ref{eq:bilincoverineq}) yields strong bounds for optimization problems over $Q$. \begin{proof}[Theorem~\ref{thm:strcovercon}] Let $(\tilde{x},\tilde{y})$ be an optimal solution for the relaxation defining $z^l$ and let $\tilde{\alpha}_i = \sqrt{\tilde{x}_i\tilde{y}_i}$. From Lemma~\ref{lem:rangealpha}, it is sufficient to consider the following three cases. 
First assume that $\tilde{\alpha}_j \le \frac{1}{2}$ for some $j < n$. Lemma~\ref{lem:rangealpha} implies $\tilde{\alpha}_i \ge \frac{1}{2}$ for $i \neq j$. Then \begin{eqnarray*} 4z^l &\ = \ & 4\sum_{i = 1}^n \theta_i(\tilde{\alpha}_i) \ \geq \ 4\left(\sum_{i \in [n]\setminus \{j\}} (p_i +q_i)\tilde{\alpha}_i^2 + \theta_j(\tilde{\alpha}_j) \right) \\ &\geq& 4\left(\sum_{i \in [n]\setminus \{j\}} (p_i +q_i)\frac{1}{4} + \theta_j(\tilde{\alpha}_j) \right) \ = \ \sum_{i \in [n]\setminus \{j\}} (p_i +q_i) + 4\theta_j(\tilde{\alpha}_j) \\ &\geq &\sum_{i \in [n]\setminus \{j,n\}} (p_i +q_i) + \theta_j(\alpha^*_j) + (p_n +q_n)\\ &\geq& \sum_{i \in [n]\setminus \{j,n\}} (p_i +q_i) + p_j +q_j + \theta_n(\alpha^*_n)\ = \ z^*, \end{eqnarray*} where the first inequality holds because of Lemma~\ref{lem:termwiselower}, the second inequality holds because $\alpha_i \geq \frac{1}{2}$ for $i \neq j$, the third inequality is because $\alpha^*_j \leq \tilde{\alpha}_j$ from Lemma~\ref{lem:rangealpha} and because $\theta_j$ is monotonically increasing, and the fourth inequality holds because of (\ref{eq:uselaterpairwise}). Second assume that $\tilde{\alpha}_n \leq \frac{1}{2}$. Lemma~\ref{lem:rangealpha} implies that $\tilde{\alpha}_i \ge \frac{1}{2}$ for $i < n$. Similarly, $ 4z^l = 4\sum_{i = 1}^n \theta_i(\tilde{\alpha}_i) \geq 4(\sum_{i = 1}^{n-1} (p_i +q_i)\tilde{\alpha}_i^2 + \theta_n(\tilde{\alpha}_n) ) \geq 4(\sum_{i =1}^{n-1} (p_i +q_i)\frac{1}{4} + \theta_n(\tilde{\alpha}_n) ) = \sum_{i=1}^{n-1} (p_i +q_i) + 4\theta_n(\tilde{\alpha}_n) \geq \sum_{i=1}^{n-1} (p_i +q_i) + \theta_n(\alpha^*_n) = z^*. $ Finally assume that $\tilde{\alpha}_i \geq \frac{1}{2}$ for all $i$, and we use the same proof as just given. \qed \end{proof} \section{Proof of Theorem~\ref{thm:upperbound}} \label{section:upperbound} In this section, we provide a proof of Theorem~\ref{thm:upperbound}, which gives a subadditive over-approximation to the lifting function of the minimal covering inequality. 
We first pose \begin{assumption}\label{assp:2} $0<\Delta \le a_1 \le a_2 \le \ldots \le a_n$. \end{assumption} Assumption~\ref{assp:2} can always be achieved by reordering the variables since the notion of minimal cover requires that $a_i \ge \Delta$ for $i \in [n]$; see discussion following Notation~\ref{not:1}. We next present ancillary results in Lemmas~\ref{lm:decreasing}-\ref{lm:subadditive} and Proposition~\ref{prop:minimal-slope} that are used in the derivation of the approximation of the lifting function. The proof of Lemma~\ref{lm:decreasing} is straightforward and can be obtained by investigating signs of derivatives. \begin{lemma}\label{lm:decreasing} For $u\geq \max\{\alpha, \beta\}$ where $\alpha, \beta >0$, the function $f(u):= \frac{\sqrt{u}-\sqrt{u-\alpha}}{\sqrt{u}-\sqrt{u-\beta}}$ is decreasing when $\alpha >\beta$ and increasing when $\alpha < \beta$. \end{lemma} Lemma~\ref{lm:fractional-concave} establishes that the lifting function $\phi(\delta)$ exhibits local convexity. \begin{lemma} \label{lm:fractional-concave} Any point $\delta$ of the lifting function $\phi(\delta)$ corresponding to an optimal solution $(x,y)$ with at least one index $i$ such that $x_iy_i\in(0,1)$, is locally convex, \textit{i.e.}, there exists $r>0$ {and $\xi$} such that $\phi(\delta+\eta)\geq \phi(\delta)+\xi\eta$ for all $\eta \in [-r,r]$. \end{lemma} \begin{proof} Let $\dot{\delta}$ be a point for which an optimal solution ($\dot{x},\dot{y}$) to the problem defining $\phi(\dot{\delta})$ is such that $\dot{x}_i\dot{y}_i\in(0,1)$. Define $r=\min\{\dot{x}_i\dot{y}_i,1-\dot{x}_i\dot{y}_i\}/2>0$. Consider $\eta\in[-r,r]$ and construct $(x,y)$ so that $x_j=\dot{x}_j$, $y_j=\dot{y}_j$ for any $j\neq i$ and $x_iy_i = \dot{x}_i\dot{y}_i - \eta$. From the feasibility of $(\dot{x},\dot{y})$ for $\dot{\delta}$, we conclude that $(x,y)$ is a feasible solution to the optimization problem defining $\phi(\dot{\delta}+\eta)$. 
Therefore, \begin{eqnarray*} \phi(\dot{\delta}+\eta) - \phi(\dot{\delta}) &\ge& \frac{\sqrt{a_i}}{\sqrt{a_i}-\sqrt{d_i}} \left(\sqrt{\dot{x}_i\dot{y}_i}-\sqrt{\dot{x}_i\dot{y}_i-\eta}\right) \\ &=& \frac{a_i+\sqrt{a_id_i}}{a_i-d_i} \left(\sqrt{\dot{x}_i\dot{y}_i}-\sqrt{\dot{x}_i\dot{y}_i-\eta}\right) \ge \frac{a_i+\sqrt{a_id_i}}{2\Delta\sqrt{\dot{x}_i\dot{y}_i}}\eta, \end{eqnarray*} where the last inequality holds {because $a_i-d_i=\Delta$} and because the concavity of the square root function over $\mathbb{R}_+$ implies that $\sqrt{\dot{x}_i\dot{y}_i-\eta} \le \sqrt{\dot{x}_i\dot{y}_i}-\frac{1}{2\sqrt{\dot{x}_i\dot{y}_i}}\eta$. \qed \end{proof} To obtain the tightest linear over-approximation of $\phi(\delta)$ for $\delta\in(0,\infty)$, we next narrow down the set of points $\delta$ where function $\sfrac{\phi(\delta)}{\delta}$ can achieve a local maximum. \begin{proposition} \label{prop:minimal-slope} Assume that $\dot{\delta}>0$ is a local maximizer of the function $\sfrac{\phi(\delta)}{\delta}$ and that $(\dot{x},\dot{y})$ is an optimal solution to the problem defining $\phi(\dot{\delta})$. Then either \begin{enumerate}[label={(\roman*)},align=left] \item all $(\dot{x}_i,\dot{y}_i)$ pairs belong to $\{0,1\}^2$, or \label{minimal-slope:1} \item there exists $r>0$ such that $\sfrac{\phi(\delta+\eta)}{(\delta+\eta)}=\sfrac{\phi(\delta)}{\delta}$ for all $\eta \in (-r,r)$. \label{minimal-slope:2} \end{enumerate} \end{proposition} \begin{proof} Assume that \ref{minimal-slope:1} does not hold, \textit{i.e.}, there exists $i \in [n]$ for which $\dot{x}_i\dot{y}_i \in (0,1)$. We show that \ref{minimal-slope:2} holds. From Lemma~\ref{lm:fractional-concave}, there exists $\xi$ and $r>0$ such that $\phi(\dot{\delta}+\eta)\geq \phi(\dot{\delta})+\xi \eta$ for $\eta\in(-r, r)$. Without loss of generality, we assume $r<\dot{\delta}$. We consider two cases. Assume first that $\xi\geq \sfrac{\phi(\dot{\delta})}{\dot{\delta}}$. 
For any $\eta\in(0,r)$ we have $\phi(\dot{\delta}+\eta)\geq\phi(\dot{\delta})+\xi\eta \geq \frac{\phi(\dot{\delta})}{\dot{\delta}}(\dot{\delta}+\eta)$ or equivalently $\phi(\dot{\delta}+\eta)/(\dot{\delta}+\eta)\geq \phi(\dot{\delta})/\dot{\delta}$. Assume second that $\xi\leq \sfrac{\phi(\dot{\delta})}{\dot{\delta}}$. For any $\eta\in(-r,0)$ we have $\phi(\dot{\delta}+\eta)\geq\phi(\dot{\delta})+\xi\eta \geq \frac{\phi(\dot{\delta})}{\dot{\delta}}(\dot{\delta}+\eta)$ or equivalently $\phi(\dot{\delta}+\eta)/(\dot{\delta}+\eta)\geq \phi(\dot{\delta})/\dot{\delta}$. From analyzing these cases, we see that $\dot{\delta}$ can be a local maximum only if $\eta = \phi(\dot{\delta})/\dot{\delta}$ and all points in $(\delta-r, \delta+r)$ are also local maxima. \qed \end{proof} We now derive a linear over-approximation to the function $\phi(\delta)$ for $\delta \ge 0$. \begin{lemma}\label{lm:boundpos} Define $l_+:=\frac{\sqrt{a_{i_0}}+\sqrt{d_{i_0}}}{\Delta \sqrt{d_{i_0}}}$ if $I^> \neq \emptyset$ and $l_+:=\frac{1}{\Delta}$ otherwise. Then $\phi(\delta) \le l_+ \delta$ for $\delta \ge 0$. \end{lemma} \begin{proof} The result holds trivially for $\delta = 0$ since $\phi(\delta)=0$. Our main tool to prove this result is Proposition~\ref{prop:minimal-slope} which will allow us to verify the value of $\phi(\delta)/\delta$ only for a finite set of values of $\delta$. However, since Proposition~\ref{prop:minimal-slope} holds only for $\delta>0$, we first prove the result in an interval that has $0$ as an end point. As mentioned above, the first part of the proof investigates the function $\phi$ in a neighborhood of the point $\delta = 0$. There are two cases to consider. For the first case, assume that $I^> \neq \emptyset$. Consider $\delta \in [0, \min\{a_{i_0}-\Delta, \Delta\}/2]$. Because the problem defining $\phi$ consists of maximizing a convex function, optimal solutions can be found at extreme points of the feasible region. 
It follows that there exists an optimal solution that is such that $x_i^*y_i^* \in \{0,1\}$ for all $i\in[n]\backslash\{j\}$ for some $j \in [n]$. Further, at most one index $k \in [n]\backslash\{j\}$ can be such that $x_k^*y_k^* = 0$ as otherwise $\sum_{i=1}^n a_i x_i^*y_i^* \leq \sum_{i = 1}^n a_i - 2\Delta = d-\Delta < d-\delta$ which would made this solution infeasible for the problem defining $\phi$. Also, if there exists $k$ with $x_k^*y_k^* = 0$, then $a_k=\Delta$. If not, $a_k\geq a_{i_0}$ and thus $\sum a_ix_i^*y_i^*\leq d +\Delta - a_{i_0} < d-\delta$, infeasible. {Thus, $-\Delta-\delta = \sum_{i = 1}^n a_ix_i^*y_i^* - \sum_{i = 1}^n a_i = a_jx_j^*y_j^* - a_j - a_k$, \textit{i.e.}, $a_jx_j^*y_j^* = a_j-\delta$, and as} $\frac{\sqrt{a_k}}{\sqrt{a_k}-\sqrt{d_k}}= \frac{\sqrt{\Delta}}{\sqrt{\Delta} - 0} = 1$, we obtain $$ \phi(\delta) = \frac{\sqrt{a_j}}{\sqrt{a_j}-\sqrt{d_j}}(1-\sqrt{x_j^*y_j^*}) = \frac{\sqrt{a_j}-\sqrt{a_j-\delta}}{\sqrt{a_j}-\sqrt{a_j-\Delta}} \leq \frac{\sqrt{a_n}-\sqrt{a_n-\delta}}{\sqrt{a_n}-\sqrt{a_n-\Delta}}:= \eta(\delta), $$ where the last step follows from Lemma~\ref{lm:decreasing}. {Note that $\eta$ is well-defined and convex on $[0,\Delta]$.} Therefore, it is easy to verify that $\eta(\delta)\leq \delta/\Delta$ for $\delta\in[0,\Delta]$, and thus $\phi(\delta) \leq \delta/\Delta \leq l_+ \delta$ for $\delta \in [0, \min\{a_{i_0}-\Delta, \Delta\}/2]$. If there is no $k$ with $x^*_ky^*_k = 0$, we can verify $a_j>\Delta$ and $a_jx_j^*y_j^* = a_j-\Delta-\delta$. Thus, \begin{eqnarray*} \phi(\delta) &=& \frac{\sqrt{a_j}}{\sqrt{a_j}-\sqrt{d_j}}(1-\sqrt{x_j^*y_j^*})-1 \\ &=& \frac{\sqrt{a_j}-\sqrt{a_j-\Delta-\delta}}{\sqrt{a_j}-\sqrt{a_j-\Delta}}-1 \leq \frac{\sqrt{a_{i_0}}-\sqrt{a_{i_0}-\Delta - \delta}}{\sqrt{a_{i_0}}-\sqrt{a_{i_0}-\Delta}}-1:=\xi(\delta), \end{eqnarray*} where the last step follows Lemma~\ref{lm:decreasing}. The function $\xi(\delta)$ is again convex. 
Therefore, it is easy to verify that $\xi(\delta)\leq \frac{\sqrt{a_{i_0}}+\sqrt{d_{i_0}}}{\Delta\sqrt{d_{i_0}}}\delta$ for $\delta\in[0, a_{i_0}-\Delta]$. Thus we obtain that $\phi(\delta) \leq l_+ \delta$ for $\delta \in [0, \min\{a_{i_0}-\Delta, \Delta\}/2]$. For the second case $I^>=\emptyset$, \textit{i.e.}, $a_1=\ldots=a_n=\Delta$. Note that in this case $n \geq2$. Consider $\delta\in[0, \Delta/2]$. Similar to above, there exists an optimal solution that is such that $x_i^*y_i^* \in \{0,1\}$ for all $i\in[n]\backslash\{j\}$ for some $j\in [n]$. In addition, there exists exactly one index $k \in [n]\backslash \{j\}$ with $x_k^*y_k^* = 0$, or otherwise we obtain $a_j>\Delta$ a contradiction to $I^>=\emptyset$. As $\frac{\sqrt{a_k}}{\sqrt{a_k}-\sqrt{d_k}}=1$ and $a_jx_j^*y_j^*=\Delta x_j^*y_j^*=\Delta-\delta$, we obtain $$ \phi(\delta) = \frac{\sqrt{a_j}}{\sqrt{a_j}-\sqrt{d_j}}(1-\sqrt{x_j^*y_j^*}) = \frac{\sqrt{\Delta}-\sqrt{\Delta-\delta}}{\sqrt{\Delta}} \leq \frac{\delta}{\Delta} \leq l_+ \delta, \ \delta \in [0, \Delta/2]. $$ The second part of the proof investigates the function $\phi$ away from the origin. As we are attempting to show that $\phi(\delta)/\delta$ bounded from above by $l_+$, it is sufficient to consider all local maximas of $\phi(\delta)/\delta$. It follows from Proposition~\ref{prop:minimal-slope} that it is sufficient to verify the condition at values of $\delta$ such that $x_iy_i \in \{0,1\}$ for $i \in [n]$. (This is because, at other local maximas, the function $\phi(\delta)/\delta$ is locally constant and so it is sufficient to check at the end points of these ``constant intervals" where $x_iy_i \in \{0,1\}$.) Any such local maximum $\delta$ is therefore such that there exists $S \subseteq [n]$ with $x_iy_i=0$ for $i \in S$ and $x_iy_i=1$ for $i \notin S$. We denote it as $\delta^S$. It is easily verified that $\delta^S = \sum_{i\in S}a_i - \Delta$. 
Let $S = \{i_1, i_2,\ldots,i_k\}$ such that $a_{i_1}\leq \ldots \leq a_{i_k}$, and we have $$ \phi(\delta^S) = -1+\sum_{i\in S} \frac{a_i+\sqrt{a_id_i}}{\Delta} = \sum^{k-1}_{j=1}\frac{a_{i_j}+\sqrt{a_{i_j}d_{i_j}}}{\Delta} + \frac{d_{i_k}+\sqrt{a_{i_k}d_{i_k}}}{\Delta}. $$ Consider two cases. On the one hand, if $a_{i_k}> \Delta$, then $I^> \neq \emptyset$ and $a_{i_0}\leq a_{i_k}$. Thus, \begin{align*} \phi(\delta^S) &= \sum_{j=1}^{k-1} \frac{\sqrt{a_{i_j}}+\sqrt{d_{i_j}}}{\Delta\sqrt{a_{i_j}}} a_{i_j} +\frac{\sqrt{d_{i_k}}+\sqrt{a_{i_k}}}{\Delta\sqrt{d_{i_k}}}d_{i_k}\\ &\leq \sum_{j=1}^{k-1} \frac{\sqrt{a_{i_0}}+\sqrt{d_{i_0}}}{\Delta\sqrt{d_{i_0}}} a_{i_j} +\frac{\sqrt{d_{i_0}}+\sqrt{a_{i_0}}}{\Delta\sqrt{d_{i_0}}}d_{i_k} = \frac{\sqrt{a_{i_0}}+\sqrt{d_{i_0}}}{\Delta \sqrt{d_{i_0}}}\delta^S = l_+\delta^S, \end{align*} where {the inequality follows from the fact that $ \frac{\sqrt{a}+\sqrt{a-\Delta}}{\sqrt{a-\Delta}} = 1+\sqrt{1+\frac{\Delta}{a-\Delta}} $ is decreasing on $a$ for $a>\Delta$,} and the second last equality holds because $\delta^S = \sum^{k-1}_{j=1}a_{i_j} + d_{i_k}$. On the other hand, if $a_{i_k}=\Delta$, then we have $d_{i_k}=d_{i_j}=0$ for any $i_j\in S$. Thus \begin{align*} \phi(\delta^S) &= \sum^{k-1}_{j=1}\frac{a_{i_j}+\sqrt{a_{i_j}d_{i_j}}}{\Delta} + \frac{d_{i_k}+\sqrt{a_{i_k}d_{i_k}}}{\Delta} = \frac{1}{\Delta}\delta^S \leq l_+\delta^S.\tag*{\qed} \end{align*} \end{proof} Next, we derive an over-approximation of $\phi(\delta)$ when $\delta \leq 0$. \begin{lemma}\label{lm:boundneg} Define $l_-:=\frac{1}{\Delta}$. For $\delta \leq 0$, we have \begin{equation*} \phi(\delta) = \left\{\begin{aligned} &-\infty & \delta &< -\Delta \\ &\frac{ \sqrt{a_n - \Delta} - \sqrt{a_n - \Delta - \delta} }{\sqrt{a_n} - \sqrt{a_n - \Delta}} & - \Delta\leq \delta &\leq 0. \end{aligned}\right . \end{equation*} Further, $\phi(\delta) \le l_- \delta$ for $\delta \in [-\Delta,0]$. 
\end{lemma} \begin{proof} When $\delta < -\Delta$, $\phi(\delta) = -\infty$ as the right-hand-side of the problem defining $\phi$ is larger than $\sum_{i=1}^n a_i$. Consider therefore the case when $0\geq \delta \geq - \Delta$. There exists an optimal solution $(x^*,y^*)$ of the problem defining $\phi(\delta)$ that is such that $x^*_i = y^*_i = 1$ for all $i \in [n]\setminus \{j\}$ for some $j \in [n]$. Further, $a_jx^*_jy_j^* = a_j -\Delta-\delta$. We obtain \begin{eqnarray*}\phi(\delta) &=& \textup{max}_j \left[\frac{\sqrt{a_j } - \sqrt{a_j - \Delta - \delta}}{\sqrt{a_j} - \sqrt{a_j - \Delta}}\right]-1 = \frac{\sqrt{a_n } - \sqrt{a_n - \Delta - \delta}}{\sqrt{a_n} - \sqrt{a_n - \Delta}}-1, \end{eqnarray*} where the last step follows from Lemma~\ref{lm:decreasing}. Finally, observe that $\phi(\delta)$ is convex in $\delta$. Therefore, by taking a linear inequality tight at $\delta = 0$ and $\delta = -\Delta$, we obtain that $\phi(\delta) \leq \delta/\Delta = l_- \delta$ since $\phi(0)=0$ and $\phi(-\Delta)=-1$. \qed \end{proof} By combining Lemmas~\ref{lm:boundpos} and \ref{lm:boundneg}, we obtain the following over-approximation of $\phi$: \begin{eqnarray*} \phi(\delta) \leq \tilde{\psi}(\delta):=\left\{ \begin{array}{llrclcl} -\infty &\ & &&\delta&\leq& -\Delta \\ l_- \delta &\ & -\Delta &\leq &\delta &\leq& 0 \\ l_+ \delta &\ & 0 &\leq &\delta.&& \end{array}\right. \end{eqnarray*} Note that the function $\tilde{\psi}$ is not subadditive. Lemma~\ref{lm:subadditive} describes a subadditive function that upper bounds $\tilde{\psi}$, thus giving a subadditive upper bound of $\phi$. \begin{lemma}\label{lm:subadditive} {It holds that $l_+ \ge l_->0$.} Further, the function \begin{equation*} \psi(\delta) = \left\{ \begin{aligned} &l_+(\delta+\Delta) - l_-\Delta & \delta&\leq -\Delta \\ &l_- \delta & -\Delta \leq \delta &\leq 0 \\ &l_+ \delta & 0\leq \delta& \end{aligned} \right. \end{equation*} is subadditive. 
\end{lemma} \begin{proof} Define $\mathring{\psi}(\delta):=l_- \delta$ when $\delta \le 0$ and $\mathring{\psi}(\delta):=l_+ \delta$ when $\delta \ge 0$. Function $\mathring{\psi}(\delta)$ satisfies $\mathring{\psi}\geq \psi$ and is subadditive since it is straightforward to verify that $l_+ \ge l_- >0$. Thus, for $u,v$, such that $u,v, u + v \in [-\Delta, +\infty)$, we already have that $\psi(u)+\psi(v)\geq \psi(u+v)$. It remains to consider the cases where at least one of $u$, $v$ or $u+v$ belongs to $(-\infty,-\Delta]$. We do so by considering the possible values of $u+v$ and by assuming without loss of generality that $u\geq v$. We use the fact that for $\delta\leq 0$, $\psi(\delta)=\min\{l_+(\delta+\Delta)-l_-\Delta, l_-\delta\}\geq l_+\delta$. There are three cases to consider. First assume that $u + v \geq 0$. In this case, $\psi(u) + \psi(v) - \psi(u + v) \geq l_+ u + l_+v - l_+(u+v) = 0$. Second assume that $-\Delta \leq u + v \leq 0$. In this case, $v \leq - \Delta$ and $u\geq 0$ so that $\psi(u) + \psi(v) - \psi(u + v) = l_+ u + l_+(v+\Delta)-l_- \Delta - l_-(u+v) = (l_+-l_-)(u+v+\Delta) \geq 0.$ Third assume that $u + v \leq -\Delta$. There are two subcases. If $v \leq -\Delta$, we have $ \psi(u) + (\psi(v) - \psi(u + v)) \geq l_+ u + (l_+(v+\Delta) - l_+(u+v+\Delta))=0.$ If $v \geq -\Delta$, then $0\geq u\geq v\geq -\Delta$. Therefore $\psi(u) + \psi(v) - \psi(u + v) \geq l_-u + l_- v - l_-(u+v) = 0.$ \qed \end{proof} \begin{proof}[Theorem~\ref{thm:upperbound}] Combining Lemmas~\ref{lm:boundpos}, ~\ref{lm:boundneg}, and~\ref{lm:subadditive} yields Theorem~\ref{thm:upperbound}. 
\end{proof} \section{Proof of Theorem~\ref{thm:lifted}} \label{section:lifted} \begin{proof}[Theorem~\ref{thm:lifted}] Following Theorems~\ref{thm:valid} and \ref{thm:upperbound}, it is sufficient to show that $\gamma_i(x,y)\geq \psi(a_ixy)$ for $i\in J_0$ and $\gamma_i(x,y)\geq \psi(a_i(xy-1))$ for $i\in J_1$, where $\psi$ is the subadditive over-approximation of $\phi$ derived in Theorem~\ref{thm:upperbound}. We discuss the possible cases. \begin{enumerate}[label={(\roman*)}] \item Assume $i \in J_0^+$. We must find $\gamma_i(x,y)\geq \psi(a_ixy) = l_+a_ixy$ for $(x,y) \in [0,1]^2$ where the equality holds as $a_i>0$. As $\min\{x, y\}\geq xy$ is the best concave upper bound for $(x, y)\in [0,1]^2$, we choose $\gamma_i(x,y) = l_+a_i\min\{x,y\}$. \item Assume $i \in J_1^-$. We must find $\gamma_i(x,y) \geq \psi(a_i(xy-1)) = l_+(a_ixy-a_i) = l_+a_i(xy-1)$ for $(x,y) \in [0,1]^2$ where the equality holds since $a_i<0$. As $\max\{x+y-1, 0\}\leq xy$ is the best convex lower bound for $(x, y) \in [0,1]^2$, we choose $\gamma_i(x,y) = l_+a_i (\max\{x+y-1, 0\}-1)=-l_+a_i\min\{2-x-y,1\}$. \item \label{lift3} Assume $i \in J_0^-$. We must find $\gamma_i(x,y)\geq \psi(a_ixy)$ for $(x,y) \in [0,1]^2$. As $a_i<0$, $\psi(a_ixy) = \min\{l_-a_ixy, l_+a_ixy + l_+\Delta-1\}\leq \min\{l_-a_i(x+y-1), l_+a_i(x+y-1)+l_+\Delta-1, 0\} := \gamma _i(x,y)$. \item Assume $i \in J_1^+$. In this case, we must find $\gamma_i(x,y)\geq \psi(a_ixy-a_i)$ for $(x,y) \in [0,1]^2$. Since $a_i>0$, $\psi(a_ixy-a_i) = \min\{l_-a_i(xy-1), l_+a_i(xy-1)+l_+\Delta-1\}.$ Similar to \ref{lift3}, we have $ \psi(a_ixy-a_i)\leq l_-a_i(\min\{x, y\}-1) =: \tilde{h}(x,y)$, and $ \psi(a_ixy-a_i)\leq l_+a_i(\min\{x, y\}-1)+l_+\Delta-1=: \tilde{g}(x,y)$. Thus, $\gamma(x,y) = \min\{\tilde{h}(x,y), \tilde{g}(x,y)\}$ is a concave upper bound of $\psi$. Next we improve this upper bound when $a_i\geq a_{i_0} > \Delta$. 
As $g$ and $h$ (defined in Theorem~\ref{thm:lifted}) are concave, it remains to show the following: \begin{claim} For $a_i \geq a_{i_0} > \Delta$, $\min\{g(x,y),h(x,y)\} \geq \psi(a_ixy -a_i)$ for $(x,y) \in [0, 1]^2$ \end{claim} Observe that \begin{eqnarray*} \psi(a_ixy - a_i) = \left\{ \begin{array}{lll} l_+a_i((\sqrt{xy})^2-1) + l_+\Delta-1 \ &\textrm{if}& 0 \leq \sqrt{xy} \leq \sqrt{1 - \frac{\Delta}{a_i}}\\ l_-a_i ((\sqrt{xy})^2 - 1) &\textrm{if}& \sqrt{1 - \frac{\Delta}{a_i}} \leq \sqrt{xy} \leq 1. \end{array} \right. \end{eqnarray*} Consider first the function $g_i(x,y)= \sqrt{a_i - \Delta}\sqrt{a_i}l_+\sqrt{xy} -l_+(a_i-\Delta) -1$: \begin{itemize} \item $\sqrt{xy} \in [0, \sqrt{1 - \frac{\Delta}{a_i}}]$: $g_i(x,y) \geq -1+ (a_i(\sqrt{xy})^2 - a_i + \Delta) l_+ = \psi(a_ixy -a_i).$ \item $\sqrt{xy} \in [\sqrt{1 - \frac{\Delta}{a_i}}, 1]$: we simply prove $ \hat{g}_i(t) := l_+\sqrt{a_i-\Delta}\sqrt{a_i}\sqrt{t} - l_+(a_i-\Delta)-1 \geq l_-a_i (t - 1) =:f_i(t), $ for $t \in [1 - \Delta/a_i, 1]$. To this end, we verify: (i) $\hat{g}_i(1- \frac{\Delta}{a_i}) = f_i(1 - \frac{\Delta}{a_i})$ and (ii) $\hat{g}_i(1) \geq f_i(1)$. This is sufficient since $\hat{g}_i$ is a concave function and $f_i$ is a linear function. The proof of (i) is straightforward. To prove (ii) observe that $\hat{g}_i(1) = l_+\sqrt{a_i-\Delta}\sqrt{a_i}- l_+(a_i-\Delta)-1 \geq f_i(1) = l_-a_i (1 - 1) = 0$ is equivalent to verifying $l_+\geq \frac{1}{\sqrt{a_i -\Delta} (\sqrt{a_i} - \sqrt{a_i -\Delta}) }$ or equivalently $\frac{\sqrt{a_{i_0}} + \sqrt{a_{i_0} - \Delta}}{\sqrt{a_{i_0} - \Delta} } \geq \frac{\sqrt{a_i} + \sqrt{a_i -\Delta}}{\sqrt{a_i -\Delta} }$ which holds since $a_i\geq a_{i_0}$. \end{itemize} Consider second the function $h_i(x,y)= \frac{\sqrt{a_i}}{\sqrt{a_i} - \sqrt{d_i}}(\sqrt{xy} - 1)$: \begin{itemize} \item $\sqrt{xy} \in [\sqrt{1 - \frac{\Delta}{a_i}}, 1]$: by construction, $h_i(x,y) \geq l_-a_i(xy - 1)\geq \psi(a_ixy-a_i)$. 
\item $\sqrt{xy} \in [0, \sqrt{1 - \frac{\Delta}{a_i}}]$: first observe that $\hat{h}_i(t^*) = f_i(t^*)$ for $t^* = \sqrt{1 - \frac{\Delta}{a_i}}$, where $f_i(t) := l_+a_i(t-1)+l_+\Delta-1 $. Since $h_i(x,y)$ is concave it is sufficient to verify that $\hat{h}_i(0) \geq f_i(0)$. This condition holds as $ \frac{\sqrt{a_i}}{\sqrt{a_i} - \sqrt{a_i - \Delta}} \leq 1 + (a_i - \Delta)l_+$ which is equivalent to $l_+ \geq \frac{1}{\sqrt{a_i -\Delta} (\sqrt{a_i} - \sqrt{a_i -\Delta})}. $ \qed \end{itemize} \end{enumerate} \end{proof} \bibliographystyle{plain}
{ "redpajama_set_name": "RedPajamaArXiv" }
9,701
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.chrome.browser.preferences.privacy; import android.app.Activity; import android.app.ProgressDialog; import android.content.Intent; import android.os.Bundle; import android.preference.Preference; import android.preference.PreferenceFragment; import android.support.annotation.Nullable; import android.widget.ListView; import org.chromium.base.VisibleForTesting; import org.chromium.base.metrics.RecordHistogram; import org.chromium.base.metrics.RecordUserAction; import org.chromium.chrome.R; import org.chromium.chrome.browser.ChromeFeatureList; import org.chromium.chrome.browser.browsing_data.BrowsingDataType; import org.chromium.chrome.browser.browsing_data.TimePeriod; import org.chromium.chrome.browser.help.HelpAndFeedback; import org.chromium.chrome.browser.multiwindow.MultiWindowUtils; import org.chromium.chrome.browser.preferences.ButtonPreference; import org.chromium.chrome.browser.preferences.ClearBrowsingDataCheckBoxPreference; import org.chromium.chrome.browser.preferences.PrefServiceBridge; import org.chromium.chrome.browser.preferences.SpinnerPreference; import org.chromium.chrome.browser.preferences.TextMessageWithLinkAndIconPreference; import org.chromium.chrome.browser.preferences.privacy.BrowsingDataCounterBridge.BrowsingDataCounterCallback; import org.chromium.chrome.browser.profiles.Profile; import org.chromium.chrome.browser.tabmodel.TabModel.TabLaunchType; import org.chromium.chrome.browser.tabmodel.document.TabDelegate; import org.chromium.components.signin.ChromeSigninController; import java.util.Arrays; import java.util.EnumSet; /** * Preference screen that allows the user to clear browsing data. * The user can choose which types of data to clear (history, cookies, etc), and the time range * from which to clear data. 
*/ public class ClearBrowsingDataPreferences extends PreferenceFragment implements PrefServiceBridge.ImportantSitesCallback, PrefServiceBridge.OnClearBrowsingDataListener, PrefServiceBridge.OtherFormsOfBrowsingHistoryListener, Preference.OnPreferenceClickListener, Preference.OnPreferenceChangeListener { /** * Represents a single item in the dialog. */ private static class Item implements BrowsingDataCounterCallback, Preference.OnPreferenceClickListener { private final ClearBrowsingDataPreferences mParent; private final DialogOption mOption; private final ClearBrowsingDataCheckBoxPreference mCheckbox; private BrowsingDataCounterBridge mCounter; private boolean mShouldAnnounceCounterResult; public Item(ClearBrowsingDataPreferences parent, DialogOption option, ClearBrowsingDataCheckBoxPreference checkbox, boolean selected, boolean enabled) { super(); mParent = parent; mOption = option; mCheckbox = checkbox; mCounter = new BrowsingDataCounterBridge(this, mOption.getDataType()); mCheckbox.setOnPreferenceClickListener(this); mCheckbox.setEnabled(enabled); mCheckbox.setChecked(selected); mCheckbox.setSummaryOff(""); // No summary when unchecked. } public void destroy() { mCounter.destroy(); } public DialogOption getOption() { return mOption; } public boolean isSelected() { return mCheckbox.isChecked(); } @Override public boolean onPreferenceClick(Preference preference) { assert mCheckbox == preference; mParent.updateButtonState(); mShouldAnnounceCounterResult = true; PrefServiceBridge.getInstance().setBrowsingDataDeletionPreference( mOption.getDataType(), mCheckbox.isChecked()); return true; } @Override public void onCounterFinished(String result) { mCheckbox.setSummaryOn(result); if (mShouldAnnounceCounterResult) { mCheckbox.announceForAccessibility(result); } } /** * Sets whether the BrowsingDataCounter result should be announced. 
This is when the counter * recalculation was caused by a checkbox state change (as opposed to fragment * initialization or time period change). */ public void setShouldAnnounceCounterResult(boolean value) { mShouldAnnounceCounterResult = value; } } private static final String PREF_HISTORY = "clear_history_checkbox"; private static final String PREF_COOKIES = "clear_cookies_checkbox"; private static final String PREF_CACHE = "clear_cache_checkbox"; private static final String PREF_PASSWORDS = "clear_passwords_checkbox"; private static final String PREF_FORM_DATA = "clear_form_data_checkbox"; @VisibleForTesting public static final String PREF_GOOGLE_SUMMARY = "google_summary"; @VisibleForTesting public static final String PREF_GENERAL_SUMMARY = "general_summary"; private static final String PREF_TIME_RANGE = "time_period_spinner"; /** The "Clear" button preference. */ @VisibleForTesting public static final String PREF_CLEAR_BUTTON = "clear_button"; /** The tag used for logging. */ public static final String TAG = "ClearBrowsingDataPreferences"; /** The histogram for the dialog about other forms of browsing history. */ private static final String DIALOG_HISTOGRAM = "History.ClearBrowsingData.ShownHistoryNoticeAfterClearing"; /** The web history URL. */ private static final String WEB_HISTORY_URL = "https://history.google.com/history/?utm_source=chrome_cbd"; /** * Used for the onActivityResult pattern. The value is arbitrary, just to distinguish from other * activities that we might be using onActivityResult with as well. */ private static final int IMPORTANT_SITES_DIALOG_CODE = 1; private static final int IMPORTANT_SITES_PERCENTAGE_BUCKET_COUNT = 20; /** * The various data types that can be cleared via this screen. 
*/ public enum DialogOption { CLEAR_HISTORY(BrowsingDataType.HISTORY, PREF_HISTORY), CLEAR_COOKIES_AND_SITE_DATA(BrowsingDataType.COOKIES, PREF_COOKIES), CLEAR_CACHE(BrowsingDataType.CACHE, PREF_CACHE), CLEAR_PASSWORDS(BrowsingDataType.PASSWORDS, PREF_PASSWORDS), CLEAR_FORM_DATA(BrowsingDataType.FORM_DATA, PREF_FORM_DATA); private final int mDataType; private final String mPreferenceKey; private DialogOption(int dataType, String preferenceKey) { mDataType = dataType; mPreferenceKey = preferenceKey; } public int getDataType() { return mDataType; } /** * @return String The key of the corresponding preference. */ public String getPreferenceKey() { return mPreferenceKey; } } /** * An option to be shown in the time period spiner. */ private static class TimePeriodSpinnerOption { private int mTimePeriod; private String mTitle; /** * Constructs this time period spinner option. * @param timePeriod The time period represented as an int from the shared enum * {@link TimePeriod}. * @param title The text that will be used to represent this item in the spinner. */ public TimePeriodSpinnerOption(int timePeriod, String title) { mTimePeriod = timePeriod; mTitle = title; } /** * @return The time period represented as an int from the shared enum {@link TimePeriod} */ public int getTimePeriod() { return mTimePeriod; } @Override public String toString() { return mTitle; } } private OtherFormsOfHistoryDialogFragment mDialogAboutOtherFormsOfBrowsingHistory; private boolean mIsDialogAboutOtherFormsOfBrowsingHistoryEnabled; private ProgressDialog mProgressDialog; private Item[] mItems; // This is a constant on the C++ side. private int mMaxImportantSites; // This is the sorted list of important registerable domains. If null, then we haven't finished // fetching them yet. private String[] mSortedImportantDomains; // These are the reasons the above domains were chosen as important. private int[] mSortedImportantDomainReasons; // These are full url examples of the domains above. 
We use them for favicons. private String[] mSortedExampleOrigins; // This is the dialog we show to the user that lets them 'uncheck' (or exclude) the above // important domains from being cleared. private ConfirmImportantSitesDialogFragment mConfirmImportantSitesDialog; private final EnumSet<DialogOption> getSelectedOptions() { EnumSet<DialogOption> selected = EnumSet.noneOf(DialogOption.class); for (Item item : mItems) { if (item.isSelected()) selected.add(item.getOption()); } return selected; } /** * Requests the browsing data corresponding to the given dialog options to be deleted. * @param options The dialog options whose corresponding data should be deleted. */ private final void clearBrowsingData(EnumSet<DialogOption> options, @Nullable String[] blacklistedDomains, @Nullable int[] blacklistedDomainReasons, @Nullable String[] ignoredDomains, @Nullable int[] ignoredDomainReasons) { showProgressDialog(); int[] dataTypes = new int[options.size()]; int i = 0; for (DialogOption option : options) { dataTypes[i] = option.getDataType(); ++i; } Object spinnerSelection = ((SpinnerPreference) findPreference(PREF_TIME_RANGE)).getSelectedOption(); int timePeriod = ((TimePeriodSpinnerOption) spinnerSelection).getTimePeriod(); if (blacklistedDomains != null && blacklistedDomains.length != 0) { PrefServiceBridge.getInstance().clearBrowsingDataExcludingDomains(this, dataTypes, timePeriod, blacklistedDomains, blacklistedDomainReasons, ignoredDomains, ignoredDomainReasons); } else { PrefServiceBridge.getInstance().clearBrowsingData(this, dataTypes, timePeriod); } } private void dismissProgressDialog() { if (mProgressDialog != null && mProgressDialog.isShowing()) { mProgressDialog.dismiss(); } mProgressDialog = null; } /** * Returns the Array of dialog options. Options are displayed in the same * order as they appear in the array. 
*/ private DialogOption[] getDialogOptions() { return new DialogOption[] { DialogOption.CLEAR_HISTORY, DialogOption.CLEAR_COOKIES_AND_SITE_DATA, DialogOption.CLEAR_CACHE, DialogOption.CLEAR_PASSWORDS, DialogOption.CLEAR_FORM_DATA }; } /** * Returns the Array of time periods. Options are displayed in the same order as they appear * in the array. */ private TimePeriodSpinnerOption[] getTimePeriodSpinnerOptions() { Activity activity = getActivity(); TimePeriodSpinnerOption[] options = new TimePeriodSpinnerOption[] { new TimePeriodSpinnerOption(TimePeriod.LAST_HOUR, activity.getString(R.string.clear_browsing_data_period_hour)), new TimePeriodSpinnerOption(TimePeriod.LAST_DAY, activity.getString(R.string.clear_browsing_data_period_day)), new TimePeriodSpinnerOption(TimePeriod.LAST_WEEK, activity.getString(R.string.clear_browsing_data_period_week)), new TimePeriodSpinnerOption(TimePeriod.FOUR_WEEKS, activity.getString(R.string.clear_browsing_data_period_four_weeks)), new TimePeriodSpinnerOption(TimePeriod.ALL_TIME, activity.getString(R.string.clear_browsing_data_period_everything))}; return options; } /** * Decides whether a given dialog option should be selected when the dialog is initialized. * @param option The option in question. * @return boolean Whether the given option should be preselected. */ private boolean isOptionSelectedByDefault(DialogOption option) { return PrefServiceBridge.getInstance().getBrowsingDataDeletionPreference( option.getDataType()); } /** * Called when clearing browsing data completes. * Implements the ChromePreferences.OnClearBrowsingDataListener interface. */ @Override public void onBrowsingDataCleared() { if (getActivity() == null) return; // If the user deleted their browsing history, the dialog about other forms of history // is enabled, and it has never been shown before, show it. Note that opening a new // DialogFragment is only possible if the Activity is visible. 
// // If conditions to show the dialog about other forms of history are not met, just close // this preference screen. if (MultiWindowUtils.isActivityVisible(getActivity()) && getSelectedOptions().contains(DialogOption.CLEAR_HISTORY) && mIsDialogAboutOtherFormsOfBrowsingHistoryEnabled && !OtherFormsOfHistoryDialogFragment.wasDialogShown(getActivity())) { mDialogAboutOtherFormsOfBrowsingHistory = new OtherFormsOfHistoryDialogFragment(); mDialogAboutOtherFormsOfBrowsingHistory.show(getActivity()); dismissProgressDialog(); RecordHistogram.recordBooleanHistogram(DIALOG_HISTOGRAM, true); } else { dismissProgressDialog(); getActivity().finish(); RecordHistogram.recordBooleanHistogram(DIALOG_HISTOGRAM, false); } } /** * Returns if we should show the important sites dialog. We check to see if * <ol> * <li>We've fetched the important sites, * <li>there are important sites, * <li>the feature is enabled, and * <li>we have cache or cookies selected. * </ol> */ private boolean shouldShowImportantSitesDialog() { if (!ChromeFeatureList.isEnabled(ChromeFeatureList.IMPORTANT_SITES_IN_CBD)) return false; EnumSet<DialogOption> selectedOptions = getSelectedOptions(); if (!selectedOptions.contains(DialogOption.CLEAR_CACHE) && !selectedOptions.contains(DialogOption.CLEAR_COOKIES_AND_SITE_DATA)) { return false; } boolean haveImportantSites = mSortedImportantDomains != null && mSortedImportantDomains.length != 0; RecordHistogram.recordBooleanHistogram( "History.ClearBrowsingData.ImportantDialogShown", haveImportantSites); return haveImportantSites; } @Override public boolean onPreferenceClick(Preference preference) { if (preference.getKey().equals(PREF_CLEAR_BUTTON)) { if (shouldShowImportantSitesDialog()) { showImportantDialogThenClear(); return true; } // If sites haven't been fetched, just clear the browsing data regularly rather than // waiting to show the important sites dialog. 
clearBrowsingData(getSelectedOptions(), null, null, null, null); return true; } return false; } @Override public boolean onPreferenceChange(Preference preference, Object value) { if (preference.getKey().equals(PREF_TIME_RANGE)) { // Inform the items that a recalculation is going to happen as a result of the time // period change. for (Item item : mItems) { item.setShouldAnnounceCounterResult(false); } PrefServiceBridge.getInstance().setBrowsingDataDeletionTimePeriod( ((TimePeriodSpinnerOption) value).getTimePeriod()); return true; } return false; } /** * Disable the "Clear" button if none of the options are selected. Otherwise, enable it. */ private void updateButtonState() { ButtonPreference clearButton = (ButtonPreference) findPreference(PREF_CLEAR_BUTTON); if (clearButton == null) return; boolean isEnabled = !getSelectedOptions().isEmpty(); clearButton.setEnabled(isEnabled); } @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); RecordUserAction.record("ClearBrowsingData_DialogCreated"); mMaxImportantSites = PrefServiceBridge.getMaxImportantSites(); PrefServiceBridge.getInstance().requestInfoAboutOtherFormsOfBrowsingHistory(this); getActivity().setTitle(R.string.clear_browsing_data_title); addPreferencesFromResource(R.xml.clear_browsing_data_preferences); DialogOption[] options = getDialogOptions(); mItems = new Item[options.length]; for (int i = 0; i < options.length; i++) { boolean enabled = true; // It is possible to disable the deletion of browsing history. 
if (options[i] == DialogOption.CLEAR_HISTORY && !PrefServiceBridge.getInstance().canDeleteBrowsingHistory()) { enabled = false; PrefServiceBridge.getInstance().setBrowsingDataDeletionPreference( DialogOption.CLEAR_HISTORY.getDataType(), false); } mItems[i] = new Item( this, options[i], (ClearBrowsingDataCheckBoxPreference) findPreference(options[i].getPreferenceKey()), isOptionSelectedByDefault(options[i]), enabled); } // Not all checkboxes defined in the layout are necessarily handled by this class // or a particular subclass. Hide those that are not. EnumSet<DialogOption> unboundOptions = EnumSet.allOf(DialogOption.class); unboundOptions.removeAll(Arrays.asList(getDialogOptions())); for (DialogOption option : unboundOptions) { getPreferenceScreen().removePreference(findPreference(option.getPreferenceKey())); } // The time range selection spinner. SpinnerPreference spinner = (SpinnerPreference) findPreference(PREF_TIME_RANGE); spinner.setOnPreferenceChangeListener(this); TimePeriodSpinnerOption[] spinnerOptions = getTimePeriodSpinnerOptions(); int selectedTimePeriod = PrefServiceBridge.getInstance().getBrowsingDataDeletionTimePeriod(); int spinnerOptionIndex = -1; for (int i = 0; i < spinnerOptions.length; ++i) { if (spinnerOptions[i].getTimePeriod() == selectedTimePeriod) { spinnerOptionIndex = i; break; } } assert spinnerOptionIndex != -1; spinner.setOptions(spinnerOptions, spinnerOptionIndex); // The "Clear" button. ButtonPreference clearButton = (ButtonPreference) findPreference(PREF_CLEAR_BUTTON); clearButton.setOnPreferenceClickListener(this); clearButton.setShouldDisableView(true); // The general information footnote informs users about data that will not be deleted. // If the user is signed in, it also informs users about the behavior of synced deletions. // and we show an additional Google-specific footnote. 
This footnote informs users that they // will not be signed out of their Google account, and if the web history service indicates // that they have other forms of browsing history, then also about that. TextMessageWithLinkAndIconPreference google_summary = (TextMessageWithLinkAndIconPreference) findPreference(PREF_GOOGLE_SUMMARY); TextMessageWithLinkAndIconPreference general_summary = (TextMessageWithLinkAndIconPreference) findPreference(PREF_GENERAL_SUMMARY); google_summary.setLinkClickDelegate(new Runnable() { @Override public void run() { new TabDelegate(false /* incognito */).launchUrl( WEB_HISTORY_URL, TabLaunchType.FROM_CHROME_UI); } }); general_summary.setLinkClickDelegate(new Runnable() { @Override public void run() { HelpAndFeedback.getInstance(getActivity()).show( getActivity(), getResources().getString(R.string.help_context_clear_browsing_data), Profile.getLastUsedProfile(), null); } }); if (ChromeSigninController.get(getActivity()).isSignedIn()) { general_summary.setSummary( R.string.clear_browsing_data_footnote_sync_and_site_settings); } else { getPreferenceScreen().removePreference(google_summary); general_summary.setSummary(R.string.clear_browsing_data_footnote_site_settings); } if (ChromeFeatureList.isEnabled(ChromeFeatureList.IMPORTANT_SITES_IN_CBD)) { PrefServiceBridge.fetchImportantSites(this); } } @Override public void onActivityCreated(Bundle savedInstanceState) { super.onActivityCreated(savedInstanceState); // Now that the dialog's view has been created, update the button state. updateButtonState(); // Remove the dividers between checkboxes. ((ListView) getView().findViewById(android.R.id.list)).setDivider(null); } @Override public void onDestroy() { super.onDestroy(); dismissProgressDialog(); for (Item item : mItems) { item.destroy(); } } // We either show the dialog, or modify the current one to display our messages. This avoids // a dialog flash. 
private final void showProgressDialog() { if (getActivity() == null) return; mProgressDialog = ProgressDialog.show(getActivity(), getActivity().getString(R.string.clear_browsing_data_progress_title), getActivity().getString(R.string.clear_browsing_data_progress_message), true, false); } @VisibleForTesting ProgressDialog getProgressDialog() { return mProgressDialog; } @VisibleForTesting ConfirmImportantSitesDialogFragment getImportantSitesDialogFragment() { return mConfirmImportantSitesDialog; } /** * This method shows the important sites dialog. After the dialog is shown, we correctly clear. */ private void showImportantDialogThenClear() { mConfirmImportantSitesDialog = ConfirmImportantSitesDialogFragment.newInstance( mSortedImportantDomains, mSortedImportantDomainReasons, mSortedExampleOrigins); mConfirmImportantSitesDialog.setTargetFragment(this, IMPORTANT_SITES_DIALOG_CODE); mConfirmImportantSitesDialog.show( getFragmentManager(), ConfirmImportantSitesDialogFragment.FRAGMENT_TAG); } @Override public void showNoticeAboutOtherFormsOfBrowsingHistory() { if (getActivity() == null) return; TextMessageWithLinkAndIconPreference google_summary = (TextMessageWithLinkAndIconPreference) findPreference(PREF_GOOGLE_SUMMARY); if (google_summary == null) return; google_summary.setSummary( R.string.clear_browsing_data_footnote_signed_and_other_forms_of_history); } @Override public void enableDialogAboutOtherFormsOfBrowsingHistory() { if (getActivity() == null) return; mIsDialogAboutOtherFormsOfBrowsingHistoryEnabled = true; } /** * Used only to access the dialog about other forms of browsing history from tests. */ @VisibleForTesting OtherFormsOfHistoryDialogFragment getDialogAboutOtherFormsOfBrowsingHistory() { return mDialogAboutOtherFormsOfBrowsingHistory; } @Override public void onImportantRegisterableDomainsReady( String[] domains, String[] exampleOrigins, int[] importantReasons) { if (domains == null) return; // mMaxImportantSites is a constant on the C++ side. 
While 0 is valid, use 1 as the minimum // because histogram code assumes a min >= 1; the underflow bucket will record the 0s. RecordHistogram.recordLinearCountHistogram("History.ClearBrowsingData.NumImportant", domains.length, 1, mMaxImportantSites + 1, mMaxImportantSites + 1); mSortedImportantDomains = Arrays.copyOf(domains, domains.length); mSortedImportantDomainReasons = Arrays.copyOf(importantReasons, importantReasons.length); mSortedExampleOrigins = Arrays.copyOf(exampleOrigins, exampleOrigins.length); } /** * This is the callback for the important domain dialog. We should only clear if we get the * positive button response. */ @Override public void onActivityResult(int requestCode, int resultCode, Intent data) { if (requestCode == IMPORTANT_SITES_DIALOG_CODE && resultCode == Activity.RESULT_OK) { // Deselected means that the user is excluding the domain from being cleared. String[] deselectedDomains = data.getStringArrayExtra( ConfirmImportantSitesDialogFragment.DESELECTED_DOMAINS_TAG); int[] deselectedDomainReasons = data.getIntArrayExtra( ConfirmImportantSitesDialogFragment.DESELECTED_DOMAIN_REASONS_TAG); String[] ignoredDomains = data.getStringArrayExtra( ConfirmImportantSitesDialogFragment.IGNORED_DOMAINS_TAG); int[] ignoredDomainReasons = data.getIntArrayExtra( ConfirmImportantSitesDialogFragment.IGNORED_DOMAIN_REASONS_TAG); if (deselectedDomains != null && mSortedImportantDomains != null) { // mMaxImportantSites is a constant on the C++ side. RecordHistogram.recordCustomCountHistogram( "History.ClearBrowsingData.ImportantDeselectedNum", deselectedDomains.length, 1, mMaxImportantSites + 1, mMaxImportantSites + 1); RecordHistogram.recordCustomCountHistogram( "History.ClearBrowsingData.ImportantIgnoredNum", ignoredDomains.length, 1, mMaxImportantSites + 1, mMaxImportantSites + 1); // We put our max at 20 instead of 100 to reduce the number of empty buckets (as // our maximum denominator is 5). 
RecordHistogram.recordEnumeratedHistogram( "History.ClearBrowsingData.ImportantDeselectedPercent", deselectedDomains.length * IMPORTANT_SITES_PERCENTAGE_BUCKET_COUNT / mSortedImportantDomains.length, IMPORTANT_SITES_PERCENTAGE_BUCKET_COUNT + 1); RecordHistogram.recordEnumeratedHistogram( "History.ClearBrowsingData.ImportantIgnoredPercent", ignoredDomains.length * IMPORTANT_SITES_PERCENTAGE_BUCKET_COUNT / mSortedImportantDomains.length, IMPORTANT_SITES_PERCENTAGE_BUCKET_COUNT + 1); } clearBrowsingData(getSelectedOptions(), deselectedDomains, deselectedDomainReasons, ignoredDomains, ignoredDomainReasons); } } }
{ "redpajama_set_name": "RedPajamaGithub" }
4,845
Real World Los Angeles' Jon Brennan, 47, Is Still a Virgin -- And Doesn't Want to Talk About It Television By TooFab Staff | 12/8/2021 12:00 AM PT "When I pivot away from it, don't keep egging it on. All of you, just drop it!" he said to Beth S. on the latest episode of "Homecoming." The sex lives of the roommates on "The Real World" have always been a topic of conversation -- but when it comes to Jon Brennan's lack of experience in that department, it's not exactly a conversation he wants to have. On Wednesday's new episode of "The Real World Homecoming: Los Angeles," Brennan, 47, opened up about becoming a youth pastor and overall "mentor to young people" since his time on his original season. "I don't drink alcohol, I don't live a promiscuous life, I try not to give into desires of the flesh," he explained in a confessional. "To people who say, 'Aren't you like missing something?' No, I'm surrounded by people all the time, I'm a people person, I'm pretty happy." Real World: Homecoming Stars Concerned Over David's Drinking: 'David's Volatile, He's a Mess' During a conversation with Tami Roman, David Edwards and Beth Stolarczyk about what they've been doing in the years since they last saw each other, Tami asked Jon when he'll get married -- with Brennan saying he's good where he is. She then asked if he was still a virgin, with Jon answering affirmatively. "I follow the Lord, whole heartedly. I'm all in. I'm all in," he added. While Tami said she didn't understand his decision to remain abstinent in the '90s, she explained she gets it now -- thanks to her own 27-year-old daughter who's saving herself for marriage. Making a joke, Beth S. asked if Tami's daughter was into country music -- as in, maybe she'd be a good fit for Jon. "You're gonna leave this world and never experience no coochie? You're just gonna go out of the world with no coochie whatsoever?" asked a shocked Tami after Jon said he'd probably never get married. 
"That's not a large priority at this stage," he replied, starting to get uncomfortable with the conversation. "It's really tacky to talk about the details of your sex life on a reality show. It's the same story 28 years later," he added in a confessional, before the talk continued. MTV/Paramount+ Beth Stolarczyk Teases Tami & David 'Real World Homecoming' Drama, Pitches All Stars Season (Exclusive) "I would like for you to get some p---- before you die," Tami went on. "If I had my druthers, I feel it's something everybody should experience once, at least." Jon and Beth eventually got up and walked away, with Jon calling out Beth for "egging" the conversation on with her comment about Tami's daughter. "When I pivot away from it, don't keep egging it on. All of you, just drop it," he told her, clearly upset. Later, Tami apologized to Jon for making a comment which "may have appeared to be insensitive" to him. Telling him he's "so much more than that piece" of his life, she said she was sorry. Explaining that he teaches abstinence to the youth he mentors, Jon said, "So when you say, 'Hey you're kinda old and you're not married, tell me about your sex life,' well, I don't have one. Married people don't like single people and they don't like single people to remain single. I'm through that, I'm 47 now." Tami & David Attempted to Hash Out '93 Fight on Real World Homecoming and It Didn't Go Well He then joked that he's good at pivoting the conversation, something Tami didn't want to see happen again. "See Jon, that's that s--- I don't like. I thought we were all here to really have conversations. He's censoring people's thoughts, reactions that will inevitably create dialogue," she said, accusing him of shutting down something that could be a productive, on-camera talking point. "Why does it have to be a sidebar conversation? That's when it feels shady and manipulative." But Jon was able to explain himself out of that corner. 
"When I'm done here, I go back to the people that I mentor," he reminded them all. "So I gotta really ask myself, 'Is this conversation one where the parents are gonna say, 'You're not gonna mentor my kids.'" Tami said she could 100% respect that and just wished he had said that in the beginning. He also agreed to be a little more open to sharing going forward. New episodes of "Real World Homecoming: Los Angeles" drop Wednesdays on Paramount+. Stars Open Up About Losing Their V-Card View Photos #RealWorld
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
1,915
Q: Saving conexion in Java EE's Session .vs. a connection pool I know that Java EE's session object can store complex objects, like a connection to a database. I'm pondering how to implement a certain application for a programming practice, made with Java EE. My first option to use a connection pool, which is very easy with Java EE. I'm wondering, out of curiosity, and also to properly justify the decision, what are the pros and cons of creating a connection to the database any time a client starts a session and storing it there, against the use of a connection pool. Thanks a lot. A: A resource pool will optimise the handling of the resource (your database connection) in a way your system can cope with it. Even though you can end up out of resources if you have a lot of opened connections. That is more likely to happen if you store your database connection in the session context. Web applications don't need to be connected all the time to a database, that connection can be stablished at the beginning of a new operation and closed at the end. Using a resource pool you return your connection back to the pool when you no longer need it, so a new user (session in the web paradigm) can use that resource you have already released instead of creating a new one. The pool will also handle the scenario in which some resources have been idle for a long time (no one has used them in a specific amount of time) and then it will release those resources. When storing a database connection in the session you are never releasing the resource but keeping a permanent reference to it that will last as long as the user session does. You may not face any issues in a short time with that, specially if there are really few users connected at the same time. But in real world applications you will definitively find them. Thus, storing a database connection in the session context is considered as a bad practice. 
EDIT: I forgot to mention that should only store Serializable objects in the session so, if the application server decides to passivate a session, it can be persisted and restored when the application server decides to reactivate it. A database connection is not a Serializable resource. A: Using a connection pool allows you maximize the usability of your connections. This means less connections = less memory = less sockets etc. The reason a pool is better than saving in a session is what happens if someone drops out unexpectedly? If you have a connection in your session, you risk keeping that connection alive for a long time, possibly indefinitely.
{ "redpajama_set_name": "RedPajamaStackExchange" }
4,856
Powiat Goesan (kor. 괴산군, Goesan-gun) znajduje się w prowincji Chungcheong Północny w Korei Południowej. Symbole Ptak - Sroka - symbolizuje pełnych współczucia mieszkańców powiatu Kwiat - Niecierpek Drzewo - Zelkova (spokrewnione z europejskimi wiązami) - reprezentuje patriotyzm i wierność Gospodarka W powiecie uprawia się żeńszeń, jabłka, koreańską odmianę gruszki, ryż, fasolę, sezam, grzyby Songi, kukurydzę, ziemniaki. Produkuje się m.in. kimchi oraz pastę z soi doenjang. Warto zobaczyć Ciekawe formacje skalne Hwayang Gugok Seonyu Gugok Ssanggok Gugok Gallon Gugok Wodospad Suokjung Góra Joryeongsan Maebuljwasang - posągi Buddy Mae Seokjobirosanabuljwasang - kamienny posąg Buddy Birosana Drzewa Miseon w Seongdeok-ri, Chujeom-ri, Yulji-ri - występują tylko na terenie Korei Świątynia Gakyeon (Gakyeonsa) Świątynia Gongrim (Gongrimsa) Święte miejsce katolików Yeongpungseongji - w czasie prześladowań religijnych w 1801 roku poniosło tu śmierć wielu misjonarzy katolickich Festiwal Goesan - odbywający się co roku w październiku Warto także skosztować lokalnych dań: Ssogari Maeuntang - zupa z ryby Ssogari z papryką, bardzo ostra, popularna szczególnie latem, Olgaengiguk - zupa ze ślimaków bagiennych z ziołami. Linki zewnętrzne Strona administracji powiatu Powiaty w prowincji Chungcheong Północny
{ "redpajama_set_name": "RedPajamaWikipedia" }
733
import React from 'react' import ReactDOM from 'react-dom' import { Provider } from 'react-redux' import store from './store' import Routes from './config/routes' import firebase from 'firebase/app'; require("firebase/auth"); require("firebase/database"); // Initialize Firebase firebase.initializeApp({ apiKey: "AIzaSyBUUoC2BwaqI1JFCSyTAPsJ9RPHFKhYAC4", authDomain: "click-or-be-clicked.firebaseapp.com", databaseURL: "https://click-or-be-clicked.firebaseio.com", storageBucket: "click-or-be-clicked.appspot.com", messagingSenderId: "448383551461" }); ReactDOM.render(<Provider store={store}> <Routes /> </Provider>, document.getElementById('app') );
{ "redpajama_set_name": "RedPajamaGithub" }
291
Q: Can not access Google Cloud Instance I am facing the following error while getting into Google Cloud Instance using the serial port. When I run this command, it starts throwing the error. gcloud compute connect-to-serial-port instance-1 This is the error: Sep 20 14:28:35 instance-1 OSConfigAgent[670]: 2022-09-20T14:28:35.5396Z OSConfigAgent Error main.go:196: network error when requesting metadata, make sure your instance has an active network and can reach the metadata server: Get http://169.254.169.254/computeMetadata/v1/?recursive=true&alt=json&wait_for_change=true&last_etag=b6d33d232458e45a&timeout_sec=60: dial tcp 169.254.169.254:80: connect: network is unreachable Sep 20 14:29:33 instance-1 OSConfigAgent[670]: 2022-09-20T14:29:33.5432Z OSConfigAgent Warning: Error waiting for task (attempt 10 of 10): error fetching Instance IDToken: error getting token from metadata: Get http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/identity?audience=osconfig.googleapis.com&format=full: dial tcp 169.254.169.254:80: connect: network is unreachable I am also unable to access the instance using external IP, and the SSH does not work either. SSH throws the following error: These are my network rules. I don't want to restart my instance as I have a job running in Jenkinwhich will destroy the whole day's runtime. A: From your error message "Connection via Cloud Identity-Aware Proxy Failed" these error occur when you try to use SSH when connecting to a VM that doesn't have a public IP address for which you haven't configured Identity-Aware Proxy on port 22. you can Create a firewall rule on port 22 that allows ingress traffic from Identity-Aware Proxy. Also from what @John Hanley suggested to check your VM if it have a service account.
{ "redpajama_set_name": "RedPajamaStackExchange" }
799
Fulfill your friends or yourself a childhood dream! Once to be a pilot himself and the control take over - We will help you to fulfill this dream. The trial flight "helicopter flying yourself" leaves nothing to be desired. The flight lesson is similar to that of a helicopter training. The passenger takes personally a seat on the pilot's seat and is made familiar by a professional instructor with the helicopter on double tax. When do you make a helicopter flight from Franfurt Aschaffenburg? Nearby lies the city of Aschaffenburg with its magnificent sights such as Castle Johannisburg, Pompejanum, collegiate and park Schönbusch . But also the view of the 1,200 years old Großostheim, only 2 km from the airport away, is worth it! Enjoy the region and accept the control of the helicopter!
{ "redpajama_set_name": "RedPajamaC4" }
4,047
Magstatt-le-Haut je občina v departmaju Haut-Rhin francoske regije Alzacija. Leta 1990 je v občini živelo 210 oseb oz. 54 oseb/km². Zunanje povezave Vir: Insee Zemljevidi in satelitski posnetki: Občine departmaja Haut-Rhin
{ "redpajama_set_name": "RedPajamaWikipedia" }
5,427
Track is the future in hospitality software. Your people will close more business, work faster, provide better customer service and increase ROI across all departments. Track Delivers. Supercharge your reservations with lightning fast bookings, and the industry's only built-in call tracking & optimization system, saving companies up to 80% on their current call tracking solution. Track Integrates. Connect Track to leading applications in the marketplace. Our API allows you to connect familiar systems for accounting, email marketing and more. Track Markets. Reach over 50 million travelers per month. Collect leads and automate marketing responses to find the best leads, follow up with past customers and close more business. Track Books. When it comes to converting online bookings, design matters. The responsive Track Booking Engine works on all devices, drives conversions and increases online revenues by delivering a superior user experience. Track Optimizes. Each Track customer receives a newly designed, customized, responsive website with modern, high performance search engine optimization for maximum visibility and conversion across all devices. Track Tracks. Track is the only PM System in the world that includes call tracking, agent phone reporting, automated call disposition and campaign logs standard. Find out which ads work and which don't. Which agents close and which don't. Track Stays In Touch. Track features a guest mobile web app that allows guests to check-in, check-out, add nights, manage their reservation, submit housekeeping, & receive maintenance messages from you and more via phone, tablet or desktop. Track is an all-in-one solution for your business. We give you everything you need in one cloud based system.
{ "redpajama_set_name": "RedPajamaC4" }
4,004
NOTICE is hereby given that a REGULAR meeting of COMMISSIONERS COURT will be held on Thursday, September 06, 2018 at 8:30 AM in the Commissioners Courtroom, 100 Main Plaza, New Braunfels, Texas. You may visit our website at www.co.comal.tx.us for agenda postings. 1. Discuss and consider approval of a line item transfer in Administrative Costs, FROM: Contingency - Personnel, TO: Unemployment Compensation to provide for unemployment compensation reserve balance. 3. Proclamation Naming September 2018 as Hunger Action Month in Comal County. 4. Proclamation Recognizing September 10-14, 2018 as Suicide Prevention Week in Comal County. 2. Presentation of the 2018 Annual Achievement of Excellence in Procurement Award to the Comal County Purchasing Department. 7. Discuss and consider approval of Roadway Material Transfer under the Local Government Assistance Program between Comal County and the Texas Department of Transportation, and authorize the County Judge to sign. 8. Discuss and consider approval of Yates/Sundt, Joint Venture request to utilize a portion of the Construction Manager-at-Risk's Contingency pursuant to the agreement between Comal County and Yates/Sundt, Joint Venture for the Comal County Jail Construction and the Sheriff's Office Renovation project and the associated Jail Guaranteed Maximum Price Proposal. 9. Discuss and consider approval of Event Site License Agreement between Comal County, Texas, and National Multiple Sclerosis Society, and authorize County Judge to sign. 10. Discuss and consider approval of the Texas Association of Counties Health and Employee Benefits Pool 2018-2019 Administrative Services Agreement and Stop Loss Agreement, and authorize County Judge to sign. 11. Discuss and consider approval of the 2018-2019 Ameriflex Administrative Services Agreement and authorize County Judge to sign. 12. 
Discuss and consider approval of Memorandum of Understanding between Texas A&M AgriLife Extension Service and Comal County pertaining to Information Technology Services, and authorize County Judge to sign. 13. Discuss and consider approval of Order of Appointment of Election Judges and Alternates for a two-year term to begin September 1, 2018. 14. Discuss and consider setting the maximum number of clerks for each Election Day polling location for the November 6, 2018 General Election. 15. Discuss and consider approval of Interlocal Agreement for Enforcement of Comal County Fire Code between Comal County and the City of Spring Branch, and authorize County Judge to sign. 16. Discuss and consider approval of Interlocal Cooperation Agreement between Comal County and the City of Spring Branch for Animal Control Services, and authorize County Judge to sign. 17. Discuss and consider approval of Interlocal Cooperation Agreement by and between McLennan County, Texas and Comal County, Texas for inmate housing and authorize County Judge to sign. 18. Discuss and consider approval of a line item transfer in Sheriff's Office, FROM: Training, TO: Continuing Education and Travel to cover expenses for the remainder of 2018. 19. Discuss and consider approval of a line item transfer in Juvenile Probation, FROM: Contingency, TO: Laboratory Testing and/or Witness Expense to cover anticipated cost for drug testing kits and laboratory confirmation for the remainder of 2018. 20. Discuss and consider approval of a line item transfer in Road & Bridge, FROM: Operations-Non-capital Equipment, High Water Warning System, Herbicide, Traffic Control Devices and Parts & Repairs, TO: Operations-Contract Services, Training, Building/Facilities maintenance and Administration-Utilities to cover remaining 2018 expenditures. 21. Discuss and consider approval of Order #417 - Disposition of Salvage and Surplus Property. 22. 
Discuss and consider approval of short term Food Services Contract for the Comal County Jail with Trinity Services Group and authorize Purchasing Director to execute all related documents. 23. Discuss and consider approval of Supplemental Agreement I to Bid 2017-1065 Cement & Flowable Backfill and authorize Purchasing Director to execute all related documents. 24. Discuss and consider approval of Supplemental Agreement to Bid 2017-1101 HVAC Repair/Replacement Services and authorize Purchasing Director to execute all related documents.
{ "redpajama_set_name": "RedPajamaC4" }
7,818
Q: Descent direction with symmetrically positive definite hessian matrix and $∇f(x_{k})=0$ with $Hf(xk)d_{k}=−∇f(x_{k})$ I have the following problem which I can't figure out how to solve it. Let $f:R^n→R$ be twice continuously differentiable. Furthermore, let $d_{k}$ denote the solution of the Newton equation $Hf(xk)d_{k}=−∇f(x_{k})$ for the Hessian matrix $Hf(x_{k})$ and the gradient $∇f(x_{k})$. I must show that $d_{k}$ defines a descent direction at $f$ at $x_{k}$ if $Hf(x_{k})$ is symmetrically positive definite and $∇f(x_{k})=0$. Can someone help me? A: We say that $d_k$ is a descent direction if $\langle d_k,\nabla f(x_k)\rangle\leq 0$, this is because a Taylor expansion yields, $$ f(x_k+d_k) = f(x_k) + \langle d_k,\nabla f(x_k)\rangle + o(\Vert d_k\Vert),$$ thus up to a first-order approximation (which must be treated carefully), $f(x_k+d_k) \leq f(x_k)$, if $\langle d_k,\nabla f(x_k)\rangle\leq 0$. In your case, $ \langle d_k,\nabla f(x_k)\rangle = - \langle H_f(x_k)\nabla f(x_k),\nabla f(x_k)\rangle$. This quantity is (strictly) negative if $H_f(x_k)$ is (definite) positive, by definition of a positive matrix. If you want to convince yourself of that, you can diagonalize $H_f(x_k)$ in an orthogonal basis.
{ "redpajama_set_name": "RedPajamaStackExchange" }
335
{"url":"http:\/\/community.boredofstudies.org\/238\/extracurricular-higher-level\/346341\/interesting-mathematical-statements.html","text":"1. ## Interesting mathematical statements\n\n$\\noindent Leave an intriguing mathematical statement for all of us to be flabbergasted by.$\n\n$\\noindent I'll start.$\n\n$\\lim_{x \\rightarrow \\infty} e^{e^{e^{\\left ( x+e^{-(a + x + e^x + e^{e^x})} \\right )}}} - e^{e^{e^x}} \\equiv e^{-a}$\n\n$\\noindent For all values of \"a\".$\n\n2. ## Re: Interesting mathematical statements\n\nExcept why is this in non school lol\n\n3. ## Re: Interesting mathematical statements\n\nCollatz conjecture\n\n$\\\\ Let the sequence { T }_{ n },{ T }_{ n+1 },\\dots be defined such that \\\\{ T }_{ k } \\in \\mathbb {Z}^{+} \\, \\forall \\, k \\in \\mathbb N \\\\ where { T }_{ n+1 } = \\left\\{ \\begin{matrix} 3{ T }_{ n }+1 \\\\ { T }_{ n } \/ 2 \\end{matrix} \\right if { T }_{ n } is \\begin{matrix} odd \\\\ even \\end{matrix} \\\\ The sequence always converges into 4, 2, 1.$\n\n4. ## Re: Interesting mathematical statements\n\nOriginally Posted by leehuan\nExcept why is this in non school lol\nWell it's not exactly a question asking thread, and it's more for entertainment.\n\n$Sophomore's Dream$\n\n$\\int_0^1 \\frac{dx}{x^x} = \\sum_{k=0}^\\infty \\frac{1}{k^k}$\n\n5. ## Re: Interesting mathematical statements\n\nOriginally Posted by leehuan\nCollatz conjecture\n\n$\\\\ Let the sequence { T }_{ n },{ T }_{ n+1 },\\dots be defined such that \\\\{ T }_{ k } \\in \\mathbb {Z}^{+} \\, \\forall \\, k \\in \\mathbb N \\\\ where { T }_{ n+1 } = \\left\\{ \\begin{matrix} 3{ T }_{ n }+1 \\\\ { T }_{ n } \/ 2 \\end{matrix} \\right if { T }_{ n } is \\begin{matrix} odd \\\\ even \\end{matrix} \\\\ The sequence always converges into 4, 2, 1.$\nThe sequence is conjectured to always diverge to 1\n\n6. 
## Re: Interesting mathematical statements\n\nWell it's not exactly a question asking thread, and it's more for entertainment.\nYou could always have posted it under just maths then or extracurricular\n\nFermat's Last Theorem\n\n$\\\\ Consider the equation { a }^{ n }+{ b }^{ n }={ c }^{ n } \\quad \\forall \\, \\left\\{ a,b,c,n \\right\\} \\in \\mathbb Z. \\\\ If one restricts that \\left| n \\right| >2, then we have no solutions.$\n\n7. ## Re: Interesting mathematical statements\n\nOriginally Posted by KingOfActing\nThe sequence is conjectured to always diverge to 1\nI know lol. This is one of the biggest mind gobbling problems to pure mathematicians apparently; WHY?\n\n8. ## Re: Interesting mathematical statements\n\nOriginally Posted by leehuan\nI know lol. This is one of the biggest mind gobbling problems to pure mathematicians apparently; WHY?\nThe thing is, we don't even know how to begin to approach this problem. Paul Erdos has already commented on this problem.\nLooks like we'll just have to wait for the next\nEuler\/Erdos\/Tao\/Gauss\/Noether\/Polya\/Hilbert\/Russell\/Lagrange\/Riemann\/Hardy\/Poincare\/Fermat\/Grothendieck\/Newton\/Leibniz\/Weierstrass\/Cauchy\/Descartes\/Dirichlet\/Cantor\/Fibonacci\/Jacobi\/Ramanujan\/Hamilton\/Godel\/Pascal\/Apollonius\/Laplace\/Liouville\/Eisenstein\/Banach\/Peano\/Bernoulli\/Viete\/Fourier\/Huygens\/Chebyshev\/Lebesgue\/Turing\/Cardano\/Minkowski\/Littlewood\/Legendre\/Birkhoff\/Lambert\/Poisson\/Wallis\/Tarski\/Frege\/Hausdorff\/Neumann\/Galois\nto come around and resolve the problem.\n\n$\\infty ! = \\sqrt{2\\pi}$\n\n9. ## Re: Interesting mathematical statements\n\n$\\infty ! = \\sqrt{2\\pi}$\nOk link me the proof lol\n\nI'm worried about a 1+2+3+4+...=-1\/12 here\n\n10. 
## Re: Interesting mathematical statements\n\nIf you have a small ball in 3 dimensional space, it is possible to decompose it as a union of a finite number of sets, which can be moved by rotations and translations such that the pieces never overlap and such that the final object constructed is an arbitrarily large ball.\n\nColloquially, one can cut a pea into a finite number of pieces and reassemble it into something the size of the sun.\n\n11. ## Re: Interesting mathematical statements\n\nOriginally Posted by leehuan\nOk link me the proof lol\n\nI'm worried about a 1+2+3+4+...=-1\/12 here\nIt's another zeta regularisation, I'm pretty sure.\n\n1+1=2\n\namazing\n\n14. ## Re: Interesting mathematical statements\n\nCan we keep our posts restrained to at least MX2 level and not making bad usages of mathematics lmao\n\n15. ## Re: Interesting mathematical statements\n\nZeta regularisations are important\n\n1 + 2 + 3 + ... =\/= -1\/12, but is rather 'assigned' that value\n\n-1 x -1 = 2\n\n17. ## Re: Interesting mathematical statements\n\nOriginally Posted by leehuan\nOk link me the proof lol\n\nI'm worried about a 1+2+3+4+...=-1\/12 here\n$\\noindent Your fear is somewhat correct, as I will now pull out the Riemann Zeta Function, which is used in the proof of \\sum_{k=1}^\\infty k = -\\frac{1}{12}$\n\nOriginally Posted by KingOfActing\nIt's another zeta regularisation, I'm pretty sure.\nCorrect, although I don't actually fully understand regularisation yet.\n\n$\\zeta(s) = \\sum_{k=1}^\\infty k^{-s}$\n\n$\\noindent The derivative of the zeta function evaluated at s = 0 is -\\frac{1}{2}\\log(2\\pi). 
I am still scouring the internet for a proof of this fact, as I cannot seem to find a proof of it anywhere.$\n\n$\\noindent Differentiating with respect to s, we have: \\zeta'(s) = \\sum_{k=1}^\\infty (-\\log{k}) k^{-s} \\Rightarrow \\Rightarrow \\Rightarrow \\Rightarrow \\zeta'(0) = -\\sum_{k=1}^\\infty \\log{k}$\n\n$\\noindent Combining the above, we have: -\\sum_{k=1}^\\infty \\log{k} = -\\log{\\sqrt{2\\pi}} \\Rightarrow \\Rightarrow \\Rightarrow \\Rightarrow \\Rightarrow \\Rightarrow \\Rightarrow \\Rightarrow \\sum_{k=1}^\\infty \\log{k} = \\log{\\sqrt{2\\pi}}$\n\n$\\log{1} + \\log{2} + \\log{3} + \\dots + \\log{\\infty} = \\log{\\infty !} = \\log{\\sqrt{2\\pi}}$\n\n$\\therefore \\infty ! = \\sqrt{2\\pi}}$\n\n18. ## Re: Interesting mathematical statements\n\nI like the 1 + 2 + 3 + 4 + \u2026 = -1\/12 result, and find it also quite amazing that this is used in physics and gives some experimentally verifiable results. There's a lot of 'weird' stuff like this in this series of lectures on Mathematical Physics by Carl Bender that can be found on YouTube.\n\nAlso, I think this thread should be in the maths Extracurricular Topics forum.\n\n19. ## Re: Interesting mathematical statements\n\nOriginally Posted by glittergal96\nIf you have a small ball in 3 dimensional space, it is possible to decompose it as a union of a finite number of sets, which can be moved by rotations and translations such that the pieces never overlap and such that the final object constructed is an arbitrarily large ball.\n\nColloquially, one can cut a pea into a finite number of pieces and reassemble it into something the size of the sun.\nOnly if I accept the axiom of choice. : PPPPPPP\n\n20. ## Re: Interesting mathematical statements\n\nOnly if I accept the axiom of choice. : PPPPPPP\nEven if you don't accept the axiom of choice (which is a bit limiting, but some minority of mathematicians don't), you would not be able to prove that such a reassembling of the pea into the sun is impossible. 
(Because the axiom of choice is consistent with the other axioms of set theory.)\n\nThis is still pretty unintuitive.\n\n21. ## Re: Interesting mathematical statements\n\nOriginally Posted by glittergal96\nEven if you don't accept the axiom of choice (which is a bit limiting, but some minority of mathematicians don't), you would not be able to prove that such a reassembling of the pea into the sun is impossible. (Because the axiom of choice is consistent with the other axioms of set theory.) This is still pretty unintuitive.\nit was a joke -_-\nI find that people who reject the axiom of choice are on the same level as those who reject the law of the excluded middle. Half of mathematics is based upon contradiction.\n\nGot the proof for derivative of zeta at zero\n$\\noindent \\eta(s) = \\sum_{k=1}^\\infty \\frac{(-1)^{k-1}}{k^s} \\Rightarrow \\zeta(s) - \\eta(s) = 2\\sum_{k=1}^\\infty (2k)^{-s} = 2^{1-s}\\sum_{k=1}^\\infty k^{-s} = 2^{1-s}\\zeta(s) \\Rightarrow \\Rightarrow \\zeta(s) = \\frac{\\eta(s)}{1-2^{1-s}} \\Rightarrow \\zeta(0) = -\\eta(0) = -\\frac{1}{2}$\n\n$\\zeta '(s) = \\frac{\\eta '(s)(1-2^{1-s}) - \\eta(s)2^{1-s}\\log{2}}{(1-2^{1-s})^2} \\Rightarrow -\\zeta '(0) = \\eta'(0) + \\log{2}$\n\n$\\eta '(s) = \\sum_{k=1}^\\infty (-1)^{k-1}\\frac{\\log{k}}{k^s} \\Rightarrow \\eta '(0) = \\sum_{k=1}^\\infty (-1)^{k-1}\\log{k}$\n\n$\\noindent2\\eta '(0) = 2\\sum_{k=1}^\\infty \\log(2k) - 2\\sum_{k=1}^\\infty \\log(2k+1) =\\left ( -\\sum_{k=1}^\\infty \\log(2k-1) + \\sum_{k=1}^\\infty \\log(2k) \\right ) - \\left ( -\\sum_{k=1}^\\infty \\log(2k) + \\sum_{k=1}^\\infty \\log(2k+1) \\right ) = \\sum_{k=1}^\\infty \\log \\left (\\frac{2k}{2k-1} \\right ) + \\sum_{k=1}^\\infty \\log \\left (\\frac{2k}{2k+1} \\right ) = \\sum_{k=1}^\\infty \\log \\left ( \\frac{4k^2}{4k^2 -1} \\right ) = \\log \\left ( \\prod_{k=1}^{\\infty} \\frac{4k^2}{4k^2 -1} \\right ) = \\log{\\frac{\\pi}{2}} \\Rightarrow \\eta '(0) = \\frac{1}{2}\\log{\\frac{\\pi}{2}}$\n\n$-\\zeta '(0) = 
\\frac{1}{2}\\log{\\frac{\\pi}{2}} + \\log{2} = \\log{\\sqrt{2\\pi}}$\n\n$\\therefore \\infty ! = \\sqrt{2\\pi}$\n\nIf you do not know where I got the product identity from, recall the 1995 HSC paper, in which we proved said identity.\n\n22. ## Re: Interesting mathematical statements\n\nWith all of these zeta function things, people often get confused by what is meant by these divergent sums having values.\n\nThe function $\\zeta(s)$ is defined by the Dirichlet series $\\sum_n n^{-s}$ only where it converges, which is the half-plane where the real part of s exceeds 1.\n\nElsewhere in the complex plane (apart from at s=1), the zeta function is defined by analytic continuation. In other words, there is a unique \"nice\" function on the complex plane that extends the series where it converges.\n\nSo at points like s=0 it is not like that series is equal to zeta(0), it is just that that sum diverges in the traditional sense and hence is an undefined object. We might as well use that sum notation to instead represent the \"nice function\" that the convergent sum extends to.\n\nIn this sense the statement is more like: If the sum of the natural numbers had to be defined to be something, -1\/12 would be a natural value for it to have.\n\nDefinitely a lot of the reason that non-mathematicians find this so interesting is that they view the sum as actually converging to -1\/12 in a more traditional sense which is of course nonsense. The amount that this fact is thrown around colloquially does not help.\n\n(I can't say I know much about how this fact crops up in physics though.)\n\n23. ## Re: Interesting mathematical statements\n\nSome of these statements are pretty fun to prove and not too difficult btw.\n\nPeople should post them in the undergrad marathon!\n\n24. ## Re: Interesting mathematical statements\n\nI think these kinds of divergent sums (and integrals for that matter) come up all in the time in quantum physics. For example, the 1+2+3+4+... 
one comes up in calculating the Casimir Force in 1D in Quantum Electrodynamics. The 13 + 23 + 33 + ... one comes up in the 3D version of this calculation (its value is \u03b6(-3) = 1\/120 using analytic continuation of the Riemann-Zeta function). As far as I know, experiments have now been done and agree with predictions to a good extent. Here's a derivation on Wiki of the 3D Casimir effect that uses \u03b6(-3):\n\nhttps:\/\/en.wikipedia.org\/wiki\/Casimi...regularization .\n\nIs it just a coincidence that using these regularised sums happens to give apparently physically sensible answers??\n\n25. ## Re: Interesting mathematical statements\n\n$Borwein Integral$\n\n$\\int_0^\\infty \\frac{\\sin{x}}{x} dx = \\frac{\\pi}{2}$\n\n$\\int_0^\\infty \\frac{\\sin{x}}{x}\\frac{\\sin{\\frac{x}{3}}}{\\frac{x} {3}} dx = \\frac{\\pi}{2}$\n\n$\\int_0^\\infty \\frac{\\sin{x}}{x}\\frac{\\sin{\\frac{x}{3}}}{\\frac{x} {3}}\\frac{\\sin{\\frac{x}{5}}}{\\frac{x}{5}} dx = \\frac{\\pi}{2}$\n\n$\\vdots$\n\n$\\int_0^\\infty \\frac{\\sin{x}}{x}\\frac{\\sin{\\frac{x}{3}}}{\\frac{x} {3}}\\frac{\\sin{\\frac{x}{5}}}{\\frac{x}{5}} \\dots \\frac{\\sin{\\frac{x}{11}}}{\\frac{x}{11}}\\frac{\\sin{ \\frac{x}{13}}}{\\frac{x}{13}} dx = \\frac{\\pi}{2}$\n\n$\\noindent\\int_0^\\infty \\frac{\\sin{x}}{x}\\frac{\\sin{\\frac{x}{3}}}{\\frac{x} {3}}\\frac{\\sin{\\frac{x}{5}}}{\\frac{x}{5}} \\dots \\frac{\\sin{\\frac{x}{11}}}{\\frac{x}{11}}\\frac{\\sin{ \\frac{x}{13}}}{\\frac{x}{13}}\\frac{\\sin{\\frac{x}{15 }}}{\\frac{x}{15}} dx = \\frac{467807924713440738696537864469}{935615849440 640907310521750000}\\pi = \\frac{\\pi}{2}-\\frac{6879714958723010531\\pi}{93561584944064090731 0521750000} \\approx \\frac{\\pi}{2} - 2.31 \\times 10^{-11}$\n\nPage 1 of 6 123 ... Last\n\nThere are currently 1 users browsing this thread. 
(0 members and 1 guests)\n\n#### Posting Permissions\n\n\u2022 You may not post new threads\n\u2022 You may not post replies\n\u2022 You may not post attachments\n\u2022 You may not edit your posts\n\u2022","date":"2018-05-21 02:53:56","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 33, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.8980742692947388, \"perplexity\": 1017.2771575259666}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2018-22\/segments\/1526794863923.6\/warc\/CC-MAIN-20180521023747-20180521043747-00584.warc.gz\"}"}
null
null
\section{Introduction} Smart homes are an emerging field with increased scientific interest. As ambient sensing devices become cheaper and easily available, smart houses exploit those sensors to acquire information from the surrounding environment \citep{CHAN200855, Nazmiye}. Ambient sensing could be distinguished between obtrusive and unobtrusive sensing. While obtrusive sensing, such as cameras, microphones etc., could potentially provide a better insight, there are privacy concerns especially when data are processed on the cloud. The smart home concept could promote independent living on elderly people. Activity recognition and abnormal behavior detection can be performed using non intrusive sensing \citep{Lentzas2020}. This is important as signs of mental illnesses (i.e. dementia, Alzheimer's disease) and potentially dangerous conditions, such as falling, can be detected, resulting on immediate intervention. In addition to abnormal behavior detection, the ability to detect outing events on elderly people living alone is important. Although wandering is a common behavior of people suffering from dementia \citep{SONG2008318,Lai}, a limited amount of published papers address this problem \citep{Lai}. An approach based on door opening detection was proposed by \cite{Aran2016}. Authors assumed that residents close the door when they leave the apartment and open it when they return. The period between two door events was registered as an outing event. Motion and pressure sensors were also used for outing detection by \cite{Petersen2014}. Motion sensors were placed on each room while pressure sensors on doors. A camera was installed at the entrance allowing the authors to identify when the elder was leaving the house. It is worth mentioning that the camera was only used for gathering the ground truth and not for event detection. Logistic regression was employed to detect home absence. Authors linked prolonged outing with less loneliness. 
Pyroelectric sensors were also employed for home absence detection \citep{DBLP:journals/jrm/MoriINSMNOS12}. By monitoring the readings from the sensors, the proposed system was able to detect outing events accurately. Both outing time and duration were logged and authors were able to identify abnormal home absence. Similar to \cite{DBLP:journals/jrm/MoriINSMNOS12}, a solution based on infrared sensors was proposed by \cite{Suzuki}. An infrared sensor placed on the entrance was exploited to detect outing events. When the entrance sensor was triggered a going out event was registered. A second sensor trigger after the going out event was indicating that the tenant was back home. Non intrusive load monitoring (NILM), could provide useful information about the activities performed by the occupants of the house and their behavior \citep{GRAMHANSSEN201894}. NILM requires minuscule intervention at the house, as it is achieved using a single sensor. Using the disaggregated data as input, not only home absence can be detected but also information about the behavior of the residents can be extracted. While most works available in the literature perform energy disaggregation on AC power supply, there is an increased interest in DC power supply as well. A power generation source connected to a DC micro grid could potentially eliminate the power loss when converting AC to DC \citep{Quek1,Schirmer}. Due to the increased DC appliances available, new identification techniques are needed as AC disaggregation is not applicable to DC micro grid. K-nearest neighbors \citep{Quek3} and One-direction Convolutional LSTM RNN \citep{Quek2} have been proposed. Both produced excellent results on the energy disaggregation problem. Our approach exploits disaggregated appliance electrical consumption for outing detection, a novel non intrusive, non invasive approach that has not been thoroughly investigated in the past. 
Since power consumption is a passive unobtrusive sensing method, there are no concerns regarding privacy. Additionally, elder people are more friendly towards less invasive sensors compared to intrusive or wearable sensors. In this paper a benchmark between several machine learning models that can tackle absence detection based on appliance power use is presented. \section{Our Approach} In our work an outing detection approach based on electricity use of main appliances is proposed. Monitoring the power consumption can be achieved with unobtrusive techniques that require minimum intervention in the house. Smart plugs could be used on wall sockets in order to monitor the electrical use or it could be incorporated in a power disaggregation system, where the disaggregated energy consumption is used as input on the outing detection module. In this section our motivation, data collection, our methodology and the experiments performed are discussed. \subsection{Motivation} Outing detection based on appliance use could be beneficial in many real world scenarios where it is important to know whether or not the residents of a house are present. Our motivation was based on two scenarios: 1) Elderly people living alone and the early detection of mental diseases. 2) Monitor home quarantine especially during pandemics. With the outbreak of COVID-19, specialists recommend home isolation especially for people suspected to be sick. Despite instructions, many of them break their home quarantine helping the spread of corona-virus. An effective, cheap and easy to install home absence detection system would be extremely useful in cases like that. Contributions of an approach based on electrical consumption would be significant: a) the system would be easy to deploy without excessive house modifications. b) Load monitoring is a non intrusive method. 
Personal privacy can also be retained as, given a low enough sample frequency, machine learning algorithms can not extract specific knowledge. c) Houses with similar resident and electrical consumption profile could benefit from pre-trained machine learning models. d) An evaluation of different machine learning techniques would provide a baseline for future research. Preserving privacy is a strong motivation on smart home installations that monitor energy use \citep{6194398,Ukil}. As \cite{6194398} mention in their work, privacy issues are regulated by government agencies in most countries with strict laws. Elderly people have a different level of acceptance of information technology monitoring their health compared to young population \citep{FISCHER2014624}. Privacy and social stigmatization are important barriers that must be addressed for health monitoring tools. In addition to that, ethical issues arise when private information is gathered, transmitted and processed \citep{10.1007/978-3-319-57348-9_22}. A possible disclosure of sensitive information, may have a negative impact on the social life of the person, as it could be stigmatized. \subsection{Dataset} The dataset used in our research is the UK Domestic Appliance-Level Electricity (UK-DALE) \citep{UK-DALE}. The dataset contains electrical use from five houses. Both the total consumption as well as disaggregated consumption of individual appliances are recorded. The use was recorded every 6 seconds. Data from house 1 were used in our work, covering a period of 4.3 years. In our work, four appliances were employed to detect home absence: television, kettle, oven and microwave. As the dataset does not contain annotated data for home absence, the data were manually annotated. Everyday outings (i.e. home absence events) were added based on the consumption of the aforementioned devices. Additionally, major outing events were added. 
A Christmas trip starting on December $24^{th}$ till $26^{th}$, a 4-day spring break randomly placed in the period between $15^{th}$ of March and end of May, summer vacation for 2 weeks on early August and a weekend trip during autumn. The Christmas trip could be extended till $28^{th}$ in case the $26^{th}$ is a weekend. In addition to the aforementioned outings, daily outing events were also introduced. Each week day, home absence was added between 8:30 in the morning and 16:00 in the afternoon, in case no electrical use was registered in that time frame. As a result a daily work routine was simulated. Finally, random outing events were added on Saturdays. An additional binary feature was introduced indicating whether the day was a bank holiday / weekend or not. While our intuition was that such a feature would improve the performance, that did not happen. The aforementioned binary feature was excluded in order to avoid overfitting (e.g. assume that people are always away on bank holidays). As already mentioned, the dataset was over-sampled with measurements taken every 6 seconds. Since detecting home absence in such a small time window is futile, the dataset was down-sampled. Measures were re-sampled to 30 minutes and the mean of all disaggregated values was used. The result was a dataset with 78.186 total instances, where 24.081 were outing events. The distribution of outing events on each weekday can be seen on Figure \ref{fig:outing}. \begin{figure} \centering \includegraphics[width=1\textwidth]{observations.eps} \caption{Daily Outing events distribution} \label{fig:outing} \end{figure} One of the main reasons we had to create our own data was the absence of a publicly available electric use dataset with home absence reported (based on our research). 
Although there are some datasets with outing ground truth, such as the Intelligent Systems for Assessing Aging Changes (ISAAC) \citep{Kaye} cohort and the ORCATECH Living Lab, the majority of them rely on ambient sensors and cameras. Pressure sensors are usually placed on door mats and / or contact sensors are placed on doors in order to detect when someone leaves or enters a house. \subsection{Experiments} Several machine learning algorithms were evaluated. More specifically Decision Tables (DT), C4.5 Trees \citep{Salzberg1994}, Random Forests (RF) \citep{TinRF}, Naive Bayes (NB), Multilayer Perceptron (MLP) \citep{Rosenblatt} and Deep Neural Networks \citep{LeCun2015} (DNN) were employed. Multilayer Perceptron was limited to less than 10 hidden layers while Deep Neural Network's hidden layers were left unlimited. All methods were implemented using Python, SciKit Learn \citep{scikit-learn} and Keras \citep{chollet2015keras} with Tensorflow \citep{tensorflow2015-whitepaper} backend. The selection of those algorithms was based on related works found on the literature \citep{Lentzas2020}. Additionally, they are some of the most commonly used machine learning algorithms \citep{7724478}. A decision table is a machine learning algorithm documenting the different decisions taken depending on different set of conditions (similar to a flowchart). They still remain a solid choice on machine learning since they have a series of advantages. One of the main advantages of decision tables is that the result is interpretable allowing one to understand the reasoning behind the decision taken. Additionally they are immune to data scaling and multicolinearity as well as less feature engineering is required. The main drawback is the tendency to overfit and the requirement to load all the data to memory (batch training is not supported). C4.5 tree is a variant of decision trees. It is one of the most widely used machine learning algorithms \citep[p.~191]{WittenFrankHall11}. 
Information entropy is used to build the decision tree using a set of training data. At each node a feature is chosen and the training dataset is split into subsets enriched in one of the classes. Information gain is then used as the splitting criterion. Recursive execution over the subsets creates the final tree. In order to avoid overfitting, pruning is used. In our work post-pruning was used. The entire tree was built first and then certain branches were removed using sub-tree replacement (a sub-tree is replaced with a leaf if the classification error is reduced). While C4.5 trees are interpretable and can deal with noise efficiently they require a bigger training set and small variations in data can lead to different trees. Random forests, as the name implies, consists of multiple decision trees. Each tree predicts one class and the class that was predicted most times is the output of the model. This ensemble method achieves higher accuracy compared to decision trees. Interpretability is sacrificed but a higher accuracy is usually achieved (compared to a single decision tree). Accuracy may not increase in case of problems with multiple categorical variables and when there is a linear correlation between the predictive attributes and the target variable \citep{SMITH201385}. In our work C4.5 trees were used to create the Random Forest. Naive Bayes classifiers are based on Bayes theorem and are among the simplest Bayesian network models. They can achieve high accuracy when coupled with kernel density estimation. Kernel density estimation was used in our experiments. NB are faster when compared with more sophisticated methods while achieving good accuracy. Multilayer perceptron is a feedforward artificial neural network. MLP consists of at least three layers of nodes (input, output and one or more hidden). In our work hidden layers were limited to 10. On the other hand, the number of layers on the Deep Neural Network were left unlimited. 
DNN are artificial neural networks with multiple layers between the input and output layers. They can model complex non-linear relationships. Overfitting and computation time are common issues both on DNN and MLP. \subsubsection{Hyperparameter Tuning} All the aforementioned machine learning algorithms have several parameters that require tuning before training. There are several techniques that can help researchers tune those parameters (although some of them could be set based on intuition). In our work all the models (except from the DNN) had their parameters tuned using quantum genetic algorithms as described by \cite{LentzasQ}. Quantum genetic algorithms (QGA) \citep{Narayanan2002} are a variation of genetic algorithms \citep{Goldberg1979}, an evolutionary type of algorithm. The main advantage of QGA is the faster convergence to the local best while performing global search. Each combination of hyperparameters was expressed as a quantum chromosome. A model was trained for each chromosome and based on their respective performance, the population is evolving, converging to the best solution. In order to avoid local optimal solutions, quantum disaster was employed \citep{Miao2009}. DNN hyperparameter tuning was performed with Randomized Parameter Optimization \citep{Bergstra}. A set or range (in case of continuous variables) is given for every hyperparameter. A random search is then performed on these distributions for a total of $N$ times. For each combination of hyperparameters a model is trained and the hyperparameters associated with the highest accuracy model are returned. \subsubsection{Evaluation Metrics} Evaluating the performance of each algorithm used was based on the most common metrics used in binary classification: Accuracy, Precision, Recall and F-score. In order to calculate those metrics, the confusion matrix is used. Confusion matrix is a $C_{n \times n}$ matrix where $n$ is the number of classes. 
Each element $C_{i,j}$ is the number of examples that belong to $i_{th}$ class and classified as instances of $j_{th}$ class. Confusion matrix allows the extraction of useful numerical values used on the aforementioned metrics: \begin{itemize} \item True Positives (TP): positive instances classified correctly. \item True Negatives (TN): negative instances classified correctly. \item False Positives (FP): negative instances classified as positive \item False Negatives (FN): positive instances classified as negatives \end{itemize} The information mentioned above are used in metrics calculation. More specifically: \begin{itemize} \item Accuracy: percentage of correctly classified examples (both positive and negative) \item Precision / Recall: ratio of correctly classified instances to the total positive classified examples and total positive instances respectively. \item F-score (F1): combination of Precision and Recall in one single metric. \end{itemize} \begin{align*} Accuracy = \frac{\sum_{TP} + \sum_{TN}}{\sum_{Examples}} && Precision = \frac{\sum_{TP}}{\sum_{TP} + \sum_{FP}}\\ Recall = \frac{\sum_{TP}}{\sum_{TP} + \sum_{FN}} && F1 = 2\frac{Precision \times Recall}{Precision + Recall} \end{align*} \subsection{Evaluation} Each model was trained using the same features. The chosen features can be seen on Table \ref{features}. Appliance use was converted to a binary variable based on whether the device was on or off. Appliances with use lower than 10 Watts were considered off and the rest were set to on. By observing the dataset, when an appliance was operating, the electrical power consumed was greater than 30 Watts, while a device was on standby, the consumption was approximately 1 Watt. Setting the threshold to 10 Watts, allowed us to rule out power spikes identified in the data. The week day variable was encoded to categorical variable in range [0-6] with (0) representing Monday and (6) Sunday. 
Label (ordinal) encoding \citep{VONEYE1996xix} was preferred over one-hot-encoding \citep{lantz_2015}. In the first place we would like to imply an order on week days. Moreover, using ordinal encoding, we avoided increasing the dimension of our dataset. Lastly, there was not a significant performance loss (as seen on Table \ref{results_average},\ref{results_table}) while using label encoding. \begin{table}[htbp] \centering \begin{tabular}{c|c} \textbf{Feature} & \textbf{Description} \\ Appliance on/off & Binary variable indicating whether the appliance was on or off \\ Time & The time on 24h format \\ Week day & The day of the week (Monday - Sunday) \\ Day & Day part of the date (1-31) \\ Month & The month of the observation(1-12) \end{tabular} \caption{Features used} \label{features} \end{table} During evaluation 10-fold cross validation was employed. Each experiment was repeated 10 times. The results can be seen on Table \ref{results_table}, \ref{results_average}. The former table contains the results of the best run while the latter the average results of all runs. As one can observe on Table \ref{results_table} MLP had the best single run compared to the rest of the classifiers evaluated. C4.5 trees, Decision Table and Random Forests had performance on par with the MLP. Naive Bayes and Deep NN on the other hand had the worst performance compared to the rest, but still achieved an overall good score. Observing the average of the 10 runs executed (Table \ref{results_average}) more information about the performance of each model can be extracted. Compared to the best run, averaging the results suggests that C4.5 Trees performance is better. Comparing the average score with the best score achieved, C4.5 and Decision Table had the smaller difference, thus providing more robust results. 
\begin{table}[htbp] \centering \begin{tabular}{c|c|c|c|c} \textbf{Classifier} & \textbf{Accuracy} & \textbf{Precision} & \textbf{Recall} & \textbf{F-score} \\ Decision Table & 0.9774 & 0.9861 & 0.986 & 0.9847 \\ C4.5 Tree & 0.9795 & 0.9916 & 0.9842 & 0.9861 \\ Random Forests & 0.9758 & 0.9829 & 0.9856 & 0.9836 \\ Naive Bayes & 0.8766 & 0.8894 & 0.9602 & 0.9192 \\ MLP & \textbf{0.982} & \textbf{0.9927} & \textbf{0.9946} & \textbf{0.9877} \\ Deep NN & 0.8671 & 0.9035 & 0.9404 & 0.9116 \end{tabular} \caption{Best Accuracy, Precision, Recall, F-score per classifier} \label{results_table} \end{table} \begin{table}[htbp] \centering \begin{tabular}{c|c|c|c|c} \textbf{Classifier} & \textbf{Accuracy} & \textbf{Precision} & \textbf{Recall} & \textbf{F-score} \\ Decision Table & 0.974 & 0.9819 & \textbf{0.9829} & 0.9824 \\ C4.5 Tree & \textbf{0.9766} & \textbf{0.9882} & 0.98 & \textbf{0.9841} \\ Random Forests & 0.9710 & 0.9788 & 0.982 & 0.9804 \\ Naive Bayes & 0.8678 & 0.8781 & 0.9533 & 0.9141 \\ MLP & 0.9727 & 0.9824 & 0.9809 & 0.98156 \\ Deep NN & 0.8575 & 0.8877 & 0.9238 & 0.9054 \end{tabular} \caption{Average Accuracy, Precision, Recall, F-score per classifier} \label{results_average} \end{table} Since the performance of most classifiers was similar a paired t-test analysis was performed using Weka \citep{hall09:_weka_data_minin_softw}. The t-test analysis was performed with 0.05 confidence and F-score was used. The best classifier (C4.5) was used as the baseline for the analysis. T-test analysis showed that results obtained from the rest of the models were statistically worse compared to the baseline. \section{Conclusions \& Future Work} Outing detection based on appliance use is a field that despite the benefits it could provide is not thoroughly discussed in the literature. This paper provides a benchmark of several machine learning models. UK-Dale dataset was used for training. As no home absence events were present in the dataset, artificial events were introduced. 
Results showed that home absence detection based on electrical consumption is feasible. Although MLP had the best score on a single run, C4.5 tree achieved the best average score. T-test analysis showed that C4.5 tree had statistically better results compared to the rest of the benchmarked classifiers. Home absence detection could be applied in a variety of scenarios. A potential application is planning home delivery routes. Knowing, or predicting, outing periods, delivery companies could plan their daily routes to deliver as many parcels as possible. This is important since delivering as many goods as possible without having to reschedule for another day, could reduce operating costs. Smart home applications targeting elder people living alone, could also utilize a home absence detection module. Detecting prolonged periods of home absence, especially during abnormal time periods, could lead to early detection of emergencies or mental issues for elders. Additionally, elders suffering from dementia could benefit from a smart home able to detect outing events. According to Alzheimer's Association, wandering off could occur in people suffering from dementia. Having the ability to detect whether the patient left the house could lead to an early alarm to his relatives or caretakers. This could promote independent living of dementia patients. Knowing when a person is out of home could be useful on energy saving systems as well. A smart home installation could take certain actions in order to preserve power and reduce cost and environmental impact when the owners are out of home. For instance a smart thermostat could turn off the heating when the house is empty and turn it on in time before the residents return. A boiler could be automatically turned off when left on during an outing event. All these applications would reduce the energy requirements of a smart house. Future work could be focused on integrating home absence detection on a power disaggregation module. 
Machine learning approaches provide a solid power disaggregation method \citep{Nalmpantis2019}. Using the disaggregated results provided by a system deployed on a real house, will provide a complete outing detection system. Further investigation is needed though as disaggregation error could accumulate and impact absence detection. Transfer learning should also be evaluated. As already mentioned on motivation section, pre-trained models on large datasets could be exploited. Especially on houses with alike energy demand profiles and residents following similar outing patterns. Transfer learning would greatly reduce deployment time without severely affecting performance. In addition to the above, a non artificially created dataset will be collected. Ground truth could be logged with sensors on the outer door and door mat. This could provide a more robust approach on outing detection based on electrical use. Although the rules applied to the dataset were selected based on accurate simulation of human behavior, the resulting dataset could be biased compared to data collected from a real person. Even by applying a random factor when the dataset was generated, human behavior can't be described precisely by rules.
{ "redpajama_set_name": "RedPajamaArXiv" }
5,502
{"url":"https:\/\/learn.careers360.com\/engineering\/question-can-someone-help-me-with-this-which-is-not-amphoteric\/","text":"Q\n\n# Can someone help me with this, Which is not amphoteric?\n\nWhich is not amphoteric?\n\n\u2022 Option 1)\n\n$HSO_{4}^{-}$\n\n\u2022 Option 2)\n\n$HCO_{3}^{-}$\n\n\u2022 Option 3)\n\n$H_{2}PO_{4}^{-}$\n\n\u2022 Option 4)\n\n$HCOO^{-}$\n\n94 Views\n\nAs learnt in\n\nIonization reaction of dibasic acid -\n\n$H_{2}X(aq)\\rightleftharpoons H^{+}(aq)+H\\bar{X}(aq)$\n\n$\\bar{H}X(aq)\\rightleftharpoons H^{+}(aq)+X^{2-}(aq)$\n\n- wherein\n\n$K_{a1}=\\frac{[H^{+}]\\:[\\bar{H}X]}{[H_{2}X]}$\n\n$K_{a2}=\\frac{[H^{+}]\\:[X^{2-}]}{[{H}X^{-}]}$\n\n$K_{a1}\\:and\\:K_{a2}$\u00a0 \u00a0are called first and second ionization constant.\n\nOf all the species given,\u00a0$HCOO^{-}$\u00a0is the only one that cannot donate a proton and act as an acid. All of these can accept a proton thereby acting as a lease.\n\nOption 1)\n\n$HSO_{4}^{-}$\n\nThis solution is incorrect.\n\nOption 2)\n\n$HCO_{3}^{-}$\n\nThis solution is incorrect.\n\nOption 3)\n\n$H_{2}PO_{4}^{-}$\n\nThis solution is incorrect.\n\nOption 4)\n\n$HCOO^{-}$\n\nThis solution is correct.\n\nExams\nArticles\nQuestions","date":"2020-01-24 19:24:40","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 14, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.7085031270980835, \"perplexity\": 14979.658906678467}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 
5, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2020-05\/segments\/1579250625097.75\/warc\/CC-MAIN-20200124191133-20200124220133-00284.warc.gz\"}"}
null
null
Q: MediaInfo not able to get details of uploaded file in the same manner it can when pointed to local file I have been trying to get the durationstring of a piece of video content that has been uploaded to an MVC4 application using the MediaInfo.dll which I have added to the application as a reference. I am successfully able to view the details of the file when typing the location of my file locally using the following code: MediaFile uploadedFile = new MediaFile("C:\\Users\\jp\\Desktop\\Quarry.mp4"); string duration = uploadedFile.General.DurationString.ToString(); However when I try to use this on an uploaded file I am not getting back the information that I had expected. My controller is as follows: [HttpPost, ValidateInput(true)] public ActionResult NewContent(HttpPostedFileBase postedFile, string username, FormCollection form) { if (postedFile != null) { HttpPostedFileBase postedFileCopy = postedFile; postedFileCopy.InputStream.Position = 0; Stream stream = postedFile.InputStream; MediaFile uploadedFile = new MediaFile(Server.MapPath(postedFile.FileName)); string duration = uploadedFile2.General.DurationString.ToString(); string[] name = form.GetValues("name"); string[] author = form.GetValues("author"); string[] description = form.GetValues("description"); TimeSpan videoDuration = TimeSpan.Parse(duration); try { avm.AddContent(postedFile, stream, Convert.ToString(name[0]), Convert.ToString(author[0]), Convert.ToString(description[0]), videoDuration); return RedirectToAction("Contents", "Admin"); } catch (Exception ex) { System.Diagnostics.EventLog.WriteEntry("Application", ex.Message, System.Diagnostics.EventLogEntryType.Error); return RedirectToAction("Unsuccessful", "Admin"); } } else return RedirectToAction("NewCourse", "Admin"); } I have tried: MediaFile uploadedFile = new MediaFile(Server.MapPath(postedFile.FileName)); MediaFile uploadedFile = new MediaFile(postedFile.FileName); MediaFile uploadedFile = new MediaFile(postedFile.toString()); MediaFile 
uploadedFile = new MediaFile(System.IO.Path.GetFullPath(postedFile.FileName); Any ideas of how I can get MediaInfo to recognise the postedFile in the same manner it is able to read the local file. Or how I can retrieve the path of the client machine's file location. A: There is a way you can parse a file using a stream instead of a file. Look into these methods: Open_Buffer_Init(..) Open_Buffer_Continue(..) Open_Buffer_Continue_GoTo_Get(..) Open_Buffer_Init(..) It took me a while to work out how to get the IntPtr for the buffer.. remember to use GCHandle and AddrOfPinnedObject My sample is all over the place if you need any more help I'll try to get my code working again.
{ "redpajama_set_name": "RedPajamaStackExchange" }
5,284
Home Sports Jets hoping Schmidt's skills match his wit and charm His personality has lifted spirits in the locker room By: Jason Bell Posted: 8:33 PM CDT Monday, Oct. 11, 2021 Last Modified: 9:58 PM CDT Monday, Oct. 11, 2021 | Updates MIKAELA MACKENZIE / WINNIPEG FREE PRESS FILES Defenceman Nate Schmidt has quickly endeared himself to the Winnipeg Jets organization with his megawatt grin and rapid-fire wit. Nate Schmidt makes a memorable first impression. And second and third and… The unabashed extrovert has quickly endeared himself to the Winnipeg Jets organization with his megawatt grin and rapid-fire wit since he reported to training camp a few weeks back. All in moderation, mind you. "It's good to have different personalities. If you had 25 Nate Schmidts…'" winger Blake Wheeler quipped Monday morning. "That would be scary," chimed in Schmidt, seated next to the Jets captain in the media room at Canada Life Centre. "You'd need padded walls and a sound-proof locker room," added Wheeler. "It's been a great addition. Knowing Schmidty just the little bit I've known him in the past, I knew he would be a good fit in every way with our organization… he's fit in incredibly well in every way. I think his personality has been a real boost for our team." The question now is whether his play can somehow transcend that streaming charm and charisma. Schmidt, 30, is paired with Josh Morrissey as start of the 2021-22 NHL season draws near, and the Jets need both left-shooting defencemen to rebound from down 2020-21 seasons with their respective squads. Things didn't work out for Schmidt in Vancouver, whose offensive numbers dipped to just 15 points in 54 games with the Canucks in the all-Canadian division after three straight 30-plus-point campaigns with the Vegas Golden Knights. The Jets only surrendered a third-round pick to Vancouver to acquire his services in late July but will pay him just shy of $6 million this season and each of the next three. 
Wheeler is confident the fellow Minnesotan will contribute far more than just playful banter, lauding the trades for Schmidt and another veteran blue-liner, Brenden Dillon. "It wasn't just moves to fill holes. They're the right type of additions, too. the type of additions that fit our team and the needs of our team," said Wheeler, preparing for his 11th season opener in a Winnipeg jersey. "Obviously, Schmidty can get up the ice and break the puck out, make a great first pass. "Same with Dilly. (He) brings a little bit of edge to our back end. Like I said before with Nate, just two incredible additions to our locker room, too. They were both home runs." Dillon is paired with Neal Pionk, Winnipeg's most dependable and productive defenceman last season. Dylan DeMelo will start the season skating alongside Logan Stanley on the third unit, head coach Paul Maurice all but confirmed Monday. Schmidt, who is comfortable playing the right side, said the shift to a new organization has been relatively smooth, on and off the ice. "I've enjoyed being in the room. That's the first thing you have to look forward to when you get to a new group. How much is too much early on? Our group has been really great," Schmidt said. "(After a trade) you're not really sure how it's going to work out. You look at the hockey team and you can find out where you fit in as a hockey player. But as you fit in as a person in the room is something that's completely different. "That is something that when you look at training camp as a whole, it doesn't feel like it's only been three weeks. And that's something that is a great thing for myself, (Dillon) and guys that are new coming in." Want more sports? Three times a week, sports editor Steve Lyons provides his take on the Jets, Bombers, other sports and more. Sign up for Playbook The Schmidt-Morrissey pairing aims to take advantage of its speed and heady play to push the pace as much as possible. 
"You know what, playing with Josh (Morrissey) has really helped both of us get our feet moving up the ice. We're pushing each other a lot. We talk about it each drill, be really aggressive, let's get our feet moving, let's try and shut things down early because we can with our feet," Schmidt said. "If we're standing around, that's not going to be good. If we're moving and getting our feet up the ice and getting into that fourth man's hole up the ice, that's when we're going to be successful as a pair and as a five-man unit that goes on the ice." jason.bell@freepress.mb.ca Twitter: @WFPJasonBell Jason Bell Assistant sports editor Jason Bell wanted to be a lawyer when he was a kid. The movie The Paper Chase got him hooked on the idea of law school and, possibly, falling in love with someone exactly like Lindsay Wagner (before she went all bionic). Oct 12, 2021: Jets should be a force... if certain things fall into place Mike McIntyre Oct 11, 2021: Perfetti probably won't start season with Jets in Anaheim Jason Bell Updated on Monday, October 11, 2021 at 9:58 PM CDT: Fixes typo.
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
8,947
\section{Introduction to Hidden Valleys and Unparticles} The ``hidden valley'' scenario \cite{HV1} envisions a hidden sector coupling weakly (but not too weakly) to the standard model, with a multi-particle production mechanism and a mass gap (or an equivalent; see below.) It encompasses a very wide class of models, since almost any gauge theory from the last forty years, with a mass gap added, will fit within this category. A few examples of such models were given in \cite{HV1}. The phenomenological signatures, which can include novel and sometimes spectacular signals at particle accelerators, were explored in \cite{HV1,HV2,HV3,HVWis}, but much more study is needed. The main point is that some of the particles which are produced in abundance in the hidden sector can decay back to standard model particles on detector time-scales, creating a directly observable signal at the LHC. The ``unparticle'' scenario \cite{Un1} (see also \cite{SeibergNAD,RS2,HEIDI}) envisions a hidden sector coupling weakly (but not too weakly) to the standard model, with conformal dynamics (itself a multi-particle production mechanism) and, in the original papers, no mass gap. It encompasses a wide class of models; essentially any conformal or near-conformal field theory known from the last forty years will do. In this case the effects described in \cite{HV1,HV2,HV3} are absent, because the particles in the hidden sector distribute their energy into massless modes (or, if there is a tiny mass gap, into states with lifetimes too long to observe.) Then one must rely on less spectacular but no less interesting inclusive processes, such as kinematic distributions in events with missing energy, and effects on production of small numbers of standard model particles. 
Both scenarios are motivated in part by the fact that hidden sectors coupling to the standard model at or near the TeV scale are common in non-minimal beyond-the-standard-model theories, for example in supersymmetry breaking models and in many string theory constructions. The Randall-Sundrum-2 scenario \cite{RS2} is dual (by gauge-string duality) to such a model. Other recent examples include the original Twin Higgs \cite{TwinHiggs} and Folded Supersymmetry models \cite{FoldedSUSY}. The other motivation for considering the two scenarios is that they both give unusual signals at the LHC. In the hidden valley case, these signals often create experimental challenges that have been considered very little, if at all. To avoid missing these signals altogether in the complex environment of the LHC, it is vital that these signatures be studied. Soon after the term ``unparticles'' was introduced, mass gaps were added to unparticle theories, for example in \cite{unSteph,unFox,unQuiros}. Somewhat surprisingly, the fact that unparticle models with mass gaps are identical to hidden valley models --- ones that have conformal dynamics above their mass gap --- seems not to have been recognized. In particular, all known conformal field theories in four dimensions are gauge theories, which all have the parton shower dynamics that plays an important role in \cite{HV1}. With a mass gap, many of these models also have hadronization and/or cascade decays, as again is common in hidden valleys \cite{HV1}. The predictions of hidden valley models are qualitatively the same, and many of the experimental implications similar, independent of whether the multiparticle dynamics above the mass gap is conformal or non-conformal, and whether it is weakly-coupled or strongly-coupled. The main differences are ones of degree. 
Thus an unparticle model with a mass gap satisfies all the criteria for a hidden valley model, and should have the same predictions, even though the literature, up to now, has indicated otherwise. In this paper I will show how to make the hidden valley phenomenology of these models more obvious. I will emphasize that focusing on the inclusive ``unparticle'' phenomenology often overlooks the main predictions for experiment. The most dramatic signals typically come from exclusive final states that cannot be studied using current unparticle methods. Even the most striking inclusive signals are often not of unparticle type. In any case, unparticle propagators are not sufficient for prediction; much more detailed information about the conformal sector and about the breaking of conformal invariance is needed. I will now summarize the main points of this paper. These are a combination of general cautionary remarks and predictions of LHC signatures that may arise in this context. \begin{itemize} \item Within the unparticle language for describing conformal field theory, there are two critical issues, often ignored, that strongly impact phenomenology. These are the imaginary part of the unparticle propagator, and unparticle interactions, including both self-interactions and interactions with other composite operators in the conformal field theory. With a mass gap, these effects become even more central, typically dominating the phenomenology. (Throughout this paper, I will generally refer to a mass ``gap'', but in fact only a mass ``ledge'' --- where one or more particles get stuck because they cannot decay within the hidden sector, forcing them to decay via the standard model --- is necessary for many of the conclusions.) \item Once a mass gap is introduced at the scale $M$, conformal invariance and the dimensions of operators cannot predict much of the physics in the vicinity of $M$. 
The details of the hidden sector and of the precise form of conformal symmetry breaking are critical. Not even the dominant cross-sections, much less distributions of kinematic variables for the most common events, can be predicted from unparticle propagators. Unparticle methods are reliable only on tails of distributions, where all the kinematics lies high above $M$. \item The hidden sector often becomes much more visible once a mass gap is introduced and the sector becomes a hidden valley. Hidden valley phenomenology can be spectacular. However, the range of visible phenomena that can occur in a hidden valley is enormous, as emphasized in \cite{HV1,HV2,HV3}, and cannot be predicted from the dimensions of the operators in the conformal field theory above the mass gap. Instead, many more details of the hidden sector are required in order to make predictions for the LHC, and in particular, to determine whether the hidden sector phenomenology is invisible, is visible but challenging, or is visible and spectacular. \item As an example, the mixing of the Higgs boson with an ``unparticle'' can easily lead to spectacular Higgs decays, such as Higgs decays to four leptons, Higgs decays to eight or more partons, Higgs decays to two or more particles which decay with displaced vertices, etc. This is for the same reason as in \cite{CFW,HV2,CFW4}; see also \cite{manyhiggs, NMSSM, JHU}. Again, the details cannot be predicted from the dimensions of operators; specific knowledge of the hidden sector and of its conformal symmetry breaking are required for any predictions. \item It has been proposed that a mass gap in a conformal hidden sector will lead to a tower of narrow states, with lifetimes decreasing as a power law as one goes up the tower. This is only true in an extreme situation. Instead, it is far more likely that all but one or two of the lightest states will decay to one another with short lifetimes not predictable from unparticle methods. 
Only the lightest one or two states in each tower will be extremely narrow and may have long enough lifetimes to give displaced vertices. This is exactly analogous to what one sees in QCD, or in pure Yang-Mills theory \cite{Morningstar}, and is also precisely what one expects in a confining hidden valley model \cite{HV1}. \end{itemize} Meanwhile, strong dynamics in a hidden sector can have other, non-unparticle, effects. \begin{itemize} \item Strong couplings can lead to sharp resonances at a scale very roughly of order $M$, but with a distribution of masses and widths that cannot be predicted from any unparticle method. \item Large anomalous dimensions (of operators not necessarily coupled to the standard model) can suppress global symmetry breaking and/or supersymmetry breaking in the hidden sector, with potentially observable consequences. It is possible that supersymmetry might be discovered first in the hidden sector before evidence for it in the standard model sector is convincing. \item Operators of high spin and large anomalous dimension, which cannot be given low-dimension couplings to the standard model, can strongly affect the hidden parton shower, which often plays an important role in hidden valleys; see for example \cite{HV1}. The effect is that high energy events, instead of producing two jets of hidden-sector-particles, produce a large number of soft hidden-sector-particles, in a distribution that probably is quasi-spherical. This has important experimental consequences. \end{itemize} We first review the hidden valley scenario and its predictions. We briefly review unparticles and discuss the effect of adding conformal symmetry breaking. In Sec.~\ref{sec:breakCFT} we explore some of the physical phenomena which can arise when conformal symmetry is broken, emphasizing its diversity. We reconsider \cite{unQuiros} and come to different conclusions. 
In Sec.~\ref{sec:unSteph} we deconstruct the Stephanov model of unparticles \cite{unSteph} and reconstruct it, coming to very different conclusions from \cite{unSteph} about the effect of conformal symmetry breaking. Finally in Sec.~\ref{sec:othereffects} we discuss some non-unparticle effects of strong coupling in the hidden sector, and their possible consequences for observable signatures at colliders. \makefig{0.4}{newvalley}{In the hidden valley scenario, a hidden sector couples at or near the TeV scale to the standard model sector. In the simplest hidden valleys, a barrier limiting production of hidden-sector particles will be breached in the near future. The number of particles increases through a multi-particle production process in the hidden sector. A mass gap prevents decays within the hidden sector, allowing hidden-sector particles to decay to visible particles, often with long lifetimes due to the barrier. Events with high multiplicity and/or displaced vertices naturally result.} \subsection{The Hidden Valley Scenario} In the hidden valley scenario, a model must have three ingredients, illustrated in \reffig{newvalley}: \begin{itemize} \item a coupling through a ``communicator'' (``mediator'', ``portal'') to a hidden sector, \item a multi-particle production process in the hidden sector, and \item a mass gap or ``ledge'' which prevents some particles in the hidden sector from easily decaying to lighter particles in the hidden sector \end{itemize} The specific realization in any model may take many forms. The communicator could be any neutral particle, including \begin{itemize} \item neutral gauge bosons, such as $Z$ or $Z'$ bosons \cite{HV1}, \item Higgs bosons \cite{HV1,HV2}, \item neutralinos \cite{HV3}, \item right-handed neutrinos. \end{itemize} Communication could also be generated through a loop of particles charged under both standard model and hidden sector gauge groups \cite{HV1}. 
The multi-particle production mechanism might include \cite{HV1} \begin{itemize} \item cascade decays of massive particles, \item parton showering (but note this need not be QCD-like parton showering) \item hadronization (but note this need not be QCD-like hadronization) \end{itemize} In QCD, let us note, all three mechanisms are operative, and all three may be active in the hidden sector. However, any one of the mechanisms is sufficient for the predictions below. Note also that in the gauge-string correspondence (often called AdS/CFT \cite{malda,GKP,WittenAdS} or AdS/QCD \cite{WittenADSQCD,ReyTheisenYee,Sonnen,Csaki2,Nonestar,cascade}, with RS-type models \cite{RS1,RS2} arising in a certain limit), these dynamical processes can be represented through equivalent processes in five dimensions, as we will review in Sec. \ref{sec:unSteph}. Finally, a mass gap (or its equivalent) could arise from several sources: \begin{itemize} \item explicit masses from, for example, supersymmetry breaking or the Higgs expectation value. \item confinement (of a form similar to, or different from, QCD) \cite{HV1} \item the Higgs mechanism (which is electric-magnetic dual to confinement) \item compactification of an extra dimension (which is often dual to confinement or the Higgs mechanism) \end{itemize} The standard model and its supersymmetric extension exhibit examples of the first three. Meanwhile the fourth, if the extra dimension has a warp factor, is known in some cases to be a crude dual description of confinement in QCD, and in some cases to be an {\it exact} dual description of confinement effects (or of the Higgs mechanism) in some non-QCD-like gauge theories. The reason to take such a large scenario with so many classes of models in it is that it makes a general class of novel predictions. 
This is analogous to the way that one gathers many supersymmetric models, extra dimensional models and little-Higgs models together because of their basic mechanisms, which immediately imply missing energy signals and heavy partners for some or all standard model particles. The predictions made in \cite{HV1}, which follow from the general structure of hidden valley models, are \begin{itemize} \item new light neutral states, decaying to the standard model through a variety of decay modes, \item long lifetimes for the new states, including therefore the probability of substantial missing energy and the possibility of highly displaced vertices, \item abundant high-multiplicity final states in high energy processes, with large event-to-event fluctuations in multiplicity, visible energy, event shape, and other quantities. \end{itemize} Other possible phenomena include \begin{itemize} \item non-standard multi-body decay modes for the Higgs boson, including ones already discussed \cite{NMSSM,CFW,HV2,JHU} and beyond, including the possibility of discovery channels involving highly displaced vertices \cite{HV1,HV2} ; \item new decay modes \cite{HV3} for the lightest R-parity-odd (or KK- or T-parity-odd) particle in supersymmetric or extra-dimensional or little Higgs models, and indeed in any model with a new global symmetry; again these decay modes offer several possible sources of highly displaced vertices and high-multiplicity final states. \end{itemize} Since these signals arise so easily and generally, and yet many of them do not appear often or at all in the standard array of most-studied models --- technicolor, supersymmetry and its cousins, extra dimensions or little Higgs models --- they pose potentially new challenges and opportunities for the Tevatron and LHC experiments. A little exploration of the experimental literature and internal notes shows many of the associated issues have not yet been addressed. 
More than anything else, it is this point which makes the hidden valley scenario important to consider. \makefig{0.4}{unvalley}{The unparticle scenario is similar to the hidden valley scenario, but specifically assumes the hidden sector is conformal, which is not a necessary assumption for a hidden valley. In standard unparticle models, the mass gap is very low, so that standard model particles reflect the hidden physics only indirectly. If the mass gap is higher, the unparticle model becomes a hidden valley model, with the same signals.} \subsection{The Unparticle Scenario and the Effect of an Added Mass Gap} The unparticle scenario is equally general. In its original form it requires only two ingredients: \begin{itemize} \item A coupling of one or more gauge-invariant local operator ${\cal O}_{sm}$ in the standard model to one or more gauge-invariant local operators ${\cal O}$ in the hidden sector. \item A conformal field theory in the hidden sector with no mass gap, or a very low one. \end{itemize} The predictions of the scenario include missing energy signals with unusual kinematic distributions for the visible particles that depend on the dimension of the operator ${\cal O}$ \cite{Un1}, and new production mechanisms and interference effects for two-to-two scattering of standard model particles \cite{Un2, allunparticle}, as well as in multi-particle production \cite{multiun}. The addition of a third ingredient --- conformal symmetry breaking that generates a mass gap or its equivalent --- turns these theories into hidden valley models, in particular ones with ultraviolet-conformal dynamics. The general predictions of the models are therefore, not surprisingly, the same. The detailed predictions of course depend on exactly what the conformal sector contains, and especially, as we will see, on how conformal symmetry is broken. This is not easy to discern from the literature, however. 
For example, in \cite{unFox} a strong breaking of conformal invariance was shown to be inevitable in a large class of unparticle models, and the physical effect of a mass gap was modeled. In \cite{unQuiros} the mixing of a Higgs boson with an unparticle sector with a mass gap was explored. In \cite{unSteph} a mass gap was added to a toy unparticle model to illustrate some theoretical points, and also the physical implications of the mass gap were briefly discussed. In all these cases, as we will see, the full story was not told, and the neglected visible signatures are of the type expected in hidden valleys, as described in \cite{HV1,HV2,HV3,HVWis}. \section{Hidden Valley Physics of Unparticles with a Mass Scale} \label{sec:breakCFT} In this section I will consider the addition of conformal symmetry breaking at a scale $M$ to an unparticle sector, mostly concentrating on the case where $M$ is of order or greater than a few GeV. Using a few toy models, I will illustrate how the dominant phenomenology typically cannot be predicted using unparticle techniques. This is especially true at the Tevatron and LHC: the rapidly falling parton distributions bias the phenomenology toward the lowest accessible energies, where conformal symmetry breaking is most manifest. Even if the sector is largely invisible, its production and interference effects are highly variable and cannot be predicted from operator dimensions alone. Instead the details of the conformal sector and its conformal symmetry breaking allow for enormous variety. Moreover, the sector itself may become visible, with all the phenomenology of \cite{HV1,HV2, HV3}. If so, the resulting exclusive events are often the leading observable at the Tevatron and LHC, and often even more so at the ILC. I will mostly focus on a popular scenario in the unparticle literature, where a hidden sector is coupled to the standard model though the Higgs boson \cite{unFox,unQuiros}. 
Many conclusions drawn are not specific to this case, although my presentation will highlight certain details of this particular unparticle coupling. I will consider some other cases briefly in Sec.~\ref{subsec:others}. \subsection{Three Toy Models of the Hidden Sector} \label{subsec:toys} In order to illustrate various physical points, I will introduce three useful toy models of the hidden sector. Model A, hidden scalar QED, will have the advantage of extreme simplicity and great phenomenological riches, at the cost of being fine-tuned and requiring us to remain at weak coupling. Model B, a scalar Banks-Zaks theory, will be only slightly more complicated and will have even richer physics; it is strictly conformal and fixed points might exist at stronger coupling. However it too is fine-tuned. Nevertheless, its concepts can be extended to fermionic Banks-Zaks theories, which are not fine-tuned. Finally, model C, a supersymmetric Banks-Zaks theory, is a theory very similar to model B, as far as its unparticle physics, and suffers no fine-tuning. It has physics far too rich to fully explore in this paper, as it includes all of the phenomenological possibilities of models A and B as a small subset. Now I will describe the models in more detail, and how their unparticles couple to the standard model. Except in section \ref{subsec:others}, where I will briefly consider other examples and show that the conclusions are similar, I will focus on the case where the unparticle couples to the Higgs boson. This case is already sufficient to uncover the exquisite complexity of hidden valley phenomenology. Model A is weakly-coupled scalar QED: a theory of a photon plus $N_f$ massless scalars $\phi_i$ of charge 1. This theory has a small beta function if $\alpha N_f$ is small, as I will assume, so it violates conformal invariance by a small amount. 
But small violations of conformal invariance have small effects on unparticle predictions, as the reader will easily verify in the discussion below. In this theory, the mass operators $\phi_i^\dagger \phi_j$ develop small negative anomalous dimensions and can serve as unparticles of dimension just below 2. Model B is a scalar Banks-Zaks fixed point with an $SU(N)$ gauge theory and $N_f\sim 22 N$ scalars in the fundamental representation. Again the mass operators will serve as unparticles. Although fixed points at strong coupling may exist, I will only consider the weakly-coupled cases. I have chosen scalars rather than fermions here because I want some weakly-coupled examples, and many authors claim unparticles only make sense for $d_{\cal O}<2$, to avoid divergences. I disagree, but do not want to be distracted by this controversy here. The mass operator in a scalar Banks-Zaks theory has $d_{\cal O}= 2 - $order$(\alpha N/\pi)$, an unparticle by any measure. The third toy model addresses the problem of naturalness which is present in models A and B. Model C is a supersymmetric $SU(N)$ Seiberg fixed point \cite{SeibergNAD}, {\it not} necessarily weakly coupled, with $N_f$ flavors of quarks $\psi_i, \tilde \psi^j$ and squarks $\phi_i,\tilde \phi^j$, in superfields $\Phi_i,\tilde\Phi^j$. When $N_f\sim 3N$ the theory is a Banks-Zaks point and is weakly coupled. This theory is natural, and its squark-antisquark bilinear $\phi_i\tilde \phi^j$ is an unparticle of dimension $3-3N/N_f$, which approaches 2 from below as $N_f\to 3N$. As mentioned earlier, the first two theories are highly fine-tuned; they involve scalars $\phi$ and gauge bosons, and both $\phi^\dagger \phi$ and $(\phi^\dagger\phi)^2$ are relevant operators. However, one can check that all results obtained using these models apply also to the third case, which is a natural theory without fine-tuning. 
Many also apply to fermionic Banks-Zaks models, albeit for a fermion bilinear unparticle, whose dimension is close to 3 if the coupling is weak, though it may be much smaller at strong coupling. \subsection{Important General Observations} \label{subsec:generalobs} Before I put these models into action, I would like to make a few observations which are very important for many applications to phenomenology, and are very typical in conformal or near-conformal sectors. \begin{itemize} \item All physically reasonable conformal field theories have non-zero three- and higher-point correlation functions. \item All conformal field theories have composite operators of higher spin and dimension; these will not couple to standard model fields in the Lagrangian but can still play an important role in the physics. \item Many conformal field theories, including these toy models for $N_f>1$, have flavor symmetries under which the unparticles transform as one or more multiplets. \end{itemize} On the first point, note the following. In models A and B the three point function for $\vev{{\cal O}\OO{\cal O}}$ is obviously non-zero; in perturbation theory this is a non-vanishing loop diagram. {\it Importantly, this three-point function does not go to zero as the hidden-sector gauge coupling goes to zero}. Even as the hidden gauge theory becomes free, the unparticles do {\it not} become free. Composite operators have interesting $n$-point functions even in free theories, and these ``unparticle interactions'' must not be neglected, especially once conformal symmetry is broken. We will see this shortly. The only way around this is to take a conformal gauge theory with $N$ colors and take $N$ strictly to infinity. At any finite $N$, the $1/N$ corrections change the physics drastically, and we will see later that even $N\sim 10000$ is not large enough that one can ignore these interactions for phenomenological predictions. 
On the second point, there are operators in the hidden sector which must be present. Obviously these include the stress tensor and any conserved flavor currents. Less obviously, any gauge theory has high-spin high-dimension operators; for example, the so-called ``DGLAP'' operators are always present. These cannot serve as unparticles, since the couplings to the standard model would be highly irrelevant, but they can affect the phenomenology, as we will see in Secs.~\ref{sec:unSteph} and \ref{sec:othereffects}. On the third point, models A, B and C all have multiple unparticles, as do many reasonable conformal field theories, transforming under a flavor symmetry. The Higgs may couple to one linear combination of these operators. This is the case in all of our examples with $N_f>1$. If there are no interactions other than the gauge interactions (and their supersymmetric partners, where appropriate), then the first two theories would have $U(N_f)$ symmetry, and model C has $SU(N_f)\times SU(N_f)\times U(1)$ symmetry. In models A and B, then, the operators ${\cal O}_i^j$ break up into two subclasses: the operator ${\cal O}\equiv \sum_i {\cal O}_i^i$, which is a singlet under $U(N_f)$, and the remaining operators, which form an adjoint of $U(N_f)$. Since the $U(N_f)$ currents commute with the dilatation operator, the members of the adjoint all have a dimension $d_{A}$, and the singlet in general has a different dimension $d_{0}$. (For example, in ${\cal N}=4$ Yang-Mills theory, there are six scalars in an $SO(6)$ global symmetry; the adjoint bilinear has dimension 2 for any coupling while the singlet has a positive anomalous dimension; see \cite{lightscalars} for an application to the hierarchy problem.) In model C, however, the operators ${\cal O}_i^j=\phi_i\tilde \phi^j$ are in the bi-fundamental representation of $SU(N_f)\times SU(N_f)$, and all share the same dimension, unless additional interactions are added. 
The interaction with the Higgs may break these flavor symmetries, splitting the degeneracies within the multiplets and allowing processes that would be otherwise forbidden. Such effects are very important for the phenomenology, and very model-dependent, as we will see shortly. In general, if multiple standard model operators couple to multiple unparticles, they will couple to different linear combinations. One must keep track of the breaking of any global symmetries, as it will affect the observed phenomenology. Thus an unparticle coupled to the standard model cannot be treated in isolation. It may transform non-trivially under exact or approximate flavor symmetries in the hidden sector. It is an interacting object, both with itself and with other composite operators that may not be coupled to the standard model; in fact a free unparticle of dimension above 1 is not consistent. It will interact with the energy-momentum tensor and with higher-dimension and higher-spin operators in the theory. These interactions become extremely important when conformal symmetry is broken. \subsection{The Predictions of Conformal Invariance} \label{subsec:predCFT} Now let us couple the Higgs boson to the operator ${\cal O}\equiv \sum_i {\cal O}_i^{\ i}$ (the flavor trace of the mass operator) in any of the toy models. In models A and B we simply add $f H^\dagger H {\cal O}$ to the Lagrangian; see for example \cite{unFox} for a discussion. Recall that the mass operator has a small negative anomalous dimension (at least if the gauge coupling is the largest coupling in the theory, which we will assume). Of course this interaction will badly destabilize the fixed point in our first two toy models, since relevant operators ${\cal O}$ and $|{\cal O}|^2$ will be induced. But these models are indeed toys, so we accept some severe fine-tuning, as in the standard model, in return for simple-minded illustrations that can easily then be generalized to realistic cases. 
In model C there are two choices. We could introduce a term supersymmetrically, by writing $\Delta W=yH_uH_dS + \zeta S{\cal O}$, where $S$ is a singlet and $H_u,H_d$ are the two Higgs doublets; this induces the term $|y H_uH_d+\zeta{\cal O}|^2$ in the Lagrangian. Alternatively we could introduce $\Delta W= yH_uH_d{\cal O}$, though by unitarity this is an irrelevant operator, so its coefficient might be suppressed. In either case we will also assume that supersymmetry breaking adds additional terms to the scalar potential. Rather than treat these carefully, we will speak in more general terms about the low-energy dynamics, which will be sufficient to illustrate the complexity and diversity of possible phenomenology. Also, note that supersymmetry breaking itself can break conformal invariance in the hidden sector. In this case, a small coupling to the Higgs boson does not necessarily imply a low conformal breaking scale. Now when the Higgs gets an expectation value, a ``tadpole'' term $fv^2{\cal O}$ will appear in the Lagrangian. This is simply a mass term for the scalar fields $\phi_i$. Obviously the conformal symmetry of the hidden sector is broken. Also, through the term $(fv) h{\cal O}$, the physical Higgs and the operator ${\cal O}$ will mix, which allows the process $gg\to h^*\to {\cal O}$, \reffig{gg2O}. This process was studied in \cite{unQuiros}. \makefig{0.4}{gg2O}{An unparticle mixing with the Higgs boson is produced in $gg$ collisions.} Note there is nothing mysterious here in our weakly-coupled toy models. The production of a single unparticle corresponds in more familiar language to the process $gg\to h^*\to \phi^\dagger_i \phi^i$, pair production for the $N_f$ scalars in the hidden sector via an off-shell Higgs boson $h^*$. If we consider the ${\cal O}$ propagator ending at a vertex, then the $\phi^\dagger$ and $\phi$ must come together to form a loop, as in \reffig{Oproduce}. 
Alternatively, if we consider the imaginary part of the ${\cal O}$ propagator, this contains the $gg\to h^*\to \phi^\dagger_i \phi^i$ process. \makefig{0.4}{Oproduce}{In our toy models at weak coupling, the unparticle propagator is a $\phi$ loop, corrected by hidden gauge boson exchange, and the imaginary part of the unparticle propagator contains the process $gg\to \phi\phi^\dagger$.} The changed dimension of ${\cal O}$ is also clear from this viewpoint. The departing $\phi$ and $\phi^\dagger$ are attracted to each other by the hidden gauge interactions, modifying the cross-section and causing it to fall faster than the $1/s$ behavior that would be expected if the scalars were free. \rem{\tiny What else does conformal invariance predict? As noted in \cite{multiun}, it predicts the scaling laws associated with ``multi-unparticle'' production: $n$-point correlation functions of ${\cal O} $. For example, suppose in addition to the coupling $|H^\dagger H| {\cal O}$ there is also a direct coupling such as $F_{\mu\nu}F^{\mu\nu} {\cal O}$, where $F_{\mu\nu}$ is the field strength of the photon (of QED, not of the hidden sector). Then we can expect the process $gg\to \gamma\gamma \gamma \gamma$ through the three-point function \reffig{ggOO4p}, which in the weakly-coupled language of models A and B is loop diagram \reffig{gg2phloop4p}. (Model C is more subtle due to supersymmetry.) The kinematics of this process will satisfy scaling laws that reflect the anomalous dimension of ${\cal O}$ \cite{multiun}. Of course, there is also the possibility of $gg\to hh$ through \reffig{gghh}. This does {\it not} satisfy simple scaling laws, because the pole at the Higgs mass violates conformal symmetry badly. If we unfold the effect of the Higgs line shape from the amplitude, however, simple scaling laws will be present. 
Still, since ${\cal O}$ mixes with the Higgs, this should already set off some warning bells, since it means that the ${\cal O}$ two-point function has received some corrections. Let us keep this in mind. \normalsize } \makefig{0.4}{gg2phiphiA}{The hidden sector is interacting: a hidden gauge boson can be radiated off the $\phi$ particles. This is {\it also} present in the imaginary part of the unparticle propagator.} In models A, B and C at weak coupling, not only do virtual gauge bosons at the production vertex change the scaling law, through ultraviolet effects, but also they can be emitted in the production process, along with $\phi_i$ and $\phi^\dagger_i$, as in \reffig{gg2phiphiA}. How should we represent this in terms of unparticles? It is another contribution to the imaginary part of the ${\cal O}$ propagator, which in fact will get contributions from an infinite number of processes involving the scalars and gauge bosons. If we only consider exclusive questions, we can treat the ${\cal O}$ propagator as a black box and ignore exactly what is going on inside the imaginary part. But we will see in the next section that the phenomenology depends crucially on looking inside this box. \rem{\tiny In model A, the photon can be expressed through a gauge invariant operator ${\cal O}'_{\mu\nu}=F^{(h)}_{\mu\nu}$, the field strength of the hidden photon, but the diagram drawn in \reffig{ggphiphiA} cannot be written as an correlation function of gauge-invariant local operators, that is, as an unparticle scattering amplitude, since $\phi_i$ and $\phi_i^\dagger$ are not at the same point. And in models B and C we cannot even express single gluons as local gauge invariant operators. Instead, we are forced to think of \normalsize } The two-point function of ${\cal O}$ must be modified at energies where conformal symmetry breaking is important, and its precise form cannot be determined without detailed understanding of the hidden sector. 
In \cite{unFox} a form for this two-point function was proposed, valid for any $d<2$. The functional form has a sharp cutoff at some value of $q^2$; it would be visible in experiments, as noted in \cite{Rizzo}. But we can immediately see that this is not the form which applies for any of our toy models. \subsection{The Unbroken Phase: A First Look} \label{subsec:unbroken1} There is an important question at the first stage, which is whether the breaking of conformal symmetry, which gives the $\phi_i$ a mass, might also give them expectation values. Let us first assume that we are in an ``unbroken phase'' where the $\phi_i$ are massive but the hidden gauge symmetry is not broken. In models A and B, at weak coupling, we can immediately see what happens. Let $m$ be the physical mass of the $\phi$ field, which at weak coupling differs only slightly from $\sqrt f v$. The calculation of the cross-section is almost that of a free theory, which means that there is a phase-space suppression of the cross-section at $q^2$ just above $m^2$. Here the continuum production smoothly ends. But this is not all: there are $\phi_i^\dagger \phi_i$ bound states, which give a set of resonances below the cut. These are not infinitely narrow, because they can both radiate and annihilate to hidden gauge bosons, so only a finite number of resonances are resolvable. They are weak and closely spaced if the hidden gauge coupling is small, but strong and spread out for more strongly coupled theories. Thus the cross-section for ``unparticle'' production for $d_{\cal O}$ near 2 might resemble \reffig{CS1}. Observation of these resonances allows for an alternative measure of strong coupling, one complementary to the measurement of the power law obeyed by the falling cross-section. \makefig{0.48}{CS1}{One possible shape for the cross-section $\sigma(s)$ to produce hidden sector particles, versus partonic energy; parton distribution functions are not included here. 
Note the smooth turnoff, due to decreased phase space, as $s$ decreases, and the resonances from hidden-sector $\phi^\dagger \phi$ bound states.} But if the Higgs couples with different coefficients to the various $\phi_i$ fields --- if it has small couplings to unparticles ${\cal O}_i^{j}$ other than ${\cal O} \equiv \sum_i {\cal O}_i^{\ i}$ --- then the breaking of conformal invariance may be yet more complicated. Rather than one threshold with one set of bound states, there may be several. This could lead to a cross-section given by the bold curve in \reffig{CS2}. The curve in \reffig{CS1} is shown as a thin curve on the same plot; note that these differ strongly near the peak cross-section, yet both match the unparticle prediction perfectly at high energy. These cross-sections are simple partonic cross-sections; the cross-section at LHC, \reffig{CS3} (a log-linear plot!), where the parton distributions are folded in, strongly de-weights the high-energy region, where the unparticle prediction is valid. \makefig{0.48}{CS2}{Another possible shape for the cross-section $\sigma(s)$ versus the partonic energy, with the curve of \reffig{CS1} shown for comparison; parton distribution functions are not included here. Both curves are appropriate for $N_f=4$; the thick curve shows the result of having four different masses, one somewhat lighter than the other three, while the thin curve has equal masses. Note the curves agree exactly at high energy but differ greatly near the point of maximum cross-section.} Moreover, in this case {\it the unparticles may not be invisible.} If any of the diagonal flavor symmetries are broken, either by the Higgs couplings to the unparticles --- for example, if there is an $H^\dagger H {\cal O}_{12}$ coupling, even a very small one --- or by flavor-violating ${\cal O}$ or ${\cal O}^2$ terms in the Lagrangian, then nothing forbids the decay $\phi_2\to h\phi_1$. 
Here the Higgs may be on- or off-shell, depending on whether $m_2-m_1$ is larger or smaller than $m_h$. It is possible that this visible decay may be overwhelmed by an invisible flavor-changing coupling of the $Y$ boson, namely $\phi_2\to Y\phi_1$, but this is obviously model-dependent. Certainly we should not assume that unparticle production is invisible, and we must reconsider the experimental implications and whether this helps, or hinders, a measurement of the unparticle cross-section. That the decay $\phi_2\to h\phi_1$ is possible is clear as day in the language of the scalars and gauge bosons in models A, B and C, at least when the coupling constant is weak and the unparticle has dimension just below 2. Yet how unclear it becomes when one tries to write it in terms of the gauge invariant operators ${\cal O}_{i}^j$! The otherwise obvious process becomes quite obscure. \makefig{0.48}{CS3}{A log-linear plot of $\log[\sigma(\sqrt{\hat s})]$ versus $\sqrt{\hat s}$ at the LHC, for the same two models shown in \reffig{CS2}. The gluon distribution functions greatly enhance the region of greatest difference between the two models.} We can try to write it down anyway, as an exercise, and also to see that the process does not vanish when the coupling becomes stronger and perturbative intuition fails. In unparticle language, there is a $\vev{{\cal O}_{22}{\cal O}_{21}{\cal O}_{12}{\cal O}_{11}}$ four-point function, even in the limit that the hidden gauge coupling goes to zero. This induces a process $gg\to h^*\to {\cal O}_{22}\to hh{\cal O}_{11}$. A cut through this process includes the process $gg\to h h\phi_1\phi_1^\dagger$, \reffig{ggOhhO}. This would be very small in a conformal field theory coupled to the Higgs, but when conformal symmetry is broken, the analytic structure of the ${\cal O}_{i}^j$ propagators is drastically altered, making the process now unsuppressed. 
The existence of flavor-symmetry-breaking spurions, and the change in the analytic structure of the ${\cal O}_{i}^j$ propagators (whose cuts now end at finite timelike values of $q^2$, not at $q^2=0$), are enough to guarantee that processes with Higgs emission are enhanced compared to the case with unbroken conformal symmetry. \makefig{0.4}{ggOhhO}{In the imaginary part of the four-point function hides the decays of $\phi_2\to h\phi_1$ and $\phi_2^\dagger \to h\phi_1^\dagger$.} \subsubsection{Possible Signatures} Despite the fact that there are visible effects, this model may pose a considerable challenge for the LHC. At best one obtains the following interesting but difficult signatures: \begin{itemize} \item If $m_h<2m_2$ and $m_2-m_1>m_h$ then in production of ${\cal O}_{22}$ ($gg\to \phi_2\phi_2^\dagger$) one might observe a final state with two Higgs bosons plus MET from the invisible $\phi_1$ and $\phi_1^\dagger$. The best channel might well be $b\bar b\tau^+\tau^-$ plus MET. \item If $m_h<2m_2$ and $m_2-m_1<m_h$, then the decay of $\phi_2$ must go via off-shell Higgs bosons; the rates are suppressed. It is possible the potentially invisible channel $\phi_2\to\phi_1 Y$ will dominate, but this may also be suppressed. The likely signal would involve soft nonresonant $b$ quark pairs or $\tau$ pairs plus MET. \item If $m_h> 2m_2$, then the decay $h\to \phi_2\phi_2$ is allowed, followed by the decay $\phi_2 \to h^* \phi_1$ with an unknown branching fraction. From such decays, the visible energy may be quite small, so triggering on $gg\to h$ may be impossible. In vector boson fusion events one might observe forward jets plus soft jets or leptons and MET from the two $\phi_2$ decays. Similar signals would arise in $Wh$ and $Zh$ production. \end{itemize} All of these modes are challenging or perhaps even impossible at the LHC. Indeed these are signals best found at an ILC, or perhaps the Tevatron. 
But in any case, {\it the hidden sector is not generally invisible.} In particular, a search aimed at unparticles assumed to be invisible, such as for events with a $W$ and large MET with no central jets, might throw away the signal. This is not the complete list of possibilities, and at larger $N_f$ the possible signatures multiply, as cascade decays ensue, giving high-multiplicity final states. These signatures are a minimal type of hidden valley-like phenomenology, in which a $\phi_i$ that is trapped on a ``ledge'' cannot decay rapidly within its own sector, and instead decays through a visible-sector Higgs boson. This kind of decay should be common to many models with multiple sterile scalars mixing with the Higgs boson \cite{manyhiggs}; however these signatures do not seem to have been explored. Let us compare this conclusion with that of \cite{unQuiros}, in which a Higgs boson coupling to the hidden sector was considered. In figures 3 and 6 of \cite{unQuiros}, cross-sections for unparticle production take the form of a pole below a continuum, if the Higgs is lighter than $2m$, and of a broad resonance inside a continuum, if the Higgs is heavier. In the second case we recognize this as the typical behavior of a particle mixing with a continuum -- a pole mixing with a cut. (See also \cite{manyhiggs}.) In \cite{unQuiros} it was stated that the signal is invisible, unless the unparticle itself can decay by mixing back through the Higgs. Why were the conclusions of \cite{unQuiros} so different from those of this section? A particular model for the unparticle was employed, a specific regulated form (Eq. 2.8 of \cite{unQuiros}) of the unparticle/Higgs coupling was introduced to assure stability of the vacuum, and unparticle interactions were neglected. These choices are simple, but they are not characteristic of typical hidden-sector gauge theories. Most importantly, their lack of realism precisely assumes away all the signals discussed above. 
(Moreover, the assumptions made cause the unparticle to develop an expectation value. This has other important effects that we will see in a moment, in Sec.~\ref{subsec:broken}.) In more realistic models, there are likely to be interesting resonances, as in \reffig{CS1}, below the continuum, and possibly more structure, as in \reffig{CS2}, if global symmetries are broken. And if there is any flavor mixing, as could be induced by a mismatch between the Higgs couplings and hidden sector couplings, then we expect decays within the hidden sector via Higgs boson emission, as in \reffig{ggOhhO} and in the bullet points above. The rates and branching fractions depend upon the rest of the hidden sector, not specified in \cite{unQuiros}. In sum, the hidden sector is easily made visible once conformal symmetry breaking occurs. But we are not done, by any means, at least not in general models. In model A, we may have identified all the phenomenology of the unbroken phase, since the theory may simply consist of massive scalars and a massless hidden photon. The massive scalars have a ${\bf Z}_2$ symmetry which forbids the lightest state $\phi_1$ from decaying to the standard model, and the same is true of the hidden photon $Y$, which also therefore remains invisible. But in models B and C we must address what happens at lower energy to the so-far massless hidden gluons. We will do this in Sec.~\ref{subsec:unbroken2}. Before we do this, let us consider a completely different possibility. \subsection{The Broken Phase} \label{subsec:broken} After conformal symmetry breaking, the theory may also end up in a ``broken phase'', where the gauge symmetry is broken. The presence of $|{\cal O}|^2$ terms, either ab initio or induced when $H$ gets an expectation value, may cause one or more of the $\phi_i$ (and thus ${\cal O}_{i}^i$) to develop an expectation value $w_i$. This gives a mass $m_Y\sim g_H w_i$ to some or all of the hidden gauge bosons $Y$. 
Let us assume, purely for simplicity, that they are all massive. The nonzero $w_i$, aside from their effect through $f H^\dagger H\vev{\cal O}$ on the Higgs boson mass, also cause mixing of $H$ and $\phi$ itself. Said another way, the operator ${\cal O}$, after symmetry breaking, is now just an ordinary particle at leading order \begin{equation} {\cal O}_i^j = w_i^\dagger w_j + w_i^\dagger\ \delta\phi^j + w^j\ \delta \phi^\dagger _i + \delta\phi^\dagger_i\ \delta \phi_j \end{equation} and thus the mixing between $h$ and ${\cal O}$ is ordinary particle mixing between $h$ and $\delta\phi$. Therefore the unparticle propagator develops poles at $m$ and at $m_h$, with non-zero imaginary parts, and with residues which depend on $f$, $v$ and $w_i$. If the $\phi_i$ have different masses, or the $w_i$ are not all equal, there will be a separate pole for each $i$. This has the effect that ${\cal O}$ (or $\delta \phi_i$, to be less obscure) has an amplitude to decay by $\delta\phi_i\to h^*$ to any kinematically allowed final state of the Higgs boson, such as $b\bar b$, $\tau^+\tau^-$, $WW$, $\gamma \gamma$, etc. This is precisely what happens in models \cite{manyhiggs,NMSSM} where there are standard-model singlets that can mix with the Higgs boson. This is also what can happen in hidden valley models \cite{HV1,HV2}, which easily produce sterile scalars that can mix with the Higgs. But there is another important effect to consider. Just as the Higgs boson can decay to $WW$ and $ZZ$ because it gives them their masses, the $\delta\phi_i$ can decay, if $w_i$ is nonzero and the $Y $ is sufficiently light, to $Y Y $. And since the Higgs can mix with the $\delta\phi_i$, it too can decay by $h\to YY$. More precisely, the Higgs and the $\phi_i$ form a mixed system of scalar mass eigenstates $\hat\phi_a$, each of which can decay to standard model fermions, standard model gauge bosons, and hidden gauge bosons, if kinematically allowed. 
Moreover, they can decay to each other, because of the $H^\dagger H \phi_i \phi_i^\dagger$ coupling, which after symmetry breaking induces many three-particle couplings, such as $h\to \phi_i \phi_i$ and $\phi_j\to h h$, or more generally $\hat \phi_a\to \hat \phi_b \hat \phi_c$. (There are also in general pseudoscalar states as well, but to keep the discussion under control we ignore them here; a more serious study might reveal additional signals.) Whether these three-point couplings dominate over the decays to standard model particles or to $Y Y $ depends strongly on the couplings and the masses and the mixing angles of all the states. Moreover, the $Y $, now massive, may not be invisible. It will mix with the $Z$ boson, with a model-dependent mixing angle, and decay with a model-dependent lifetime to standard model fermions: quark pairs or lepton pairs. Its mass is often small and its mixing must be small, for consistency with direct and indirect LEP bounds, so it may well decay with a displaced vertex. For instance, if the $Y$ field is heavier than $2m_b$, then its lifetime is roughly of order that of the $Z$ (about $10^{-24}$ seconds), divided by the square of a mixing angle, times $(m_Z/m_Y)^5$ \cite{HV1}. For lighter $m_Y$ some decay channels are kinematically forbidden and the lifetime becomes longer. For a 20 GeV $m_Y$ to decay inside the detector requires a mixing angle larger than $10^{-6}$, which is certainly permitted by experimental constraints. In \cite{unQuiros}, where the unparticle developed an expectation value, these issues were not considered. Admittedly it is difficult to express or even recognize these phenomena in purely unparticle language. It is tricky, at best, to write a shift from one vacuum to another, and the Higgs mechanism, in terms of gauge-invariant local operators. 
For example, unparticle interactions clearly cannot be neglected when computing the two-point function of an unparticle in a shifted vacuum, since three- and higher-point functions in the unshifted vacuum will contribute to the two-point function in the new vacuum. As all known nontrivial conformal field theories in four dimensions are gauge theories, an unparticle's interactions with other unparticles that contain the hidden gauge fields $Y$, such as $\phi^\dagger_i D_\mu \phi_j$ in our toy models, must be considered. And in the toy models above, three-point functions, which contribute to decays, get new contributions from the unparticle interactions and from Higgs-Higgs-unparticle couplings. \subsubsection{Possible Signatures} With all of the above effects accounted for, the potential for striking phenomenology emerges. Instead of an invisible sector with a cross-section given by one or another of Figs.~\ref{fig:CS1}--\ref{fig:CS3}, or in the figures of \cite{unQuiros}, we may have a flurry, or perhaps even a blizzard, of visible final states. Several of these are illustrated in Figs.~\ref{fig:gg2h2bbbb}--\ref{fig:gg5Y}. Decays of any of the scalars $\hat\phi_a$ (the mass eigenstates which are mixtures of the Higgs and the $\phi_i$) may generate final states that range from the well-known to the exceptional. Examples (with resonant pairs shown in brackets) include \begin{itemize} \item $gg\to\hat \phi_a\to \hat\phi_b \hat\phi_b \to (b \bar b) (b\bar b) $ (\reffig{gg2h2bbbb}.) \item $gg\to\hat\phi_a\to Y Y \to (\ell^+\ell^-)(\ell^+\ell^-)$ (\reffig{gg2h2YY}.) \item $gg\to\hat\phi_a\to Y Y \to (q\bar q)(\ell^+\ell^-)$ \item $gg\to\hat \phi_a\to \hat\phi_b \hat\phi_b \to (YY)(YY)\to[(q\bar q)(\ell^+\ell^-)][(\nu\bar \nu)(\ell^+\ell^-)]$ (\reffig{gg4Y}.) 
\item $gg\to\hat \phi_a\to \hat\phi_b \hat\phi_c \to (YY)(b\bar b)\to[(q\bar q)(\ell^+\ell^-)](b\bar b)$ \item $gg\to\hat \phi_a\to \hat\phi_b \hat\phi_c \to (YY)( \hat\phi_d \hat\phi_d )\to[(q\bar q)(\ell^+\ell^-)][(b\bar b)(b\bar b)]$ (\reffig{gg2YYphiphi}.) \end{itemize} Clearly this is not the entire list. These decays are in addition to classic decay modes, such as $\hat \phi_a \to W^+W^-$ or $\to b\bar b$, if kinematically allowed. Note the decays are mainly on shell; the fermions in the final states form resonances pairwise, so plots of the invariant mass of the dileptons will show peaks, making the four-lepton channel completely spectacular. (As this paper was completed, I learned that model A, investigated already in \cite{SchabWells, BowenWells}, is being further studied in \cite{GW}, where some of these decay modes of the Higgs boson are also noted.) Displaced vertices from $Y$ decays (or perhaps even from $\hat\phi$ decays) may also be present, adding additional spice to the story and reducing backgrounds. \makefig{0.4}{gg2h2bbbb}{The Higgs (or any sufficiently heavy $\hat \phi^a$) may decay to two $\phi$ particles, which each decay to heavy flavor; see \cite{NMSSM} for similar examples.} \makefig{0.4}{gg2h2YY}{The Higgs (or any sufficiently heavy $\hat \phi^a$) may decay to two $Y$ bosons, which decay to two standard model fermions each, often resulting in two-lepton-two-jet or four-lepton final states.} \makefig{0.4}{gg4Y}{The Higgs (or any sufficiently heavy $\hat \phi^a$) may decay to two $\phi$ particles, which decay to two $Y$ bosons, which decay to two standard model fermions each. } The physics of multi-particle decays of the Higgs boson, and of mixing of the Higgs with multiple scalars, has a long history. In the NMSSM it was noted that the decay $h\to aa$, $a\to b\bar b$ or $\tau^+\tau^-$, where $a$ is a light pseudoscalar, often arises \cite{NMSSM}. 
Mixing of the Higgs boson with invisible sterile scalars, spreading the Higgs signals among many resonances, has also been considered \cite{manyhiggs}. But the fact that multiple cascade decays can so generically lead to multiple resonances and high multiplicity in the final state has only been recently emphasized \cite{CFW,HV1} and the possible relevance of displaced vertices for discovering the Higgs boson has apparently also been overlooked until recently \cite{HV1,HV2,JHU} (see also comments in \cite{CFW}.) \makefig{0.4}{gg2YYphiphi}{The Higgs (or any sufficiently heavy $\hat \phi^a$) may decay in a cascade to eight-fermion final states; see \cite{CFW} for a similar example, and also \cite{HV2}.} Even this is not all. If the $Y$ is rather light, but the hidden gauge coupling is not small, then it is easy for one or more $Y$ particles to be radiated in the production process, as in \reffig{gg2phiphiA}. These can then decay to additional standard model particle pairs, as in \reffig{gg5Y}, again perhaps with a displaced vertex. And still more complexity may arise in models B and C if the nonabelian $Y$ fields, whose masses are related to the $w_i$, can decay to one another, increasing the multiplicity of final-state particles still further. We should not forget the unparticle production cross-section near threshold for the lightest $\hat\phi$. We still have a continuum above $2m$ and $\phi$-onium resonances just below $2m$. But the number of resonances depends on $m_Y$; as the Compton wavelength of $Y$ decreases, so does the number of resonances, eventually to zero. As the resonances decay, or annihilate, they may produce mainly $Y$ bosons, whose decays would make for striking events. Or if the $Y$ is too heavy, decay may occur through an off-shell Higgs boson. However, the rate for this resonance production may be very small. 
\makefig{0.4}{gg5Y}{As in \reffig{gg4Y}, with additional radiation of a $Y$ boson; the probability for this process is proportional to the hidden sector gauge coupling, possibly further enhanced by soft logarithms. Here $f$ represents any kinematically-accessible standard model fermion, $u,d,s,c, b,t, e, \mu, \tau, \nu_i$.} By now the fact that this is a hidden valley model should be fully clear. We have new light neutral resonances, with long lifetimes, appearing in potentially large numbers through decay cascades and through radiation \cite{HV1}. There are new Higgs decays \cite{HV1,HV2}. If this is a supersymmetric world, as in model C, we will also have the physics of \cite{HV3}, whose details depend on the relative masses of the standard model and hidden sector LSPs. Amid all of this, the measurement of the conformally invariant high-energy tail on the production process may seem less urgent, though it remains a very important probe of the hidden sector. Whether it is easy or difficult to measure this tail clearly depends on the visible signal, which may either brightly illuminate or badly obscure the kinematic variable whose behavior is controlled by the ``unparticle'' dimension. \subsection{The Unbroken Phase: A Second Look} \label{subsec:unbroken2} Now we should revisit the unbroken phase, to see what happens to the massless hidden gluons in models B and C. Of course the unbroken phase is a confining phase at low energy. If the conformal hidden gauge coupling is weak, then the confinement scale $\Lambda$ is far below $M$, and any hidden-sector hadrons may be invisible. But if the gauge coupling at the fixed point is fairly strong, as is usually the case when there are large anomalous dimensions for any operators, then confinement may kick in within an order of magnitude or two of the scale $M$; see for example \cite{springloaded,Nonestar}. 
In this case, we will see the remarkable phenomenology of a confining hidden valley model, a few examples of which were given in \cite{HV1}. There are a number of different possibilities, depending on the precise nature of the matter in the hidden sector and the masses of the matter. We will cover just a couple of them; this is not the full range of possibilities! \subsubsection{Light Matter in the Fundamental Representation} Suppose first that $N_f>1$, and that the Higgs couplings to the hidden sector break its $U(N_f)$ symmetry, so that $k$ of the $\phi$ fields end up heavy, while $N_f-k$ are light. (As stated, this is a bit fine-tuned. A more natural scenario is that other couplings in the hidden sector break the flavor symmetry, so that the Higgs couples most strongly at low energy only to those scalars whose mass operator has the lowest dimension.) A simple version of this case, with fermions instead of scalars and $N_f=2$, $k=1$, was discussed in \cite{HV1}. We will continue to call the heavy fields $\phi_i$, but will rename the light ones $\chi_r$, $r=1,\dots,N_f-k$. The heavy-heavy mesons $\sim \phi^\dagger_i\phi^j$ will be rarely produced, except for $\phi$-onium states, which we will ignore until the next section. But the heavy-light $\phi\chi$ mesons, analogous to $B$ and $D$ mesons, will always be produced in open $\phi$ production, through the process $gg\to h\to\phi^\dagger \phi$. (Here the Higgs boson may be on- or off-shell.) The light-light $\chi\chi$ mesons, analogous to $\rho$, $K$ and $\pi$ mesons, will also be produced in open $\phi$ production. Thus, just like open-charm production in the standard model, $\phi^\dagger \phi$ pair production typically leads to two stable (and invisible) heavy-light mesons, along with some number of light mesons. The higher the center-of-mass energy, the more light mesons produced. At high enough energy this process is driven by a parton-shower of hidden sector $Y$ bosons. 
The visibility of the signal then depends on whether there are $\phi_i\to h\phi_j$ decays, as discussed in Sec.~\ref{subsec:unbroken1}, and on whether the light mesons can decay to the standard model. By assumption the Higgs has smaller couplings to the $\chi_r$ than to the $\phi_i$, but they need not be zero, so the Higgs can mediate decays of some light meson states. Other small couplings, possibly higher dimension operators that couple hidden-sector operators involving $\chi$ to standard model operators, may also mediate light meson decays. One example was given in \cite{HV1}; many other examples, with varying phenomenology, may be invented. If one allows oneself the freedom to introduce all possible operators, as in \cite{allunparticle}, then essentially any decay modes for the light mesons allowed by symmetry, with a vast range of lifetimes, are possible. Thus, the signatures in models B and C may include the following (see \reffig{gghhvhads}): \begin{itemize} \item For each of the signatures outlined in Sec.~\ref{subsec:unbroken1}, and the invisible production processes in that section, we should add one or more light mesons. \item As illustrated in \cite{HV1}, these mesons are most likely to be scalars and pseudoscalars decaying to heavy quarks and leptons, or to gluon/photon pairs, and possibly spin-one mesons decaying to leptons and quarks more democratically. \item Displaced decays are possible for light pseudoscalars and very light vectors, or for any light state if the $\chi_r$ couple much more weakly than $\phi_i$ to the standard model. \end{itemize} Other hidden hadrons with more complicated decays are certainly possible \cite{HV1}. For instance, in model C, we also have supersymmetric partners of these states, which if supersymmetry is not badly broken may allow for fermionic hadrons with additional three-body decays. Decays to four particles are also not uncommon. 
Indeed there is much more to say about the supersymmetric case, and the interplay of supersymmetry breaking with hidden-valley phenomenology. \makefig{0.4}{gghhvhads}{In a confining theory with light $\chi$ fields and heavy $\phi$ fields, the production of $\phi$ particles leads through fragmentation to two heavy-light mesons (which if stable are invisible) and a number of light-light mesons, some of which may decay to the standard model, possibly with displaced vertices. The standard model final states are variable and are shown here schematically as sets of unmarked thin solid lines.} Note that from the unparticle point of view, the production of $n$ light mesons and two heavy mesons must be described using an $n+3$-point function involving gauge invariant operators built from $\chi$ and $\phi_i$. Again, this correlation function would be highly suppressed in the conformal limit compared to other processes, but here it benefits from the cuts and resonant enhancements that arise from the production of many light on-shell states with relatively narrow widths, along with the large phase space for decays of this class. Without including unparticle interactions and treating them with great care, one might overlook these complex yet dominant processes. \subsubsection{Heavy Matter Only} Now suppose instead that all the matter in the hidden sector becomes massive relative to $\Lambda$ after conformal symmetry breaking. Then the low-energy limit is a pure hidden Yang-Mills theory, with a hidden confining flux tube. This very generic situation was also considered in \cite{HV1}. The low-lying hidden hadrons are hidden glueball states, of mass $\sim \Lambda$. When a pair of $\phi_i$ is produced, they cannot escape one another; they are bound by a string that cannot easily break. 
This has a dramatic effect on the unparticle propagator: in such a theory, the propagator is not a cut above $2m$ but is instead a sum of bound state resonances continuing up to very high energy, with a cut setting in only at $q^2=(4m)^2$, where two such resonances can be pair produced. Note these states are not stable: all can decay by emission of hidden glueballs except for the lightest, which can decay via annihilation to two or more hidden glueballs. The widths of the resonances grow as one goes up the tower, just due to the phase space for the decays, so eventually they blur together to form a continuum which must satisfy the constraints of conformal invariance. Where this happens depends on the details of the theory, but above this scale the calculations done with conformal unparticle propagators will apply. As an aside, note that in the case where the fields $\phi$ carry electric charge or color, they are often called ``quirks'' \cite{KLN, HV1}. In this case their bound states may also radiate standard model gauge fields. This was very briefly considered in \cite{HV1} for confinement scales well above 1 GeV, but the physics is very subtle, and the question of the expected LHC signals is still under study \cite{HW}. The case of quirks with confinement scales well below 1 GeV has many special features and has been considered by \cite{KLN,HW}. Clearly the observability of the physics depends in this case on whether the hidden glueballs can decay visibly. According to lattice simulations \cite{Morningstar} there will be numerous glueballs, of various spins, parity and charge conjugation, which cannot decay to other glueballs. How each one decays --- its decay mode, lifetime, and branching fractions --- depends in detail on whether there are nonzero couplings between hidden gauge invariant operators ${\rm tr} F_{\mu\nu} F^{\mu\nu}$, ${\rm tr} F_{\mu\nu} \tilde F^{\mu\nu}$, ${\rm tr} F_{\mu\nu} F^{\nu\rho}$, etc., and operators in the standard model. 
Because of the high dimension of these operators it is easy to arrange that all these decays would occur outside LHC detectors. But if one or more of the lifetimes is sufficiently short, the result will be a classic signature of a hidden valley: long-lived light neutral resonances produced in abundance, with likely missing energy and large event-to-event fluctuations. In particular, hidden $\phi$-onium production will produce some number of low-$p_T$ glueballs, with various $J^{PC}$ quantum numbers, emitted as a $\phi$-onium state relaxes to the ground state, followed by a blast of glueballs produced in the annihilation process; see \reffig{gghhonium}. The signatures are then \begin{itemize} \item A number of high-$p_T$ glueballs with $p_T\sim m$, and a number of low-$p_T$ glueballs with $p_T\ll m$. \item Decays of the various glueballs, whose masses span a range of about a factor of three, to different final states, such as $gg$, $b\bar b$, $\gamma\gamma$, $ggg$, etc. \item Possible displaced vertices from one or more glueball decays. \item The $\phi$-onium annihilation may occasionally occur through $h^*$, producing any final state of the Higgs, such as $ZZ$, in place of the high-$p_T$ glueballs. \end{itemize} Unfortunately there is no known method for obtaining reliable predictions for the $p_T$ spectrum of the glueballs and their standard model daughters. Much additional work on this example is needed. \makefig{0.4}{gghhonium}{In a confining theory with only heavy $\phi$ fields, the production of $\phi$ particles produces bound $\phi$-onium states which decay toward their ground state via hidden glueball emission, finally annihilating to multiple hard hidden glueballs. The hidden glueballs may decay to the standard model, possibly with displaced vertices. The standard model final states are variable and are shown here schematically as sets of unmarked thin solid lines.} \subsubsection{Possibilities for Future Study} There are many other possibilities. 
The hidden gauge group may not be $SU(N)$; either the $\phi$ fields or the light $\chi$ fields, if any, may not be in the fundamental representation; the phase of the theory may be partially broken and partially confined. Each of these possibilities will change the details drastically, but in the majority of cases the basic features of the hidden valley scenario, and its experimental implications, will be retained. While the conformal invariance of the theory and the dimensions of the operators do constrain some inclusive observables, the most dramatic effects on the phenomenology are beyond the easy reach of unparticle methods. \subsection{Other Models, Other Couplings} \label{subsec:others} We have seen that a wide variety of signals can arise even in simple toy models. There is an enormous diversity of phenomenological possibilities, as is typical in the hidden-valley scenario. But we have only discussed a very small set of unparticle models, those with a scalar unparticle with dimension somewhat below 2, with a coupling to the Higgs boson. Are these cases special? Also, we have often used weak-coupling intuition for guidance. Is this misleading? \subsubsection{Stronger coupling and $d_{\cal O}\ll 2$} In model C, as we decrease $N_f/N_c$, it is known that the hidden gauge coupling becomes stronger and $d_{\cal O}$ decreases from 2 toward 1. Should we expect something completely different from our discussion above as $d_{\cal O}$ approaches 1? The reader is invited to consider the case $d_{\cal O}=1+\epsilon$. There the operator ${\cal O}$ can be treated as a scalar that couples weakly to a conformal sector, for example through a linear coupling to an operator of dimension $3-\epsilon$, or quadratically to an operator of dimension slightly less than 2. The arguments can be repeated using toy models.
Excellent toy models are Banks-Zaks supersymmetric fixed points with additional gauge-singlet superfields, as in Seiberg ``magnetic'' fixed points \cite{SeibergNAD}, coupling in the superpotential to squark-antisquark bilinears. Not surprisingly, since this is just model C with additional scalars, and since we already considered model C with additional scalars (the Higgs boson itself!) in our examples above, the physics in this case has the same features, somewhat rearranged. The resonances (both strong and weak), mixing, cascade decays and complicated multibody final states that we have seen here may arise there as well, though with rates that decrease to zero as $\epsilon\to 0$. The exercise is left for the reader. \subsubsection{If there is no coupling to the Higgs boson} If the $|H|^2 {\cal O}$ coupling is absent, then some of the effects discussed in the earlier sections may be absent as well. In particular, we may not see unusual Higgs boson decays, or the effect of Higgs mixing with the unparticle sector. However, we may still see unusual decays of other heavy particles, including very rare decays of $Z$, $W$ or $t$, or very common decays of new particles, such as supersymmetric partners, little Higgs partners, Kaluza-Klein partners, $Z'$ bosons, right-handed neutrinos, or new scalars other than the Higgs boson. What matters more than the Higgs coupling is whether there is a mass gap which is sufficiently large. How might a large mass gap (larger than a few hundred MeV, at least) naturally arise without the Higgs boson coupling? This is very easy to imagine, since we know there must exist some mechanism that generates the electroweak scale in the standard model sector. For instance, supersymmetry breaking in our own sector, if generated via gauge mediation or supergravity mediation, will naturally generate supersymmetry breaking in other sectors as well. 
While this breaking may be somewhat suppressed, it may still lead to a mass gap (or ledge) at or somewhat below the 100 GeV scale. Technicolor models may easily break symmetry groups larger than that of the standard model, including those of hidden sectors. A further possibility is that the very mechanism that leads to the local couplings at low energy between the two sectors is precisely the same as that which generates the mass gap; for example, if there are massive particles charged under both groups, the masses at 1 to 10 TeV may destabilize a quasi-fixed-point, causing the hidden gauge coupling to run strong and confine at a scale at 1 to 100 GeV. The point is that it is very easy to imagine models with a large mass gap without invoking the coupling to the Higgs boson. The physics that ensues can, as before, leave the theory in any number of phases: broken, confined, or partly both, with any number of possible light modes that can decay via couplings to the standard model. The question of whether the hidden sector is invisible is very model-dependent. \subsubsection{Effects in top quark decays} If there are couplings to the top quark, then even without couplings to the Higgs boson one may find remarkable signals. For example, in the $t\to c{\cal O}$ transition considered in \cite{Un1}, one may have, instead of missing energy, a decay to many particles, such as in \reffig{t2c4Y}, where the top quark decays to nine particles, including four hidden-sector resonances decaying to two particles each. This particular process can occur if the hidden sector is in a broken phase, where the $\phi\to YY$ decay can occur. Note this is a fully reconstructable top quark decay. The total differential rate for this process may still be given by the unparticle prediction \cite{Un1}, because the energy released in the decay $t\to c{\cal O}$ may be large, compared to the masses of the $\phi$ particles.
But unfortunately the kinematic variable which one needs in order to measure the unparticle dimension involves identifying which of the jets is the charm quark. This may in some cases be the most energetic jet, but clearly it will be very challenging to make the inclusive measurement and determine the unparticle dimension directly. A similar problem would arise in other visible signals, such as $t\to c b\bar b b\bar b$, or $t\to c \gamma\gamma g g$, with or without missing energy. However, this problem may be absent if the hidden sector particles (such as the $Y$ in \reffig{t2c4Y}) decay with a displaced vertex. \makefig{0.4}{t2c4Y}{The decay of a top quark to a charm quark plus an unparticle may result in a decay to more than three visible particles and little or no missing energy. Many other final states are possible, depending on the details of the hidden sector. The unparticle measurement of \cite{Un1} requires identifying the charm quark.} \subsubsection{Supersymmetric decays, and analogous cases} With supersymmetry, unparticle couplings to supersymmetric particles can lead to challenging decays of the lightest standard model superpartner, as discussed in \cite{HV3}. For instance, a neutralino -- perhaps, in our toy model C, through its Higgsino coupling to the unsparticle -- may decay to $\phi$ and its superpartner, which in turn decays to $ \phi$ and $\tilde Y$, the invisible hidden gaugino (\reffig{LSP24Y}.) This type of decay can significantly reduce the missing energy signal which is typically used to find supersymmetry, and can replace it with soft jets and leptons \cite{HV3}. More research to understand how to find such a signal is needed. Another interesting possibility \cite{HV3} is that the lightest standard model superpartner is not neutral. 
For example, if this lightest standard model particle is a stau, then the decay $\tilde \tau\to \tau \tilde {\cal O}$ to an unsparticle is similar to the $t$ decay of \cite{Un1}, in that the $\tilde\tau\to \tau$ kinematics may reflect the dimension of $\tilde {\cal O}$. However, as with the top quark decay mentioned above, the visibility of the kinematical power law will depend on the details of the final state emerging from the unsparticle, as it is converted into visible particles. Note also that the $\tilde\tau$ may easily be long-lived (this is less likely in the neutralino case) and may decay with a displaced vertex \cite{HV3}. Essentially the same physical phenomena can arise in any model with new particles that carry a new conserved global symmetry, such as KK-parity in extra-dimensional models, or T-parity in little-Higgs models. As long as there are particles in our sector and in the hidden sector carrying the new charge, the possibility of interesting cross-sector decays exists. The scenario of a new heavy particle in the standard model (the lightest particle carrying the new charge) and an unparticle sector with conformal invariance and a mass gap below 100 GeV (which will have a lighter particle carrying the new charge) virtually {\it guarantees} the decays studied in \cite{HV3} will occur. The only question is whether the decays are visible, and this depends on the details of the hidden sector and the size of the mass gap. \makefig{0.4}{LSP24Y}{The decay of a neutralino through an unsparticle $\chi\to\tilde {\cal O}$ can result in many visible particles plus one stable invisible particle, which here is the hidden gaugino $\tilde Y$. The decay $\tilde\tau\to \tau \tilde {\cal O}$ can have a similar final state.} Obviously there is much more to do in this arena. Exploration of the possibilities is important as a first step to ensuring that none of these challenging signals will escape detection.
\section{The Stephanov Model and Hidden Valleys} \label{sec:unSteph} An interesting approach to unparticles was provided by Stephanov \cite{unSteph}, where an unparticle was modeled by appealing to the five-dimensional language inspired by the AdS/CFT correspondence, also known as ``gauge/string'', ``gauge/gravity'', or ``boundary/bulk'' duality \cite{malda,GKP,WittenAdS}. (See also the work of Randall and Sundrum \cite{RS1,RS2}.) The five-dimensional language, as we will see, is indeed instructive, but only will guide us if we use it fully. We will need the full gauge-string correspondence, not the watered-down gauge/gravity version, in order to capture all the physics and see the hidden valley in full (s)unshine. \subsection{The Stephanov Viewpoint and Hidden Valleys} In order to regulate and then interpret the unparticle propagator, Stephanov broke the conformal invariance with an infrared cutoff. (Note this is not deconstruction, but compactification \footnote{Deconstruction \cite{ACG} refers to the discretizing of a space, as in \cite{SonSteph}, and representing it as a gauge theory in one lower dimension. The number of Kaluza-Klein modes in a deconstructed theory is finite. The introduction of an infrared cutoff while retaining the continuity of the five-dimensional space is a form of compactification; the number of Kaluza-Klein modes is countably infinite. In this context this cutoff is known as the ``hard wall'' model, a model often used in the description of a confining gauge theory, which is why ``Randall-Sundrum 1'' \cite{RS1} can be viewed as dual to technicolor.}.) This cutoff is often used in the gauge/string literature as a model of how to describe confinement in the gauge theory in terms of a five-dimensional (5d) theory on a warped space-time. In short, Stephanov's method of regulating the unparticle propagator is simply this: a model of a two-point function in a hidden sector which is confining in the infrared and conformal at larger energies. 
Many examples of such theories are known; one explicit example with a dual string description is given in \cite{Nonestar}. Models of the same type were already considered in Sec.~\ref{subsec:unbroken2}. But the predictions presented in that section are very different from those of Stephanov. This makes it far from obvious that we are dealing with a hidden valley. Let us review these predictions and see where the difficulties with them lie. First, Stephanov predicted a narrow tower of states. Second, he suggested these states would decay to standard model particles through the coupling to the unparticle. Third, he suggested these states could have very long lifetimes and could be detected from displaced vertices. Finally, he suggested the lifetime $\tau_n$ of the $n^{th}$ state would be related to the dimension of the unparticle by $\tau_n\sim m_n^{p-2d_{\cal O}}$, where $p$ is a (known) positive number. This tower of long-lived states, with a directly measurable lifetime-to-mass relation set by the dimension of the unparticle, is an impressive prediction of unparticles not shared by typical hidden valley models. But there is a good reason for this. The point is that Stephanov's formulas apply for a hidden gauge theory in the limit that the number of colors $N$, and the 't Hooft coupling $\lambda \equiv \alpha N$, where $\alpha= g^2/4\pi$ is the hidden gauge coupling, are extremely large --- how large will be explored below. (Meanwhile $N_f$, the number of flavors of matter fields, must remain finite and small, so these are not classic Banks-Zaks-type or Seiberg-type fixed point theories.) In this limit, a confining gauge theory has a spectrum that is merely an infinite tower of stable non-interacting hadrons of spin $\leq 2$. Many familiar aspects of gauge theory would be absent, including all high-spin hadrons, BFKL dynamics, parton showers, and the like. 
This differs so dramatically from QCD, and even from gauge theories with dozens of colors, that it was not recognized as a hidden valley. But the interpretation is this: if one were to take a hidden valley model into this extreme regime, it would eventually resemble the narrow-tower model. Less extraordinary models will have, not surprisingly, more ordinary predictions similar to those of \cite{HV1}. \subsection{Stephanov's approach} In a companion paper \cite{myunSteph}, I will more carefully and pedagogically add in the $1/N$ and $1/\sqrt\lambda$ corrections to the ``narrow-tower'' model. I will also consider some variants of this model and see how easily the predictions may be altered. (For instance, in many realistic models, the tower may have a finite or infinite number of states extending only over a finite range of energies, with a continuum above. The continuum may also have additional embedded resonances. Alternatively, the density of narrow states may suddenly change by a finite factor at some scale. The spacing of the states need not be uniform; there may be degeneracies of states that grow with $n$; etc.) I will also consider further how hard one must push the theory to make the original narrow-tower model of \cite{unSteph} appropriate to the physics. Here, I will keep things short and state some of the more subtle claims without proof. First some notation. The radial coordinate in $AdS_5$ will be called $r$, running from a boundary at $r=\infty$ to a horizon at $r=0$; the five-dimensional metric is \begin{equation} ds^2 = {r^2\over R^2} (-dt^2+dx^2+dy^2+dz^2)+{dr^2\over R^2} \ . \end{equation} For RS experts, my coordinate is chosen such that if I cut off the space at $r=r_{{\rm UV}}$ for large $r$ and at $r_{{\rm IR}}$ at small $r$, then in RS1 the Planck brane is at $r_{{\rm UV}}$ and the TeV brane at $r_{{\rm IR}}$. 
In RS2 the interpretation would be slightly different; but in any case, the space represents in this case a theory which is conformal between the two energy scales $\mu_{{\rm UV}}\sim r_{{\rm UV}}/R^2$ and $\mu_{{\rm IR}}\sim r_{{\rm IR}}/R^2$. With no UV or IR cutoff, and with the addition of a well-behaved five dimensional compact space $X$ to make a total of ten dimensions, a superstring theory on this space would, according to gauge/string duality, precisely represent a conformal gauge theory. Stephanov considered an AdS space with $r_{{\rm UV}}=\infty$ and $r_{{\rm IR}}$ finite. In a fully consistent setting, this would correspond to a theory which is conformal at high energy but at low energy has some sort of conformal symmetry breaking. Cutting off the space sharply, without any nuances, is called the ``hard-wall'' model. It has been used extensively for study of confining gauge theories at large 't Hooft coupling in gauge-string duality \cite{hardscat, DIS, haduniv, rhouniv, Brodsky, BPST}. It has its limitations, but is often useful. In such a model, the scale $\mu_{{\rm IR}}\sim r_{{\rm IR}}/R^2$ is of order $\Lambda$, the confinement scale. Other models \cite{softwall} give similar structure, though the details differ. It is a natural conjecture (independent of $\lambda$) that in the $N\to\infty$ limit of a confining gauge theory the two-point function of a reasonable operator can be exactly written as \bel{twopointAdS} \vev{{\cal O}(q){\cal O}(-q)} = \sum_n {|F_n|^2\over q^2-m_n^2 + i\epsilon } \end{equation} where the masses $m_n$ are those of the confined hadrons $|n\rangle$ created by acting with the operator ${\cal O}$ on the vacuum, \begin{equation} {\cal O}|0\rangle = \sum_n F_n |n\rangle\ . \end{equation} Within the hard-wall model and its cousins, and using the low-energy five-dimensional gravity theory, these equations can be shown to be true without subtleties. 
Thus this equation (and its analogues for higher spin) is correct for $\lambda\to\infty$, $N\to\infty$, $\lambda/N$ fixed and not too large, at least for primary operators with spin and dimension of order 1. One can see that within the hard-wall model, and many of its variants, the only significant change as $d_{\cal O}$ changes is the $n$-scaling of the $F_n$, as reviewed in \cite{unSteph}. The $m_n$ change; their $n$-scaling typically does not. However, this precise feature is a property of a particular model, and as noted in \cite{unSteph} the constraints are rather weak. Conformal invariance only requires that the two-point function approaches a particular power law. In the limit that $r_{{\rm IR}}\to 0$, as reviewed in \cite{unSteph}, the spacing between the modes goes to zero, and the two-point function must regain its conformal form. But this requirement imposes only a single relation between the large-$n$ behavior of the $m_n$ and that of the $F_n$. As emphasized in \cite{unSteph}, the requirements of conformal invariance on \Eref{twopointAdS} do not permit the mass spectrum of the tower to be predicted from the operator dimensions alone. Thus to measure $d_{\cal O}$, one must measure something else. \subsection{Finite $N$ Effects on the Spectrum} In \cite{unSteph} it was proposed that one should measure the lifetimes of the states. Let us review the calculation.
Although there are technical problems with the particular case chosen, the basic logic is correct; with a coupling in the Lagrangian $\sim \hat c {\cal O}_{SM}{\cal O}$, where ${\cal O}_{SM}$ is a standard model operator, the decay of a state $\ket n$ in the tower to a standard model state such as $\mu^+\mu^-$ is proportional to \begin{equation} \big| \hat c \ \vev{\mu^+\mu^-|{\cal O}_{SM}|0}\ \vev{0|{\cal O}|n} \ \big|^2 \end{equation} For example, in the case \cite{unSteph} considered, one obtains (converting to a notation in which $m_n\sim\Lambda n^{\sigma}$ at large $n$) a decay rate of the form \begin{equation} \Gamma_{\ket{n}\to\mu^+\mu^-} \sim \alpha^{(n)}_{{\rm eff}} m_n \end{equation} where the effective coupling is \begin{equation} \alpha_{{\rm eff}}^{(n)} = {c^2 A_{\cal O} \over 16 \pi^2} \left({m_n\over M_Z}\right)^{2(d_{{\cal O}}-1)} \end{equation} Here $A_{\cal O}$ is a normalization constant for the unparticle, of order 1, and we have converted $\hat c$ into a dimensionless constant $c$ times the appropriate power of $M_Z$, following \cite{unSteph}. This rate can be enormously suppressed if either (1) $c$ is very small, or (2) $\Lambda, m_n \ll M_Z$ and $d_{\cal O}$ is significantly above 1. We see, therefore, that if the lifetimes of the states can be measured, then so can $d_{\cal O}$. But how can they be measured? \subsubsection{A necessary condition} Can these states decay with visibly displaced vertices? This requires lifetimes in the picosecond range or longer. For decays to electrons (muons), the electron (muon) mass is of order 1 (100) MeV, so let us take $m_n\sim 1\ (100)$ MeV as well, to lengthen the lifetime as much as possible. 
Then we must have \begin{equation} \alpha_{{\rm eff}}^{(n)}\alt 10^{-9}\ \ ({\rm decay\ to\ electrons}) \end{equation}\begin{equation} \alpha_{{\rm eff}}^{(n)}\alt 10^{-11}\ \ ({\rm decay\ to\ muons}) \end{equation} These tiny numbers are already an issue since production rates, compared to ordinary electromagnetic processes, are very small. However, there is a more serious issue. \subsubsection{Why the previous condition is not sufficient} The above condition for displaced vertices, while necessary, is not sufficient! {\it It is only appropriate if the state $\ket{n}$ has no other decay modes.} And this is not true, except typically for the ground state and perhaps the first excited state in every tower. The narrow-tower model implicitly assumes that the 5d scalar field that represents the scalar unparticle is non-interacting. This is equivalent to assuming that the unparticle has no 3- or higher-point functions. This is true in the $N\to\infty$ limit (and also in the free $d_{\cal O}\to d_{min}$ limit, where ${\cal O}$ becomes an ordinary free particle.) But the infinite $N$ limit is very misleading (as is the $d_{\cal O}\to d_{min}$ free particle limit.) At any finite $N$ a conformal gauge theory will have $k$-point functions for $k>2$. Equivalently, the 5d scalar field {\it will have self-interactions} (if $d_{\cal O}>d_{min}$). As a result, the unparticle now has higher-point functions. Also, one cannot treat one unparticle tower in isolation. There are always other fields in the bulk. At the very least, the 5d graviton {\it must} be present, because it represents the energy-momentum tensor of the hidden sector, which is part of any conformal theory. Each such field, in a confining gauge theory, will have its own tower. And of course the graviton interacts with itself and with all other fields in the bulk. In fact, in any conformal gauge theory one expects many fields in the bulk, with many quantum numbers. 
Any conserved currents in the theory, for instance, will be represented as 5d massless gauge fields, and they too will interact with themselves, with the graviton, and with any 5d fields that carry the corresponding conserved charge. All this is to say that there is no conformal gauge theory without three-point functions and operator product expansion (OPE) coefficients --- except at $N\to\infty$ --- and that $T_{{\mu\nu}}$ and conserved currents $J_\mu$ always have a nontrivial OPE. Once these interactions are introduced, we no longer expect a tower of extremely narrow states decaying to standard model particles. Any state with high mass will decay via these interactions. The states may still be relatively narrow, but nowhere near as narrow as predicted in \cite{unSteph}. \subsubsection{Another necessary condition} Let us now estimate the widths of the excited states. (We assume $d_{\cal O}$ is not very close to $d_{min}$; otherwise special treatment is required.) The width of the $n^{th}$ state ($n>1$) to other hidden sector states, may be very roughly estimated as \begin{equation} \sum_{n',n''}\Gamma_{\ket{n}\to \ket{n',n''}} \sim {g(n) m_n\over 8\pi N^2} \end{equation} where $g(n)$ characterizes the growth in the number of decay channels and monotonically grows with $n$. The states become narrow rather quickly with $N$, for fixed $n$, but conversely their widths grow with $n$. Suppose as $n$ becomes large that $m_n\sim n^{\sigma}\Lambda$ and $g(n)\sim C n^\beta$. Typically $\sigma\leq 1$ (it is $\frac12$ in QCD and in string theory and is 1 in many gauge-gravity duality examples.) Meanwhile one might naively expect $\beta\sim 2$, accounting for the scaling of available channels with $n$, but in most computable theories all but $n$ couplings are small, as in \cite{haduniv}, so to be conservative let us only assume $\beta\geq 1$. Finally $C>1$ accounts for the presence of multiple towers of states in the theory, which provide multiple classes of decay channels. 
If $N_f$ is large, then $C\sim N_f$; the situation for small $N_f$ is less clear, but $C$ is certainly larger than 1. The states bleed together when \begin{equation} m_n-m_{n-1}\sim n^{\sigma-1} \Lambda \end{equation} is of order \begin{equation} \Gamma_n\sim {C n^{\sigma+\beta}\over 8\pi N^2} \Lambda \end{equation} and thus occurs at \begin{equation} n\sim\left[ {8 \pi\over C} N^2\right]^{1/(\beta+1)} \ . \end{equation} Since $\beta+1\geq 2$, we expect at most the first $N$ states to be narrow relative to their separation. Actually this is often a large overestimate, due to the fact that we have ignored stringy effects; we will return to this in the Sec.~\ref{subsec:finitelambda}. The inverse of these widths puts an upper limit on the lifetimes of the excited states. Unless \begin{equation} {g(n)\over 8\pi N^2}\alt 10^{-9}\ \ ({\rm decay\ to\ electrons}) \end{equation}\begin{equation} {g(n)\over 8\pi N^2}\alt 10^{-11}\ \ ({\rm decay\ to\ muons}) \end{equation} the lifetimes will be too short for displaced vertices. Thus, except for the lowest one or two states, for which $g(n)=0$, this condition requires $N\sim 10^4$ for electrons and $10^5$ for muons. And this is generous, because we assumed the lowest possible mass for the decaying state. Also, we would hope to see at least four or five states, in order to measure a power law, and $g(n)\sim Cn^\beta$ is often large compared to one and grows with $n$. {\it In short, we do not expect a tower of states with displaced vertices unless both $N\gg 10^4$ and $\alpha_{{\rm eff}}^{(n)}$ is very small.} What, then, is the phenomenology more likely to be? There are two possibilities, depending on whether the decays of the hidden sector states are to other hidden sector states or to standard model particles. 
Decays to standard model states will dominate only if \begin{equation} \label{whowins} \alpha_{{\rm eff}}^{(n)} > {g(n)\over 8\pi N^2} \end{equation} But $\alpha_{{\rm eff}}^{(n)}$ cannot be large, or effects from the hidden sector would already have been seen. (In fact, if $\alpha_{{\rm eff}}^{(n)}$ is of the same order as $g(n)\over 8\pi N^2$, then constraints on $\alpha_{{\rm eff}}^{(n)}$ are even stronger than is often realized, because of effects that we will discuss in Sec.~\ref{subsec:unproduceN}.) Again, we are forced to take large $N$ --- not as large as required for a tower of displaced vertices, but still very large. Being very generous, we would require $N\sim 300$ in almost any conceivable situation; much larger $N$ is required, for instance, in the case of a vector unparticle mixing with the $Z$ boson. \subsubsection{If decays to standard model particles dominate} If $N$ is of order 100 or more and the unparticle coupling is as large as is allowed by experiment, then there is a narrow window in which \begin{itemize} \item the widths of the states are determined by the unparticle coupling and \item the widths of the states are large enough to measure. \end{itemize} Assuming resolutions in the few MeV range, and masses in the few GeV range, one might imagine that if $\alpha_{{\rm eff}}\agt 10^{-3}$ then one could measure the scale-invariant prediction of \cite{unSteph} through a tower of states with growing widths. However, if $\alpha_{{\rm eff}}$ is too small, the states simply won't have a measurable width, making the prediction untestable. In this case one can only measure the masses $m_n$. But recall that these are not determined by scale invariance, and cannot be used to measure $d_{\cal O}$. One might hope that since of order $N$ states may have narrow widths, higher states in the tower might always have $\alpha_{{\rm eff}}^{(n)}$ so large that the widths are large enough to measure. 
But a little thought shows there is no guarantee of such a regime. The higher states move closer together as $n$ increases, if $\sigma<1$, so instead of the widths growing to measurable size, the distance between adjacent states may shrink to unmeasurable size. Also, as $n$ increases so does $g(n)$, so it is possible that the higher states do not decay preferentially to the standard model. And worse, the logic used in the estimates in this entire section breaks down when $n$ is large enough that stringy effects must be accounted for; see Sec.~\ref{subsec:finitelambda}. \subsubsection{If decays to standard model particles do not dominate} If $N$ is less than 100 or so, or if $\alpha_{{\rm eff}}^{(n)}$ is small, the decays within the hidden sector dominate. {\it In this case the lifetimes are not determined by scale invariance; they are determined by $g(n)$, which depends on the details of the hidden sector.} Moreover, the partial widths to lepton pairs are typically very small, unless $N$ is very large, making the line-shape of the resonances difficult to observe. This is by far the most likely scenario for a hidden sector! {\it Thus we are not very likely to observe dilepton pairs from a tower of states}. For reasonable values of $N$, and for $\Lambda > 2m_e$, { we will find at most $N$ rather narrow states, decaying too rapidly to other hidden sector states for a displaced vertex, and with tiny branching fractions to dileptons.} The lifetimes of the states will not be set by the dimension of the unparticle. {\it The lightest state or states in some towers, which are the only states that cannot decay within the hidden sector, will have much longer lifetimes, and may decay to standard model particles with displaced vertices} (though possibly outside our detectors.)
Only in a narrowly tuned case --- small $N_f$, very large $N$, and a coupling large enough that the widths of the first few states are rather large and are determined by the unparticle coupling, might the lifetimes be both measurable and determined by scale invariance. \subsubsection{Summary} Let us summarize the implications of this section. Reference \cite{unSteph} includes a correct computation of the {\it partial width} for each state in the tower to decay to standard model particles and become observable. But one cannot then assume that this partial width is the total width, and invert it to infer long lifetimes for all the states in the tower. Instead, at reasonable $N$, almost every state in the tower, except the lowest one or two, will decay predominantly and rather rapidly to other states in the tower, or to states in other towers associated to other operators. The lifetimes will be much shorter than estimated in \cite{unSteph}, so there will be no displaced vertices. Moreover, the branching fraction to standard model states will be tiny, and it will be very difficult to measure the partial widths. This is simply the statement, familiar from QCD itself, that most hadrons in a gauge theory decay rapidly to other hadrons, and have large widths to do so; their branching fractions to, say, $e^+e^-$ are very small. Thus, unfortunately, the predictions of \cite{unSteph}, while possible at extraordinarily large $N$, are unlikely to be seen in nature. \makefig{0.4}{narrowtower}{The total cross section $\sigma(s)$ for hidden-sector production in an $e^+e^-$ collider, in the narrow tower model of an unparticle.} \subsection{Unparticle Production at Finite $N$} \label{subsec:unproduceN} \makefig{0.4}{comparetowers}{The total cross section $\sigma(s)$ for hidden-sector production in an $e^+e^-$ collider. The thick curve is for small $N$ and resembles QCD; the thin curve is for larger $N$ and more resonances are visible.
The first resonance is much narrower than the others, as it can decay only by emission of standard model particles; the others decay within the hidden sector. See \cite{HVWis} for a study of a hidden valley with a light dilepton resonance.} The cross-section in the narrow tower model takes the form of \reffig{narrowtower}. In \reffig{comparetowers} are shown possible cross-sections for more realistic towers, for small $N$ and large $N$; the small $N$ case resembles low-energy QCD in the $\rho$ channel, and the large $N$ case resembles charmonium production without open charm. But this is the total cross-section for hidden sector production, whereas the cross-section for $e^+e^-\to \mu^+\mu^-$, both for small and larger $N$, suffers from the low dilepton branching fraction for hidden-sector states; it is shown in \reffig{comparetowers2}. Only the first resonance is potentially observable, and since it is small and very narrow, it may easily have been missed up to now. \makefig{0.4}{comparetowers2}{A cartoon of the cross-section for $e^+e^-\to \mu^+\mu^-$ in a model at small to moderate $N$. The falling standard model production rate is supplemented by a single extremely narrow resonance, the lowest resonance in \reffig{comparetowers}. Its height has been exaggerated greatly for clarity. None of the other resonances have measurable branching fractions to $\mu^+\mu^-$. Only a handful of visible resonances are expected in a generic hidden valley, at most one or two for each tower.} In fact, QCD is an excellent model for the hidden sector. From the point of view of the leptons of the standard model, with electromagnetism turned off, it {\it is} a hidden sector, coupled to leptons only by the Fermi interaction. It is no accident that AdS/QCD methods, placed into the hidden sector and treated with care, reproduce QCD-like physics in their effect on $e^+e^-\to \mu^+\mu^-$. 
Now suppose that we do choose a theory with $N\sim 20$ and choose to run an $e^+e^-$ collider on one of the excited hidden-hadron resonances (say, $n=10$). What will we see? Certainly we will not observe the process shown in \reffig{unrare}; the branching fraction is too small. Most of the time the resonance will undergo a cascade decay to several light hidden hadrons, each stable against decay to others, which {\it in turn} will decay with long lifetimes to standard model particles. This classic hidden valley signature is shown in \reffig{uncommon}. \makefig{0.4}{unrare}{An allowed but very rare process, in which the tenth resonance is produced by and decays back to standard model fermion pairs.} \makefig{0.4}{uncommon}{A much more common process; the 10th resonance, produced by standard model fermion pairs, decays through a cascade. In the final state appear several particles, each the lightest $(n=1)$ resonance of a tower. Each of these then decays (possibly late) to standard model particles, here assumed to be fermion pairs.} For large $n/N$, the very language that we used in the previous paragraph breaks down. The sum in \Eref{twopointAdS} is modified in two ways at finite $N$. First, the poles at $m_n^2$ move off the real axis to become resonances. This already gives the two-point function the form shown in \reffig{comparetowers}. But we also must supplement the formula \eref{twopointAdS} with cuts from multi-particle production, which is suppressed by factors of $N$ but becomes increasingly important at large $q^2$. We need not produce hadrons only by producing them resonantly, as in $e^+e^-\to \rho \to \pi^+\pi^-$; we may simply produce them directly, as in $e^+e^-\to \rho^+ \pi^-\pi^0$. As $q^2$ increases, the cuts accumulate, eventually stealing support from the resonances and building up the continuum contribution to the two-point function. 
The dominant production at large $q^2$ is thus not even of single unstable heavy hadrons, but rather of multiple light or moderately light hadrons, whose phase space grows very rapidly as $q^2$ increases. Any excited hadrons among those produced will decay to the lightest ones, and a large number of light hadrons may then decay with long lifetimes to standard model particles. The production process, absent in the narrow tower model, is illustrated in \reffig{likely}; of course it also gives a high-multiplicity final state. {\it Importantly, even if the dilepton branching fractions of the excited states are large, rare high-multiplicity events must not be ignored, as they provide strong constraints on new hidden sectors.} Even a small number of events with four or more leptons or photons would have been easily observed, since standard model backgrounds fall so rapidly with multiplicity. In turn, this implies there are much stronger constraints on $\alpha_{{\rm eff}}^{(n)}$, and thus on $c$, the unparticle coupling, than have so-far appeared in the literature. And in turn, because of \Eref{whowins}, this makes the likelihood of observing excited states with measurably large dilepton branching fractions even smaller, and the likelihood of high-multiplicity states even larger. \makefig{0.4}{likely}{At higher energy, multiple resonances are produced together; each undergoes a cascade decay as in \reffig{uncommon}.} Thus, instead of a tower of neutral resonances with long lifetimes, the prediction of the narrow-tower model at a reasonable $N$ is indeed that of a typical hidden valley. As in \cite{HV1}, events that access the hidden sector will result in {\it light neutral resonances with long lifetimes, produced in abundance, with large event-to-event fluctuations and possibly large missing energy.} Here the abundance arises through the cascade decay of a heavy resonance, and/or through nonresonant production of multiple particles. 
\subsection{Effect of Finite $\lambda$} \label{subsec:finitelambda} Still we have not captured all the physics. If $\lambda$ is infinite, there are no states in the spectrum with spin higher than two. But any confining gauge theory has hadrons with arbitrary spin. In gauge/string duality it has been found \cite{GKP} that the ratio of the masses of hadrons with spin $>2$ and higher to the lightest hadrons of spin 0 through 2 is $(\lambda)^{1/4}$. Thus there are towers of states of spin 5/2 and above whose lowest states lie near $(\lambda)^{1/4}\Lambda$. Note this need not be that large; if $N=100$ one cannot take $\lambda>100$ (see \cite{myunSteph} for more details) and the fourth root of 100 is only about 3. Thus it is only legitimate to fully neglect these states, in this case, for processes which have center-of-mass energy below, say, $5\Lambda$, or perhaps $10\Lambda$. At higher energy, the higher-spin states are present, increasing the phase space for heavy resonance decays and the rate of multi-hadron production at large $q^2$. This drives us still further from the model of a narrow tower. In addition, even a low-spin tower itself is altered by mixing of the original states in the tower with low-spin strings when the excitation level $n\sim \lambda^{1/4}$. This is a key breakdown of the gravity approximation to the string theory. Although the mixing is small, the number of strings at any given excitation level grows exponentially, so at large $n$ these states cannot be neglected. Unparticle production is then of massive strings in five dimensions, not of low-mass scalars in five dimensions. These massive strings then decay to a number of light resonances that lie at the bottom of the various towers. This is almost enough to recover our one missing piece: the parton shower, and ensuing hadronization. This is claimed in \cite{HV1} as the dominant high-energy process in a hidden valley, and is certainly the dominant process in QCD. 
Where is it in this five-dimensional language? More details will be given in \cite{myunSteph}, and the argument uses some subtle features of string theory, but I will simply claim here that the parton shower at large $\lambda$ is dual to a string falling in five dimensions toward the hard wall. What is happening from the five dimensional point of view is easy to understand by looking carefully at what happens in the gauge theory. Let us use the language of QCD, speaking of color, quarks, antiquarks and gluons; we will then carry over the insight into the hidden sector. When a quark and antiquark are produced, they emit gluons, whose color lines are correlated with the quarks and with each other, as captured in 't Hooft's double-line notation. As is well known, at any moment in time we can draw a line from the quark to the gluon whose anticolor is correlated with the quark's color, and from there to the next gluon whose anticolor is correlated with the first gluon's color, and so forth --- forming a string. This is the same string which is used at the moment of fragmentation in the Lund string model, but before fragmentation occurs, during the parton shower, this is a string falling in five dimensions. (Note this is no classical string; it is a quantum, fluctuating, string.) It falls from $r\sim\sqrt{\hat s}/R^2$ down to $r = \Lambda/R^2=r_{{\rm IR}}$, where it stops falling and hadronization occurs. As the string falls, an observer at a fixed $r$, corresponding to a probe of the string at some momentum scale $\propto r$, can resolve smaller and smaller structures within the string \cite{Susskind}. Thus the string contains more and more resolvable partons as the scale decreases. Is this claim, that the full string theory is needed to see the parton shower, plausible? As another line of argument, which is really the same argument in a crossed channel, consider the following. 
The same operators which control parton splitting in DGLAP evolution control the leading-order behavior of parton splitting in the parton shower. We must keep the classically-twist-two operators of spin 4 and higher at weak coupling, if we are to see the parton shower. If we then dial up the coupling, and {\it drop} all operators of dimension $\sim \lambda^{1/4}$, which include all operators of spin $>2$, we will be throwing away the parton shower itself. A more precise version of this argument will be presented elsewhere. Of course, when the string reaches the bottom at $r_{{\rm IR}}=\Lambda R^2$, it may for an instant be viewed as a highly excited, highly tangled hadron. It cannot be viewed as one of the hadrons in the tower of a simple five-dimensional scalar; such hadrons are five-dimensional points and have no internal structure. Here we have an extended object, with many quantum numbers, fundamentally a string. Interactions then cause the string to fragment, slowly if $N$ is large, quickly if $N$ is smaller or if the number of light flavors $N_f$ is nonzero. The hadronization process is, as in the Lund string model, the breaking apart of a string which is largely {\it four-dimensional}, being localized in the fifth dimension near the minimum value of $r_{{\rm IR}}$. In short, we must remember that $\lambda$ is finite, and include the strings, if we are to see the physics of parton showers and the process of hadronization. This physics can play an important role in the phenomenology, as outlined in \cite{HV1}. \subsection{Conclusions} In this section a more physical version of the (formerly)-narrow-tower model has been reconstructed. When modes of the hidden sector are produced, we expect the following signatures: \begin{itemize} \item At low energy $\sim \Lambda$, one may find a few light stable hidden hadrons of low spin, which decay back to standard model particles with long lifetimes. 
\item At higher energies one finds excited resonances, which decay rapidly to two or three light stable hidden hadrons, which in turn decay to standard model particles in the final state. \item Next the resonances become a continuum; production of several hidden hadrons becomes likely, leading to an increasing multiplicity of light hidden hadrons and consequently of standard model particles. \item At still larger energy, the parton shower begins to play a role in the evolution of the final state, making a purely hadronic description of the process inconvenient and indeed misleading, but without changing the basic signature of a high-multiplicity event. \item In all of these processes, the final state consists of a number of light neutral long-lived hidden-sector hadrons, with a multiplicity that grows with energy. \item The lightest hadrons have lifetimes orders of magnitude longer than most other hadrons, and may produce observable displaced vertices or give missing-energy signals. \end{itemize} The specific signals observed will of course depend on the details of its gauge and matter content of the hidden sector. But having included five-dimensional interactions and five-dimensional strings, we now see that this model has all the features and signatures of a confining hidden valley. As we did not rely upon strict conformal invariance, the result is largely independent of whether there are true unparticles at energies above the confinement scale. However, despite the universality of the result, there are some important features of the hidden-valley signal which can be affected by strong dynamics. Let us now turn our attention to these. 
\section{Other Effects of Strong Dynamics on Hidden Valley Phenomenology} \label{sec:othereffects} From the way I have presented things, the reader might be left with the impression that the ultraviolet strong dynamics that is present in conformal field theories with large anomalous dimensions has no impact whatsoever on the infrared physics. But this is not the case. Even if the low-lying states in the hidden sector are visible, giving hidden valley signals and making unparticle methods less central to the phenomenology, the strong dynamics can have a crucial impact on what we will see. Yet this comes not from the produced ``unparticle'', but from other effects of strong coupling on the phenomenology: on resonances, on flavor symmetries within the hidden sector, on supersymmetry breaking within the hidden sector, and on the hidden parton shower. \subsection{Effect of Strong Dynamics on Resonances} \label{subsec:resonances} As we noted in Sec.~\ref{subsec:unbroken1}, one may well expect narrow resonances just below the point where continuum production of the hidden sector begins. (There may also be resonances, narrow or wide, within the continuum.) The spacing between the resonances, and the number of resonances that precede the continuum, are an important opportunity for learning about the hidden sector, just as charmonium was an important probe of QCD. Had QCD been more strongly coupled, the spacing between the charmonium states would have been wider, and the number of states below the open-charm continuum might have been very large. (Indeed in the large $\lambda$ limit, using \cite{KarchKatz}, it was found \cite{wintersD7} that a number of extremely deeply bound states may in some cases appear well below the onset of a continuum. The known examples require supersymmetric cancellations, but perhaps there are other mechanisms to obtain similar effects.) 
In fact, if the resonances are from a massive field $\phi$ whose mass does {\it not} strongly destabilize the conformal dynamics, then the $\phi$-onium states will still be bound by exchange of effectively-massless gauge bosons. This can happen in the toy models B and C of Sec.~\ref{sec:breakCFT} if only one of the $N_f\gg 1$ scalars becomes massive at $M$, the others remaining light. The $\phi$-onium states will form an almost perfectly positronium-like system (though possibly at much stronger coupling) and the strong coupling can be measured from the positions of the resonances. Note this neither measures nor depends upon the dimension of any unparticle; conformal exchange is always Coulomb exchange, and only its coefficient depends on the coupling. However, the central phenomenological question is whether and how the resonances can be observed. The branching fraction directly to two or three standard model particles will be low in this case, because annihilation to the effectively-massless hidden sector gauge bosons will dominate; see \cite{HVWis} for a brief discussion of hidden quarkonium. If the low-energy theory has some of the signatures of Sec.~\ref{subsec:broken} or \ref{subsec:unbroken2}, then the decays will be observable. Still, detailed kinematic reconstruction will have poor resolution at a hadron collider, so it is unclear how much information can be extracted without an ILC. \subsection{Effect of Strong Dynamics on Global Symmetries} \label{subsec:flavor} In models with conformal dynamics and approximate global symmetries, renormalization effects can often either enhance or destroy those symmetries. This is determined by whether global-symmetry-breaking operators have larger or smaller dimensions than global-symmetry-preserving operators; if they are larger, then the global symmetry is accidentally restored at low energy, while if smaller then flavor is badly broken. 
For instance, in models A and B, a critical question is whether the adjoint or singlet scalar-bilinear has a larger anomalous dimension. As an illustration, let us imagine how different the standard model might have been had it been conformal at high energy. Imagine for example that the standard model itself became strongly interacting above the electroweak scale. This is not a completely consistent example, but still instructive. A very important effect is that the Yukawa operators coupling the Higgs boson to standard model fermions would have nontrivial anomalous dimensions. If the dimensions of the Yukawa operators were all less than 4, then all Yukawa couplings would be driven large, all masses would be driven toward 100 GeV. But if all the operators were irrelevant (as in the simplest versions of technicolor) then all masses would end up small (which is in part why simple technicolor has trouble with the top quark Yukawa coupling.) If all masses are driven very small then there can be greatly enhanced symmetries: for example, light $c$, $b$ and $t$ quarks would give larger chiral symmetries below the QCD scale. This would then introduce stronger GIM-like suppressions into flavor-changing processes, and organize the hadrons into larger multiplets. Another more subtle effect is that mixing angles between quarks could also be reduced by strong dynamics. If some Yukawa operators have very different anomalous dimensions from others, then the matrix of Yukawa couplings may tend to become highly structured and the CKM matrix is driven diagonal. Indeed, an analogous phenomenon was put to use in a realistic model of flavor in \cite{NSflavor}. In a similar way, strong dynamics in the hidden sector can easily drive physics into regimes where hidden flavor symmetries are naturally approximate, rather than being either violated at order one or exact. 
Such a structure can in turn have observable effects, such as \begin{itemize} \item A large multiplet of nearly degenerate hidden states which cannot decay to one another, and thus must decay via emission of standard model particles; \item Increased lifetimes for decays between hidden-sector ``generations,'' due to reduced intergenerational mixing angles. \end{itemize} Both of these can have a major impact on the phenomena observed. Unfortunately, these signatures are not unique to, nor are they required by, strong dynamics, so they are not smoking guns for large anomalous dimensions. Also, I know of no new urgent experimental issues raised by this possibility that are not already under discussion. In short, though interesting, this consequence of strong coupling is something that theorists might consider further, but appears not to be urgent for experimentalists preparing for the LHC. \subsection{Effect of Strong Dynamics on Supersymmetry Breaking} It is well known that strong dynamics in a supersymmetric theory can suppress part or all of supersymmetry breaking in that sector; for an application to phenomenology, see \cite{NSSUSY} (which contains a review of the basic physics in an appendix.) Thus although supersymmetry breaking in our sector may have a characteristic scale of 100s of GeV, the hidden sector may be much more supersymmetric than our own. The spectrum of hidden sector particles is not easy to determine, but might show approximate degeneracies amongst bosons and fermions. Indeed it is even possible that the discovery of supersymmetric particles, or convincing verification of supersymmetry in nature, will occur through the hidden valley phenomenology. 
For example, if in addition to hidden-hadron decays to the standard model there are supersymmetric hidden-hadrons (analogous to R-hadrons within QCD \cite{farrar}), then the decay patterns of the various states may reveal degeneracies, and perhaps relations among decay rates or branching fractions, that would be characteristic of a supersymmetric theory. One amusing example is that of $\phi$-onium resonances within a still-conformal sector, a case mentioned in Sec.~\ref{subsec:resonances}. There one could imagine predicting, and observing, effects from the supersymmetric generalization of positronium. Let us make two immediate cautionary remarks. This scenario is not unique to strong dynamics. Supersymmetry breaking could be suppressed in the hidden sector by other, purely perturbative means. For example, in gauge mediation the messenger sector coupling to the hidden sector may be absent, or the relevant messengers very heavy, such that the standard model sector receives a louder message and gets a larger array of soft masses than does the hidden sector. Also, very weak gauge couplings in the hidden sector would result in rather weak supersymmetry-breaking effects. Furthermore, even if supersymmetry-breaking effects are large for most particles, approximate symmetries or dimensional counting arguments can easily make gaugino masses very small, leaving a hidden sector whose low energy physics might accidentally be an almost exactly supersymmetric Yang-Mills theory, with massless hidden gluons and with hidden gluinos light compared to the hidden confinement scale. The other problem is that degeneracies in the hidden sector spectrum can arise from ordinary bosonic global symmetries, or simply from kinematics. For example, in a hidden pure Yang-Mills theory, the masses of metastable hidden glueballs which can only decay to the standard model sector lie within a narrow band, just by kinematics. 
In a model with light pions, the observable pions may have very similar masses because they transform as a simple multiplet under an accidental global symmetry --- possibly enhanced through strong dynamics, as discussed above in Sec.~\ref{subsec:flavor}. Thus, it is certainly possible that a hidden sector will reveal the first superpartners and other features of supersymmetry at the LHC, but it is likely to be far from obvious. Fortunately, it appears that there are no special experimental challenges in the supersymmetric limit, so that detection and analysis techniques do not need to be specially tuned. It appears to be enough to make careful but standard measurements of multiple processes. \subsection{Effect of Strong Dynamics on the Parton Shower} By contrast, the last effect I want to consider is of both theoretical and experimental importance. In hidden valley models with strong dynamics far above the mass gap, the parton shower can be very much more powerful than in QCD. This potentially can turn hard jets into soft spray, and make events more spherical, with much higher multiplicities, than in hidden valleys with weaker couplings above the mass gap. This possibility poses significant experimental challenges, whose details cannot be known without further theoretical development. Let us first see why the parton shower is the most sensitive ingredient in a hidden valley to strong conformal or near-conformal dynamics in the 1--1000 GeV range. The first element in a hidden valley model, the coupling between the two sectors via some communicator, can be impacted by the strong dynamics, but indirectly, in a way that might not readily be observed. (There could be a large impact on the line shape of an easily observed particle, however.) 
The third element, a mass gap, explicitly involves the breaking of conformal invariance, and we have seen how the phenomenology can emerge in many different ways that are not especially sensitive to the dimensions of operators in the conformal regime. What of the second ingredient, the multi-particle production mechanism? Cascade decays explicitly involve breaking of conformal symmetry, and hadronization is a violent violation of conformal symmetry. But the parton shower {\it is} conformal dynamics in action. The very form of the parton shower is determined by anomalous dimensions of operators, as noted above. These are not the low-spin low-dimension operators which we might couple to the standard model in the Lagrangian, but Wilson lines, or in the crossed channel, the high-spin high-dimension classical-twist-two operators which are always present in a gauge theory. If these operators have small anomalous dimensions, the parton shower is inefficient. In QCD the quark in a $q\bar q$ production process loses only a moderate fraction of its energy to gluon emission, much of which is collinear with the quark, maintaining a coherent jet. For this reason, $q\bar q$ production gives predominantly two-jet events, sometimes three. In a weakly-coupled hidden valley, this jetty structure in the hidden sector is typically retained, though blurred, as the hidden hadrons decay into standard model particles. An example \footnote{This event was generated using the HVMC Monte Carlo, version 0.5 \cite{HVMC}. This Monte Carlo is based on Pythia 6.4, combining its routines to simulate $Z'$ decay to the hidden sector, showering and hadronization within the hidden sector, and decay of hidden-sector hadrons back to standard model particles, which is followed by standard model showering and hadronization.} is shown in \reffig{weakvjets}. 
However, if the DGLAP operators have large anomalous dimensions, as occurs in gauge theories at large $\lambda$ with string theory dual descriptions, then the parton shower is very efficient. (See \cite{DIS} for a related effect in deep inelastic scattering.) The quarks and gluons shower so quickly, through both collinear and soft emission, that they all become soft. Soft emission is not collinear with the initial quark, and so the original direction of the quark's motion is largely forgotten. Moreover, hard emission is also not suppressed as it is in weak coupling, so the production process itself is likely to have several gluons along with the quark and antiquark. Altogether, the events at strong coupling are likely to be more spherical than jetty, though I do not know how to calculate the fluctuations away from perfect sphericity. \footnote{The statements made here are clearest at large $N$; there will be $1/N^2$ corrections to these statements that deserve more study, in which color singlet combinations of partons are radiated off to begin their own, separate parton shower. Whether these could introduce strong fluctuations in the appearance of the events is not yet clear.} Also, the number of hidden hadrons will be larger, and their $p_T$ distribution much softer than at weak coupling. A guess at the appearance of such an event is shown in \reffig{strongvjets}; again I emphasize this is a guess. \footnote{A $Z'$ decay to a hidden quark and antiquark was dressed with a Pythia parton shower of hidden gluons, in which the showering rate was enhanced by fixing the gauge coupling at a large enough value that collinear effects largely vanished by the scale of hidden confinement. 
The result may bear only a passing resemblance to the actual physics of a strongly coupled theory, but this guess appears physically reasonable, and should at least be thought-provoking.} Notice how the preferred axis and the strongly variable calorimeter signal in \reffig{weakvjets} are absent in \reffig{strongvjets}. \makefig{0.4}{weakvjets}{An event (generated with HVMC 0.5) in which a 3.2 TeV $Z'$ decays to 30 GeV v-pions (see \cite{HV1} for definitions) in a hidden sector which has a weak coupling above $\sim 100$ GeV. Notice the thrust axis is roughly vertical, though the events are by no means pencil-like. The event shown contains roughly twenty bottom quarks and tau leptons.} \makefig{0.4}{strongvjets}{An event (generated with HVMC 0.5) in which a 3.2 TeV $Z'$ decays to 30 GeV v-pions (see \cite{HV1} for definitions) in a hidden sector which has a strong coupling at all energies. Notice the event is now spherical. The event shown contains roughly fifty bottom quarks and tau leptons.} There are many challenges here which need to be addressed. For example, what will result from the application of jet algorithms to these events? How much energy must such events have in order to be easy to see? Are there any serious detector backgrounds which could mimic such an effect? What if most of the particles produced are long-lived; could the pattern recognition software become too confused to operate properly? And if such events are identified, what questions can we ask of them to identify their source? What observables will most usefully allow analysis of such events? And finally, what is needed in formal theoretical development so that the guesswork that goes into \reffig{strongvjets} can be replaced by reliable prediction? \section{Conclusion} We have seen that unparticle models \cite{Un1, Un2,allunparticle} with mass gaps \cite{unSteph, unFox, unQuiros} are typically examples of hidden valley models \cite{HV1,HV2,HV3}. 
Not all hidden valley models are conformal; not all unparticle models have mass gaps which result in particles with observable decays; but there are many ``hidden valley/unparticle'' models with both features. We have seen that in these cases the dominant exclusive processes typically involve classic hidden valley phenomenology: new neutral light particles with long lifetimes, often produced with high multiplicity, along with new Higgs decays (and also decays of LSPs and related particles, though we did not study these here) to the hidden-valley particles. Some of the hidden-valley particles themselves decay to standard model particles, possibly with observably displaced vertices. To see this required some tweaking of results in the literature, in particular, clarifying what can and cannot be said about conformal symmetry breaking using the language of unparticles \cite{unFox}, and adjusting the narrow-tower model of unparticles \cite{unSteph} to include various corrections which though small make a huge change in the physical phenomena. We also saw that the assumption that the hidden sector physics is invisible, as for example in \cite{unQuiros}, is often too pessimistic; the Higgs decays may not only be visible, they may be spectacular, as in \cite{HV1,HV2}. Still, in addition to this, inclusive studies using the above-mentioned events, and certain rare processes, may be able to reveal the special conformal kinematics associated to unparticles. I have also suggested that a dominant effect of strong coupling, and in particular large anomalous dimensions for twist-two operators, is the strong enhancement of the parton shower, and an increased sphericity, higher multiplicity, and lower $p_T$ spectrum in high-energy hidden valley events. More work is needed to confirm this suggestion, and to understand how strong dynamics, with or without conformal invariance, can affect the phenomenology of hidden valleys in other observable ways. 
As in QCD, where both exclusive and inclusive questions have their merits, and where approximate scale invariance plays an important role in many analyses of QCD data, the study of a hidden sector should proceed by combining information from both exclusive final states (which, if visible, involve hidden valley phenomenology) and inclusive final states (which are determined by unparticle computations in the scale-invariant region, but not elsewhere.) It may happen that all exclusive phenomena are invisible, and then one can only discuss the physics from the inclusive point of view. But if the exclusive events can be observed, they are typically more abundant, and are often more spectacular, more easily separated from background, and more informative. They are also often very unusual, and in some cases may pose serious challenges for the Tevatron and LHC experiments. These challenges should be addressed (and in some contexts are already being addressed) in the immediate future. In conclusion, conformal invariance and inclusive signatures are powerful tools, but they are not powerful enough to address the physics of conformal-symmetry breaking, where the full diversity and complexity of quantum field theory may be found. Two-point functions of local composite operators, and the constraints of conformal invariance, simply cannot capture the phenomenological richness so often found in a hidden valley. \ \ I am pleased to thank A.~De Roeck, J.L.~Feng, H.~Lubatti, M.~Graesser, B.~ Mele, S.~Thomas, D.~Ventura and K.~Zurek for useful discussions.
{ "redpajama_set_name": "RedPajamaArXiv" }
4,467
Kamil Özdağ (born 7 March 1953) is a Turkish wrestler. He competed in the men's freestyle 52 kg at the 1976 Summer Olympics. References External links 1953 births Living people Turkish male sport wrestlers Olympic wrestlers of Turkey Wrestlers at the 1976 Summer Olympics Place of birth missing (living people)
{ "redpajama_set_name": "RedPajamaWikipedia" }
29
El Ahorro Supermarket is a supermarket chain in Texas, United States. It caters to Hispanic Americans. Rafael Ortega heads the chain. Ortega also owns the La Michoacana Meat Markets chain. The Spanish word "ahorro" means "saving." El Ahorro had 15 regular stores in Texas. During that year the company entered an agreement with Supervalu, owner of the Save-A-Lot chain. The companies agreed to convert six former Save-A-Lot stores into co-branded Supervalu El Ahorro stores. The partnership that operates the co-branded stores is named Adventure Supermarkets LLC. Of the co-branded stores, three are in Houston, one is in Brownsville, one is in Harlingen, and one is in Victoria. Operations as "Save-A-Lot El Ahorro" began at the end of May 2011. After the Davis Food City chain of stores closed in 2007, some locations were acquired by El Ahorro in 2008. References External links El Ahorro Supermarket Hall, Christine. "El Ahorro, Save-A-Lot check out new grocery concept." Houston Business Journal. Sunday August 8, 2010. Companies based in Houston Supermarkets of the United States
{ "redpajama_set_name": "RedPajamaWikipedia" }
3,337
Q: Problems calling Android's getSharedPreferences(); from SQLiteOpenHelper class First I want to describe my situation briefly. I have two classes, one MainClass and one DataBaseHelper class, which extends SQLiteOpenHelper. From my MainClass I call a method in the DataBaseHelper class to open a data base. Before opening the data base I want to check the users data base version (this is important as soon as I want to update the data base and push it to the Android market). So from the DataBaseHelper class I call the following method, which is in the MainClass. public int checkCurrentDbVersion(){ // Restore preferences SharedPreferences settings = getSharedPreferences(PREFERENCES, 0); int dbUpgradeVar = settings.getInt("dbUpgradeVar", 1); return dbUpgradeVar; } I call the checkCurrentDbVersion() method from the DataBaseHelper class like so: MainClass currentDbVersion = new MainClass(); int oldDbVersion = currentDbVersion.checkCurrentDbVersion(); As soon as the debugger runs the following line, it stops. SharedPreferences settings = getSharedPreferences(PREFERENCES, 0); What am I doing wrong? I have no constructor defined. Could that be the failure? Best Regards Johe A: I found a solution, which I wanna share. It can be found here: Passing data through intents instead of constructors I forgot the context (I am still not 100% sure what the context is all about, but anyways...). So to get the code working I changed it like so: public int checkCurrentDbVersion(Context context){ // Restore preferences SharedPreferences settings = context.getSharedPreferences(PREFERENCES, 0); int dbUpgradeVar = settings.getInt("dbUpgradeVar", 1); return dbUpgradeVar; } Call the method private final Context myContext; /* *do some other stuff here */ MainClass currentDbVersion = new MainClass(); int oldDbVersion = currentDbVersion.checkCurrentDbVersion(myContext); A: Here is my solution 1.my app can not use. 
import androidx.appcompat.app.AppCompatActivity; SharedPreferences settings = new AppCompatActivity().getSharedPreferences(PREFERENCES, 0); 2.works fine in my app public static boolean isLoggedIn(AppCompatActivity activity) { final SharedPreferences loggedSP = activity.getSharedPreferences(SP_name.get_Logged_SPname(), MODE_PRIVATE); return loggedSP.getBoolean(SP_name.get_Logged_SPkey(),false); } to execute it in my main activity boolean a = LoginRepository.isLoggedIn(this);
{ "redpajama_set_name": "RedPajamaStackExchange" }
7,450
A simple stopwatch utility written in Java using Swing, and the [MVP pattern](https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93presenter). Features start, stop, and reset functionality, as well as the ability to alter the interface (display of running led, milliseconds, etc). ## This application depends on the following resources: * JUnit (<http://junit.org/>) for unit testing. * Mockito (<https://code.google.com/p/mockito/>) for mock object testing. ## Contact Detail: - Email: lindsay.w.bradford@gmail.com - Twitter: http://twitter.com/hollowdreamer ## License This program and the accompanying materials are made available under the terms of the BSD 3-Clause licence which accompanies this distribution in the file LICENCE.md. Further detail on the licence can be found at: (<http://opensource.org/licenses/BSD-3-Clause>)
{ "redpajama_set_name": "RedPajamaGithub" }
7,237
In its February survey, CVVM focused one of its topics on the Benes Decrees. The first question was intended to capture Czech public opinion about the future validity of the Benes Decrees. 64% of respondents think that the Benes Decrees should continue to be in force, 7% stood up for their cancellation, and 29% did not answer this question. The expulsion of the Sudeten Germans is considered to have been just by more than half of respondents (54%); on the other hand, a total of 27% think it unjust.
{ "redpajama_set_name": "RedPajamaC4" }
6,226
Book Talk: Barbara Chase-Riboud Monumentale: The Bronzes 1pm CT Pulitzer Curator, Stephanie Weissberg, Akili Tommasino, Associate Curator of Modern and Contemporary Art at the Metropolitan Museum of Art; Courtney Martin, Director of the Yale Center for British Art; and Christophe Cherix, Chief Curator of Drawings and Prints at the Museum of Modern Art, are coming together for a discussion on the life and work of Barbara Chase-Riboud. Participants have contributed writings to the exhibition publication, Barbara Chase-Riboud Monumentale: The Bronzes, which was co-published by Princeton University Press. This free program will be hosted on Zoom; registration is required. Available in Shop: "Barbara Chase-Riboud" Monumentale: "The Bronzes" Available in Shop: "I Always Knew: A Memoir" by Barbara Chase-Riboud Portrait of Barbara Chase-Riboud by Virgina Harold "Barbara Chase-Riboud" Monumentale: "The Bronzes" is now available for pre-order at the museum shop. 1/2 "I Always Knew: A Memoir" by Barbara Chase-Riboud is available at the museum shop. 2/2 Barbara Chase-Riboud Monumentale: The Bronzes Sep 16, 2022 – Feb 5, 2023
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
3,723
{"url":"https:\/\/byjus.com\/absolute-value-equations-calculator\/","text":"# Absolute Value Equations Calculator\n\nThe Absolute Value Equations Calculator an online tool which shows Absolute Value Equations for the given input. Byju's Absolute Value Equations Calculator is a tool\nwhich makes calculations very simple and interesting. If an input is given then it can easily show the result for the given number.\n\n#### Practise This Question\n\nWhich of the following pair will not produce dihydrogen gas?","date":"2019-04-21 18:10:18","metadata":"{\"extraction_info\": {\"found_math\": false, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.8351156115531921, \"perplexity\": 1296.8105314732}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2019-18\/segments\/1555578532050.7\/warc\/CC-MAIN-20190421180010-20190421202010-00032.warc.gz\"}"}
null
null
\section{\label{int}Introduction} The interest in \textit{r}-modes, first studied more than twenty years ago \cite{pp}, has increased dramatically when it was discovered \cite{and}, and afterwards confirmed more generally \cite{fm}, that \textit{r}-modes are driven unstable by gravitational radiation reaction in perfect-fluid stars with arbitrary small rotation. Soon afterwards, it was shown \cite{lom,aks} that in a newly born, hot, rapidly rotating neutron star, bulk and shear viscosity do not suppress the \textit{r}-mode instability for a wide range of relevant temperatures and angular velocities of the star. As a result, the neutron star could spin down to just a small fraction of its initial angular velocity, thus providing a possible explanation for the relatively small spin rates of young pulsars in supernova remnants \cite{lom,aks}. The gravitational waves emitted by the young neutron star during this spin-down phase could be detected by enhanced versions of laser interferometer detectors \cite{olcsva}. It was also suggested that in accreting neutron stars in low-mass x-ray binaries the gravitational-wave emission due to the \textit{r}-mode instability could balance the spin-up torque due to accretion, thus limiting the maximum angular velocity of these stars to values consistent with observations \cite{bil,akst}. Of fundamental importance in judging the astrophysical relevance of the \textit{r}-mode instability is the determination of the saturation amplitude of the mode. In recent years, several authors have addressed this issue, both analytically and numerically, taking into account different nonlinear effects \cite{rls,rlms1,rlms2,sf,ltv,ltv2,glssf,afmstw}. 
An approximate analytical expression for differential rotation induced by \textit{r}-modes was first derived using the linearized fluid equations by expanding the velocity of a fluid element located at a certain point in powers of the mode's amplitude, averaging over a gyration, and retaining only the lowest-order nonvanishing term \cite{rls,rlms1}. This differential rotation can then interact with the magnetic field of a newly born, hot, rapidly rotating neutron star, limiting the growth of the \textit{r}-mode instability or, for strong magnetic fields, even preventing it from developing \cite{rls,rlms2}. The first numerical study of nonlinear \textit{r}-modes was performed for a rapidly-rotating relativistic star without a gravitational radiation force \cite{sf}. These studies show there is no suppression of the mode, even when its initial amplitude is of order unity, and also confirm the existence of differential rotation induced by r-modes. In a subsequent numerical study of the nonlinear evolution of \textit{r}-modes of a rapidly-rotating Newtonian star with a gravitational radiation force, it was shown that the amplitude of the mode grows to order unity before strong shocks near the stellar surface rapidly damp the mode \cite{ltv,ltv2}. However, a more recent numerical simulation has found no evidence that the decay of the mode's amplitude is due to such shocks near the surface of the star; instead, the catastrophic decay of a \textit{r}-mode's amplitude of order unity is due to a leaking of energy into other fluid modes, leading to a differentially rotating configuration \cite{glssf}. Recently, the nonlinear coupling between stellar inertial modes has been analyzed, with the conclusion that \textit{r}-modes may saturate at much lower values than previous investigations had revealed \cite{afmstw}. 
Despite this fact, it was found that the \textit{r}-mode instability could still explain the spin clustering at the fast end of the spin distribution of neutron stars in low-mass x-ray binaries and that gravitational waves from newly born, hot, rapidly rotating neutron stars, as well as from old neutron stars in low-mass x-ray binaries, could be detected by enhanced versions of laser interferometer gravitational wave detectors \cite{afmstw}. In this paper, we are concerned with the role of differential rotation in the evolution of the \textit{r}-mode instability. Recently, a nonlinear extension of the linear \textit{r}-mode perturbation was found within the nonlinear theory up to second order in the mode's amplitude $\alpha$ in the case of a Newtonian, barotropic, perfect-fluid star rotating with constant angular velocity $\Omega$ \cite{sa}. This nonlinear extension describes differential rotation of pure kinematic nature that produces large scale drifts along stellar latitudes. This solution contains two separate pieces, one induced by first-order quantities and another determined by the choice of initial data. These two pieces cannot cancel each other, since one is stratified on cylinders and the other not. Thus, differential rotation is an unavoidable kinematic feature of \textit{r}-modes. The differential rotation found in Ref.~\cite{sa} does contribute to the second-order physical angular momentum of the \textit{r}-mode, implying that the phenomenological model proposed in Ref.~\cite{olcsva} to study the evolution of the \textit{r}-mode instability in newly born, hot, rapidly rotating neutron stars has to be modified so that the physical angular momentum of the perturbation includes, not only the canonical angular momentum, but also the contribution from second-order differential rotation. 
In this paper, we study such a modified model, arriving at the conclusion that differential rotation plays an important role in the evolution of the \textit{r}-mode instability. In section \ref{dif-rot} we derive an analytical expression for the physical angular momentum of the r-mode perturbation, using the second-order solution of Ref.~\cite{sa}. In section \ref{model}, the model for the evolution of the \textit{r}-mode instability is presented and a system of nonlinear, coupled, differential equations for the mode's amplitude $\alpha$ and the star's angular velocity $\Omega$ is derived. In section \ref{role}, the analytical solution of the above mentioned system of differential equations is presented, revealing that differential rotation does play an important role in the evolution of the \textit{r}-mode instability. Finally, in section \ref{con} we present the conclusions. \section{\label{dif-rot} Physical angular momentum of the \textit{r}-mode perturbation} For small-amplitude perturbations of slowly rotating, Newtonian, barotropic, perfect-fluid stars, the \textit{r}-mode solution to the linearized Euler and continuity equations is, in spherical coordinates $(r,\theta,\phi)$, given by: \begin{subequations} \label{vf} \begin{eqnarray} \hspace{-0.6cm} \delta^{(1)} v^r &=& 0, \label{vf1} \\ \hspace{-0.6cm} \delta^{(1)} v^{\theta} &=& \alpha \Omega C_{l} l \left( \frac{r}{R} \right)^{l-1} \sin^{l-1}\theta \sin (l\phi+\omega t), \label{vf2} \\ \hspace{-0.6cm} \delta^{(1)} v^{\phi} &=& \alpha \Omega C_{l} l \left( \frac{r}{R} \right)^{l-1} \sin^{l-2}\theta \cos\theta \cos (l\phi+\omega t), \label{vf3} \end{eqnarray} \end{subequations} where $\delta^{(1)} v^i$ are the contravariant components of the first-order Eulerian change in velocity, $R$ and $\Omega$ are, respectively, the radius and the angular velocity of the unperturbed star, \begin{equation} \omega=-\Omega l+ \frac{2\Omega}{l+1} \end{equation} is the mode's angular frequency in an inertial frame, $\alpha$ 
is the mode's amplitude and $C_l$ is chosen to be \begin{equation} C_l=(2l-1)!!\sqrt{\frac{2l+1}{2\pi(2l)!l(l+1)}}. \end{equation} To the velocity field (\ref{vf}) corresponds a Lagrangian vector displacement $\xi^{(1)i}$, \begin{subequations} \label{ld} \begin{eqnarray} \xi^{(1)r}&=&0, \label{ld1} \\ \xi^{(1)\theta}&=& - \frac12 \alpha C_l l(l+1) \left( \frac{r}{R} \right)^{l-1} \sin^{l-1} \theta \cos (l\phi+\omega t), \label{ld2} \nonumber \\ & & \\ \xi^{(1)\phi}&=& \frac12 \alpha C_l l(l+1) \left( \frac{r}{R} \right)^{l-1} \sin^{l-2}\theta \cos\theta \sin(l\phi+\omega t), \label{ld3} \nonumber \\ & & \end{eqnarray} \end{subequations} satisfying, at lowest order in $\Omega$, the equations \begin{equation} \delta^{(1)} v^i=\partial_{t} \xi^{(1)i} + v^k\nabla_k\xi^{(1)i} - \xi^{(1)k}\nabla_k v^i \end{equation} and \begin{equation} \nabla_i(\rho \xi^{(1)i}) = 0, \end{equation} where $v^i=\Omega \delta_{\phi}^i$ and $\rho$ are, respectively, the fluid velocity and the mass density of the unperturbed star. The knowledge of the first-order vector displacement $\xi^{(1)i}$ is sufficient to compute the canonical angular momentum of the \textit{r}-mode perturbation \cite{fs}; using Eq.~(\ref{ld}), one obtains: \begin{eqnarray} J_c &=& - \int \rho \partial_{\phi} \xi^{(1)i} \left( \partial_{t} \xi^{(1)}_i +v^k \nabla_k \xi^{(1)}_i \right) dV \nonumber \\ &=& -\frac14 \alpha^2 \Omega l(l+1) R^{2-2l} \int_0^R \rho r^{2l+2} dr. 
\label{canangm} \end{eqnarray} However, the canonical angular momentum $J_c$ is not the full physical angular momentum at second order $\delta^{(2)}J$, the difference being given by \cite{fs} \begin{equation} \delta^{(2)}J- J_c=\frac{1}{\Omega}\int\rho v^i \Delta_{\xi}^{(2)}v_i dV, \label{angm} \end{equation} where $\Delta_{\xi}^{(2)}v_i$ is the second-order Lagrangian change in velocity \begin{eqnarray} \Delta^{(2)}_{\xi}v_i &=& \partial_{t} \xi^{(1)k} \nabla_i \xi^{(1)}_k + v^k \nabla_k \xi^{(1)m}\nabla_i \xi^{(1)}_m \nonumber \\ & & +\; \partial_{t} \xi^{(2)}_i + v^k \left( \nabla_i \xi^{(2)}_k + \nabla_k \xi^{(2)}_i \right), \label{lc2v} \end{eqnarray} which, as opposed to the canonical angular momentum, also contains terms linear in the second-order Lagrangian displacement vector $\xi^{(2)a}$, in addition to terms quadratic in $\xi^{(1)a}$. Therefore, the computation of the physical angular momentum at second order requires the knowledge of $\xi^{(2)a}$. In a recent paper \cite{sa}, a nonlinear extension of the linear \textit{r}-mode was found within the nonlinear theory up to second order in the mode's amplitude in the case of a slowly rotating, Newtonian, barotropic, perfect-fluid star: \begin{subequations} \label{difrot} \begin{eqnarray} \delta^{(2)} v^r&=& 0, \label{difrot1} \\ \delta^{(2)} v^{\theta}&=&0, \label{difrot2} \\ \delta^{(2)} v^{\phi} &=& \frac12 \alpha^2 \Omega C_l^2 l^2(l^2-1) \left(\frac{r}{R} \right)^{2l-2} \sin^{2l-4}\theta \nonumber \\ & & +\; \alpha^2 \Omega A r^{N-1} \sin^{N-1}\theta, \label{difrot3} \end{eqnarray} \end{subequations} where $A$ and $N$ are arbitrary constants fixed by initial data. This second-order solution represents differential rotation, producing large scale drifts of fluid elements along stellar latitudes. The first term on the right-hand side of Eq.~(\ref{difrot3}) is induced by first-order quantities, while the second term is determined by the choice of initial data. 
Since these two terms cannot cancel each other, differential rotation is an unavoidable feature of \textit{r}-modes. Let us in the present work restrict ourselves to the case $N=2l-1$. We also redefine $A$ to be \begin{equation} A=\frac12 C_l^2 l^2 (l+1) R^{2-2l} K, \label{parK} \end{equation} where $K$ is a constant fixed by the choice of initial data. At lowest order in $\Omega$, the contravariant components of the second-order Lagrangian displacement vector $\xi^{(2)i}$, corresponding to the axisymmetric time-independent velocity field (\ref{difrot}), are determined by the equations \cite{sa} \begin{eqnarray} \delta^{(2)} v^i &=& \partial_{t} \xi^{(2)i} + v^k\nabla_k\xi^{(2)i} - \xi^{(2)k}\nabla_k v^i \nonumber \\ & & -\; \xi^{(1)k} \nabla_k\delta^{(1)} v^i, \label{vi2a} \\ \nabla_k \xi^{(2)k} &=& \frac12 \nabla_k \xi^{(1)m} \nabla_m \xi^{(1)k}, \label{vi2b} \\ \xi^{(2)k}\nabla_k\rho &=& -\frac12\xi^{(1)k}\xi^{(1)m}\nabla_k\nabla_m\rho, \label{vi2c} \end{eqnarray} which yield the solution \cite{sa}: \begin{subequations} \label{edc} \begin{eqnarray} \xi^{(2)r} &=& \frac{1}{16} \alpha^2 C_l^2 l^2 (l+1)^2 R \left( \frac{r}{R} \right)^{2l-1} \nonumber \\ & & \times \sin^{2l-2}\theta \left( \sin^2\theta-2 \right), \label{edc1} \\ \xi^{(2)\theta} &=& \frac{1}{16} \alpha^2 C_l^2 l^2 (l+1)^2 \left( \frac{r}{R} \right)^{2l-2} \nonumber \\ & & \times \sin^{2l-3}\theta \cos\theta \left( \sin^2\theta+2l-2 \right), \label{edc2} \\ \xi^{(2)\phi} &=& \frac14 \alpha^2 \Omega C_l^2 l^2 (l+1)(2K+2l-1) \left( \frac{r}{R} \right)^{2l-2} \nonumber \\ & & \times \sin^{2l-2}\theta \; t + C(r,\theta), \label{edc3} \end{eqnarray} \end{subequations} where $C$ is an arbitrary function of $r$ and $\theta$. 
In Eq.~(\ref{vi2a})--(\ref{vi2c}), terms quadratic in first-order quantities give rise to double ``frequency" terms of the type $\cos[2(l\phi+\omega t)]$ inducing a second-order solution corresponding to an oscillating response at a ``frequency" twice that of the linear \textit{r}-mode; however, in this article these double frequency oscillations will not be considered since they give a zero contribution to the second-order physical angular momentum of the \textit{r}-mode perturbation. Using the above expressions for the second-order Lagrangian displacement vector $\xi^{(2)i}$, the physical angular momentum at second order is computed to be \cite{sa}: \begin{eqnarray} \delta^{(2)} J &=& \frac12 \alpha^2 \Omega \left[ 2Kl+(l-1)(2l+1) \right] R^{2-2l} \nonumber \\ & & \times \int_0^R \rho r^{2l+2} dr. \label{physangm} \end{eqnarray} As can be seen from Eqs.~(\ref{canangm}) and (\ref{physangm}), the physical angular momentum of the \textit{r}-mode perturbation is not equal, in general, to the canonical angular momentum. The former contains also a part linear in the second-order Lagrangian change in velocity, which, as pointed out in Ref.~\cite{fs}, is related to conservation of circulation in the fluid. Since, as shown in Ref.~\cite{sa}, at second order \textit{r}-modes do not conserve vorticity, it follows that, in general, the physical and canonical angular momentum do not coincide. However, specific choices of $K$ can be made such that the integral in Eq.~(\ref{angm}) vanishes. Such a case ($K=-2$, for $l=2$) was studied in Ref.~\cite{olcsva} within a phenomenological model for the evolution of the \textit{r}-mode instability. There is not, however, to our knowledge, a physical condition that forces $K$ to take such a particular value. Therefore, in this paper we study the evolution of the \textit{r}-mode instability for arbitrary values of $K$. 
\section{\label{model}The evolution model} In this paper, the simple model proposed in Ref.~\cite{olcsva} to study the evolution of the \textit{r}-mode perturbation in newly born, hot, rapidly rotating neutron stars is adopted with two modifications. First, the physical angular momentum of the \textit{r}-mode perturbation is not taken to be just the canonical angular momentum as in Ref.~\cite{olcsva}. Instead, we will use the full physical angular momentum (\ref{physangm}), which, as already discussed in the previous section, because of the presence of differential rotation induced by \textit{r}-modes, is different, in general, from the canonical angular momentum. A second modification, less significant, is related to the proposal of Ref.~\cite{hl} to deduce the evolution equations for the mode's amplitude $\alpha$ and the star's angular velocity $\Omega$ just from angular momentum considerations. Since the most unstable \textit{r}-mode is the $l=2$ mode, we will focus in this paper our attention only on the evolution of this mode. We will also assume, as in Ref.~\cite{olcsva}, that the mass density $\rho$ and the pressure $p$ of the fluid are related by a polytropic equation of state $p=k\rho^2$, with $k$ such that $M=1.4 M_{\bigodot}$ and $R=12.53$ km. In our model, it is assumed that the total angular momentum of the star is given by \begin{equation} J=I\Omega+\delta^{(2)}J \label{amtotal} \end{equation} where the momentum of inertia of the equilibrium configuration $I$ is given by \begin{equation} I = \frac{8\pi}{3} \int_0^R\rho r^4 dr = \tilde{I}MR^2, \end{equation} with $\tilde{I}=0.261$, and the physical angular momentum of the \textit{r}-mode perturbation $\delta^{(2)}J$ is given by \begin{eqnarray} \delta^{(2)} J &=& \frac12 \alpha^2 \Omega (4K+5) \frac{1}{R^2} \int_0^R\rho r^6 dr \nonumber \\ &=& \frac12 \alpha^2 \Omega (4K+5) \tilde{J} M R^2, \label{physangm2} \end{eqnarray} with $\tilde{J}=1.635\times10^{-2}$. 
The total angular momentum of the star $J=J(\alpha,\Omega)$ decreases due to the emission of gravitational radiation according to \cite{olcsva} \begin{equation} \frac{dJ}{dt}=3 \tilde{J} MR^2 \frac{\alpha^2\Omega}{\tau_{GR}}, \label{dim-mat} \end{equation} where the gravitational-radiation timescale $\tau_{GR}$ is given by \cite{lom} \begin{equation} \frac{1}{\tau_{GR}} = \frac{1}{\tilde{\tau}_{GR}} \left( \frac{\Omega^2}{\pi G \bar{\rho}} \right)^3, \label{gr-timescale} \end{equation} with the fiducial timescale $\tilde{\tau}_{GR}=-3.26\,\mbox{s}$. In the above equation, $\bar{\rho}$ is the average mass density of the star and $G$ is the gravitational constant. From Eqs.~(\ref{amtotal})--(\ref{gr-timescale}) it is straightforward to obtain a differential equation for the time evolution of $\alpha$ and $\Omega$: \begin{eqnarray} & &\left[ 1+\frac{1}{3}(4K+5)Q\alpha^2 \right] \frac{d\Omega}{dt} \nonumber \\ & & \hspace{1.0cm} +\; \frac{2}{3}(4K+5)Q\Omega\alpha \frac{d\alpha}{dt}= 2Q\frac{\Omega\alpha^2}{\tau_{GR}}, \label{first-e} \end{eqnarray} where $Q=3\tilde{J}/(2\tilde{I})=0.094$. Following the proposal of Ref.~\cite{hl} to deduce the evolution equations just from angular momentum considerations, we assume that the physical angular momentum of the \textit{r}-mode perturbation $\delta^{(2)}J$ increases due to the emission of gravitational radiation and decreases due to the dissipative effect of viscosity, \begin{equation} \frac{d\delta^{(2)}J}{dt}=-2\delta^{(2)}J \left( \frac{1}{\tau_{GR}} + \frac{1}{\tau_V} \right). 
\label{varpam} \end{equation} For the viscous timescale $\tau_V$, we take the expression derived in Ref.~\cite{lom} for the simple case of the linear \textit{r}-mode (\ref{vf}) of a newly born, hot, rapidly rotating neutron star with shear and bulk viscosity: \begin{equation} \frac{1}{\tau_V} = \frac{1}{\tilde{\tau}_S} \left( \frac{10^9\mbox{K}}{T} \right)^2 + \frac{1}{\tilde{\tau}_B} \left( \frac{T}{10^9\mbox{K}} \right)^6 \left( \frac{\Omega^2}{\pi G \bar{\rho}} \right), \label{v-timescale} \end{equation} with the fiducial timescales $\tilde{\tau}_S = 2.52\times10^8\,\mbox{s}$ and $\tilde{\tau}_B=6.99\times10^8\,\mbox{s}$. Several authors have taken into account other dissipative mechanisms, but in this paper we shall restrict ourselves, for illustrative purposes, to the above expression. {} From Eq.~(\ref{varpam}) one obtains, using Eq.~(\ref{physangm2}), a second differential equation for the time evolution of $\alpha$ and $\Omega$: \begin{equation} 2\Omega\frac{d\alpha}{dt}+\alpha\frac{d\Omega}{dt}=-2\alpha\Omega \left( \frac{1}{\tau_{GR}} + \frac{1}{\tau_V} \right). \label{second-e} \end{equation} Note that in Ref.~\cite{olcsva} it was assumed that it was the energy of the perturbation (in the rotating frame), and not the angular momentum, that increases due to emission of gravitational waves and decreases due to viscosity. For a constant $\Omega$ these two approaches are coincident; however, for a varying $\Omega$ they differ in quantities of the order of $Q\alpha^2$ \cite{hl}. 
From Eqs.~(\ref{first-e}) and (\ref{second-e}) it is straightforward to obtain a system of two, first-order, coupled, differential equations determining the time evolution of the amplitude of the \textit{r}-mode $\alpha(t)$ and of the angular velocity of the star $\Omega(t)$: \begin{eqnarray} \frac{d\Omega}{dt} &=& \frac83 (K+2)Q \frac{\Omega\alpha^2}{\tau_{GR}} + \frac23 (4K+5)Q \frac{\Omega\alpha^2}{\tau_{V}}, \label{eqmod1} \\ \frac{d\alpha}{dt} &=& -\left[ 1 + \frac43 (K+2)Q \alpha^2 \right] \frac{\alpha}{\tau_{GR}} \nonumber \\ & & - \left[ 1 + \frac13 (4K+5)Q \alpha^2 \right] \frac{\alpha}{\tau_{V}}. \label{eqmod2} \end{eqnarray} For $K=-2$, the above equations coincide with Eqs.~(2.7) and (2.8) of Ref.~\cite{hl} (for $1/\tau_{M}=0$) and with Eqs.~(3.14) and (3.15) of Ref.~\cite{olcsva} (up to terms of order $Q\alpha^2$). In this paper, as already mentioned above, we study the evolution of the \textit{r}-mode instability for an arbitrary value of $K$. \section{\label{role}The role of differential rotation in the evolution of \textit{r}-modes} The condition $\tau_{GR}^{-1}(\Omega)+ \tau_V^{-1}(\Omega,T) = 0$ gives the stability curve, i.e., the set of points in a diagram $(\Omega,T)$ for which the damping effect of viscosity balances exactly the driving effect of gravitational radiation. For a newly born, hot, rapidly rotating neutron star, there is an interval of temperatures and angular velocities of the star for which the gravitational timescale $\tau_{GR}$ is much smaller than the viscous timescale $\tau_{V}$, implying that the evolution of $\alpha$ and $\Omega$ is determined, in a good approximation, by the equations: \begin{eqnarray} \frac{d\Omega}{dt} &=& \frac83 (K+2)Q \frac{\Omega\alpha^2}{\tau_{GR}}, \label{omega} \\ \frac{d\alpha}{dt} &=& -\left[ 1 + \frac43 (K+2)Q \alpha^2 \right] \frac{\alpha}{\tau_{GR}}. 
\label{alfa} \end{eqnarray} These equations remain a good approximation to Eqs.~(\ref{eqmod1}) and (\ref{eqmod2}) for a period of time during which the temperature and angular velocity of the neutron star are such that the corresponding point, in a $(\Omega,T)$ diagram, lies well above the stability curve. For the model we have been considering, this period of time is about one year \cite{olcsva}. In solving the above system of equations, initial conditions at $t=t_0$ are chosen such that $|\delta^{(2)}J(t_0)| \ll I\Omega(t_0)$. For $\alpha_0\equiv\alpha(t_0)=10^{-6}$, this implies that $|K|\ll 10^{13}$. In the case $K<-5/4$, for which $\delta^{(2)}J<0$, as the amplitude of the mode grows due to the gravitational-radiation instability, the total angular momentum of the star, given by Eq.~(\ref{amtotal}), decreases and eventually becomes negative. To avoid this, the growth of the mode's amplitude has to be stopped by hand at a saturation value $\alpha_{sat}\leqslant\sqrt{-3/[(4K+5)Q]}$; integration is then carried on with a new set of equations for which $\alpha=\alpha_{sat}$ and the evolution of the angular velocity $\Omega$ is determined from Eq.~(\ref{first-e}), with $d\alpha/dt=0$, \begin{equation} \frac{d\Omega}{dt}=\frac{2\Omega}{\tau_{GR}} \frac{\alpha_{sat}^2Q}{1+\frac13(4K+5)\alpha_{sat}^2Q}. \label{first-e1} \end{equation} In the case $K\geqslant-5/4$, for which $\delta^{(2)}J\geqslant0$, as the amplitude of the mode grows, the total angular momentum decreases but remains always positive and, therefore, there is no need to saturate the mode by hand. In this case, $\alpha(t)$ and $\Omega(t)$ are determined solely by Eqs.~(\ref{omega}) and (\ref{alfa}) and, as will be shown below, the mode saturates naturally at a value that depends on the parameter $K$, namely, $\alpha_{sat}\propto(K+2)^{-1/2}$. 
As already mentioned above, the parameter $K$, introduced in Eq.~(\ref{parK}), is fixed by initial data, giving the initial amount of differential rotation associated to the \textit{r}-mode. Thus, depending on the choice of initial data, the saturation amplitude of the \textit{r}-mode can be of order unity, as assumed in the first models of evolution of the \textit{r}-mode instability \cite{olcsva}, or very small, as recent studies within the nonlinear theory seem to imply \cite{afmstw}. The requirement that the total angular momentum is always non-negative, implies that the right-hand side of Eq.~(\ref{alfa}) is always positive, i.e., $\alpha(t)$ increases in time for any value of $K$. It is also worth mentioning that, according to Eq.~(\ref{omega}), for $K\neq-2$, $\Omega(t)$ evolves on a gravitational timescale, that is, a change in the background motion of the star occurs simultaneously to the growth of the mode due to the gravitational-wave instability. This result is not completely unexpected, since such kind of behavior has already been reported in the case of a toy model of a thin spherical shell of a rotating incompressible fluid \cite{lu}. Let us first consider the case $K\geqslant-5/4$. As already mentioned above, in this case the total angular momentum of the star is always positive and the amplitude of the mode $\alpha(t)$ and the angular velocity of the star $\Omega(t)$ are determined solely by the system of Eqs.~(\ref{omega}) and (\ref{alfa}), from which it is straightforward to obtain: \begin{equation} \frac{d\Omega}{\Omega} = -\frac{\frac83(K+2)Q\alpha}{1+\frac43(K+2)Q\alpha^2} d\alpha. 
\end{equation} This differential equation yields the solution \begin{equation} \frac{\Omega}{\Omega_0} = \frac{1+\frac43(K+2)Q\alpha_0^2}{1+\frac43(K+2)Q\alpha^2}, \label{omega-alfa} \end{equation} where $\Omega_0\equiv\Omega(t_0)$ and $\alpha_0\equiv\alpha(t_0)$ are, respectively, the initial angular velocity of the star and the initial amplitude of the \textit{r}-mode perturbation. Using the above expression to eliminate $\Omega$ from Eq.~(\ref{alfa}) and integrating, one obtains: \begin{eqnarray} & &-\frac{1}{\tilde{\tau}_{GR}}\left( \frac{\Omega_0}{\sqrt{\pi G\bar{\rho}}} \right)^6 \left[ 1+\frac43(K+2)Q\alpha_0^2 \right]^6 (t-t_0) \nonumber \\ & & \quad \quad \quad =\ln \frac{\alpha}{\alpha_0} + \sum_{n=1}^5 \frac{5!}{2n(5-n)!n!}\left[\frac43(K+2)Q\right]^n \nonumber \\ & & \quad \quad \quad \quad \times \left( \alpha^{2n}- \alpha_0^{2n} \right). \label{alfa-t} \end{eqnarray} In the initial stages of the evolution of the \textit{r}-mode instability, the right-hand side of Eq.~(\ref{alfa-t}) is dominated by the first term and $\alpha$ increases exponentially, \begin{equation} \alpha(t) \simeq \alpha_0 \exp \left\{ 0.027 \left( \frac{\Omega_0}{\Omega_K} \right)^6 (t-t_0) \right\}, \label{l-fase} \end{equation} as expected; for later times, the right-hand side of Eq.~(\ref{alfa-t}) is dominated by the last term ($n=5$) and $\alpha$ increases very slowly as \begin{equation} \alpha(t) \simeq 2.48 \left( \frac{\Omega_0}{\Omega_K} \right)^{3/5} (t-t_0)^{1/10}\frac{1}{\sqrt{K+2}}. \label{m-fase} \end{equation} In Eqs.~(\ref{l-fase}) and (\ref{m-fase}), $t-t_0$ is given in seconds, $\Omega_K=(2/3)\sqrt{\pi G\bar{\rho}}$ is the Keplerian angular velocity at which the star starts shedding mass at the equator, we have used for the gravitational timescale the value $\tilde{\tau}_{GR}=-3.26\mbox{ s}$, and we have assumed that $K$ and $\alpha_0$ are such that $4(K+2)Q\alpha_0^2/3 \ll 1$. 
The smooth transition between the regimes (\ref{l-fase}) and (\ref{m-fase}) occurs for $t-t_0\simeq\mbox{few}\times 10^2$ seconds. The equation~(\ref{alfa-t}) describes the time evolution of the amplitude of the mode between the moment the gravitational-wave instability sets in and the moment viscosity effects become dominant and start damping the mode. It reveals that saturation of the \textit{r}-mode occurs just a few hundred seconds after the beginning of the exponential growth of $\alpha$. Thus, within the model of evolution we are studying, which includes the nonlinear effect of the differential rotation induced by \textit{r}-modes, the mode's amplitude saturates in a natural way (see Fig.~\ref{fig:alfa}). \begin{figure}[h] \includegraphics{alfa.eps} \caption{\label{fig:alfa}Time evolution of the amplitude of the \textit{r}-mode $\alpha$ for different values of $K$. The saturation value of the amplitude of the mode depends crucially on the parameter $K$, which is related to the amount of initial differential rotation associated with the \textit{r}-mode. The initial values of the amplitude of the mode and of the angular velocity of the star are, respectively, $\alpha_0=10^{-6}$ and $\Omega_0=\Omega_K$.} \end{figure} The saturation value of the amplitude of the mode depends crucially on the parameter $K$, $\alpha_{sat}\propto (K+2)^{-1/2}$. The initial amount of differential rotation associated with the \textit{r}-mode can be minimized by choosing $K$ appropriately, namely, $K\simeq0$, with the consequence that the amplitude of the mode saturates at values of order unity. In this case, other nonlinear effects as, for instance, mode--mode interactions, that saturate \textit{r}-modes at values much smaller than unity \cite{afmstw}, are more relevant than differential rotation in what concerns the saturation amplitude of the mode. 
However, if the initial differential rotation associated to \textit{r}-modes is significant, when compared with $\alpha_0^{-1}$, then the saturation amplitude $\alpha_{sat}$ can be as small as $10^{-3}-10^{-4}$. In this case, differential rotation plays an important role in the saturation of \textit{r}-modes. Let us now turn our attention to the time evolution of the angular velocity of the star $\Omega$. Integrating Eq.~(\ref{omega}), after eliminating $\alpha$ with Eq.~(\ref{omega-alfa}), one obtains the following solution: \begin{eqnarray} & & -\frac{2}{\tilde{\tau}_{GR}}\left( \frac{\Omega_0}{\sqrt{\pi G\bar{\rho}}} \right)^6 \left[ 1+\frac43(K+2)Q\alpha_0^2 \right]^6 (t-t_0) \nonumber \\ & & \quad \quad \quad = \sum_{n=1}^5 \frac{\left[1+\frac43(K+2)Q\alpha_0^2\right]^n}{n} \left[ \left( \frac{\Omega_0}{\Omega} \right)^n-1 \right] \nonumber \\ & & \quad \quad \quad \quad +\; \ln \frac{\Omega_0}{\Omega} + \ln \frac{1+\frac43(K+2)Q\alpha_0^2 - \frac{\Omega}{\Omega_0}}{\frac43(K+2)Q\alpha_0^2}. \label{omega-t} \end{eqnarray} In the initial stages of the evolution of the \textit{r}-mode instability, the right-hand side of Eq.~(\ref{omega-t}) is dominated by the last term and $\Omega$ decreases as \begin{equation} \frac{\Omega(t)}{\Omega_0} \simeq 1-\frac43(K+2)Q\alpha_0^2 \exp \left\{ 0.054 \left( \frac{\Omega_0}{\Omega_K} \right)^6 (t-t_0) \right\}; \label{i-fase} \end{equation} for later times, the right-hand side of Eq.~(\ref{omega-t}) is dominated by the first term ($n=5$) and $\Omega$ decreases slowly as \begin{equation} \frac{\Omega(t)}{\Omega_0} \simeq 1.30 \left( \frac{\Omega_0}{\Omega_K} \right)^{-6/5} (t-t_0)^{-1/5}. \label{f-fase} \end{equation} In Eqs.~(\ref{i-fase}) and (\ref{f-fase}), again, $t-t_0$ is given in seconds, we have used for the gravitational timescale the value $\tilde{\tau}_{GR}=-3.26\mbox{ s}$, and we have assumed that $K$ and $\alpha_0$ are such that $4(K+2)Q\alpha_0^2/3 \ll 1$. 
The smooth transition between the regimes (\ref{i-fase}) and (\ref{f-fase}) occurs for $t-t_0\simeq\mbox{few}\times 10^2$ seconds (see Fig.~\ref{fig:omega}). \begin{figure}[h] \includegraphics{omega.eps} \caption{Time evolution of the angular velocity of the star $\Omega$ for different values of $K$. After a few hundred seconds, the value of the angular velocity becomes quite insensitive to the value of $K$. The initial values of the amplitude of the mode and of the angular velocity of the star are, respectively, $\alpha_0=10^{-6}$ and $\Omega_0=\Omega_K$.} \label{fig:omega} \end{figure} Remarkably, in the later phase of the evolution, the angular velocity $\Omega$ does not depend on the value of $K$, and, consequently, does not depend on the saturation value of $\alpha$. As can be seen in Fig.~\ref{fig:omega}, already at $t-t_0=1000\,\mbox{s}$, for values of $K$ ranging from 0 to $10^8$, the angular velocities are not much different; for $t-t_0\simeq1\,\mbox{year}$ any difference becomes negligibly small. This contrasts with the results obtained in Ref.~\cite{olcsva}, where the value of $\Omega$ after about one year depends critically on the choice of $\alpha_{sat}$ (see case $K=-2$ below). After about one year of evolution, when the dissipative effect of viscosity becomes dominant and starts damping the mode, the angular velocity of the star reaches values consistent with observational results. Indeed, assuming that initially the star rotates with the maximum allowed angular velocity, $\Omega_0=\Omega_K=(2/3)\sqrt{\pi G \bar{\rho}}$, one obtains from Eq.~(\ref{omega-t}) that $\Omega_{\mbox{\small{one year}}}\simeq 0.042 \Omega_K$, in good agreement with the inferred initial angular velocity of the fastest pulsars associated with supernova remnants. 
From Eqs.~(\ref{m-fase}) and (\ref{f-fase}) it follows that, for $t\gg \mbox{few}\times10^2$ seconds, the quantity $(\Omega/\Omega_0)\alpha^2$ does not depend on time, just on $K$, namely, $(\Omega/\Omega_0)\alpha^2\simeq8/(K+2)$, implying that the fraction of the initial angular momentum of the star that is transferred to the \textit{r}-mode is just a function of $K$, \begin{equation} \delta^{(2)}J/J_0\simeq \frac{K+5/4}{K+2}, \quad \mbox{for } t\gg \mbox{few}\times10^2 \mbox{ seconds}, \end{equation} where we have taken $J_0\simeq I\Omega_0$. Thus, for $K\gg1$, most of the initial angular momentum of the star $I\Omega_0$ is transferred to the \textit{r}-mode perturbation and, consequently, almost none is carried away by gravitational radiation emission. On the other hand, for $K=-5/4$, the angular momentum of the perturbation $\delta^{(2)}J$ remains zero during the evolution and, consequently, all the initial angular momentum of the star $I\Omega_0$ is available to be radiated away by gravitational radiation (see Figs.~\ref{fig:jota2l} and \ref{fig:jota}). \begin{figure}[h] \includegraphics{ampert.eps} \caption{Time evolution of the physical angular momentum of the \textit{r}-mode perturbation $\delta^{(2)}J$ for different values of $K$. For $K\gg1$, most of the initial angular momentum of the star $J_0\simeq I\Omega_0$ is transferred to the \textit{r}-mode perturbation and, consequently, almost none is carried away by gravitational radiation emission. The initial values of the amplitude of the mode and of the angular velocity of the star are, respectively, $\alpha_0=10^{-6}$ and $\Omega_0=\Omega_K$.} \label{fig:jota2l} \end{figure} \begin{figure}[h] \includegraphics{tamstar.eps} \caption{Time evolution of the total angular momentum of the star $J$ for different values of $K$. For small values of $K$, a significant part of the initial angular momentum of the star is radiated away by gravitational radiation. 
The initial values of the amplitude of the mode and of the angular velocity of the star are, respectively, $\alpha_0=10^{-6}$ and $\Omega_0=\Omega_K$.} \label{fig:jota} \end{figure} During the nonlinear evolution, the fluid develops a strong differential rotation. Let us define the average differential rotation $\Delta\Omega$ as the weighted variance of $\Omega$~\cite{ltv2}, \begin{equation} (\Delta\Omega)^2=\frac{\int\rho r^2 \sin^2\theta (\delta^{(2)}v^\phi-\bar{\Omega}_{dr})^2 dV}{\int\rho r^2 \sin^2\theta dV}, \label{aver-rot-dif} \end{equation} where the average angular velocity $\bar{\Omega}_{dr}$, characterizing the drift of fluid elements along stellar latitudes, is given by \begin{equation} \bar{\Omega}_{dr} =\frac{\delta^{(2)}J}{I} = \frac{\int\rho r^2 \sin^2\theta \delta^{(2)}v^\phi dV}{\int\rho r^2 \sin^2\theta dV}. \label{aver-ang-vel} \end{equation} For the polytropic model we have been considering, Eqs.~(\ref{aver-rot-dif}) and (\ref{aver-ang-vel}) yield for the average differential rotation the following expression: \begin{eqnarray} \Delta\Omega &=& \frac13 \alpha^2 \Omega Q {\biggl [} \frac{15}{56} \left( 24K^2+56K+35 \right) \frac{\tilde{I}\tilde{H}}{\tilde{J}^2} \nonumber \\ & & -\; (4K+5)^2 {\biggr ]}^{1/2}, \label{aver-rot-dif-n} \end{eqnarray} where $\tilde{H}=\int_0^R \rho(r) r^8 dr/(M R^6)=0.01$. As can be seen from Fig.~\ref{fig:Delta-omega}, after a few hundred seconds the average differential rotation increases rapidly, saturating at high values relatively to the initial angular velocity of the star. \begin{figure}[h] \includegraphics{deltaomega.eps} \caption{Time evolution of the average differential rotation $\Delta\Omega$ for different values of $K$. After a few hundred seconds the average differential rotation increases rapidly, saturating at high values relatively to the initial angular velocity of the star. 
The initial value of the amplitude of the mode is $\alpha_0=10^{-6}$.} \label{fig:Delta-omega} \end{figure} Note that the average differential rotation never vanishes for any value of $K$. This is a consequence of the fact that the second-order velocity field (\ref{difrot}) has two components, one induced by first-order quantities and another, fixed by initial data, which is a pure second-order effect. Since these two terms cannot cancel each other, a velocity drift of fluid elements along stellar latitudes is an unavoidable feature of the nonlinear \textit{r}-mode pulsation. Let us now turn to the case $K<-5/4$. As already mentioned above, at a certain point of the evolution, the \textit{r}-mode has to be saturated by hand in order to avoid that the total angular momentum of the star becomes negative. Thus, during a first stage of the evolution, $\alpha$ and $\Omega$ are determined from Eqs.~(\ref{omega}) and (\ref{alfa}) and during a second stage, $\alpha=\alpha_{sat}\leqslant\sqrt{-3/[(4K+5)Q]}$ and $\Omega$ is determined by Eq.~(\ref{first-e1}). The fact that a saturation value for $\alpha$ has to be fixed by hand introduces an element of arbitrariness which did not exist in the case $K\geqslant-5/4$. In particular, by choosing appropriately $\alpha_{sat}$ it is possible to arrange that the final value of $\Omega$ is consistent with observational data and, additionally, that a significant part of the initial angular momentum of the star is carried away by gravitational waves. For $-2<K<-5/4$ and $K<-2$, the solution of the system of equations (\ref{omega}) and (\ref{alfa}) is given by Eqs.~(\ref{alfa-t}) and (\ref{omega-t}). 
During the second stage of evolution, $\alpha=\alpha_{sat}\leqslant\sqrt{-3/[(4K+5)Q]}$ and $\Omega(t)$ is given by \begin{eqnarray} \Omega(t) &=& \Omega(t_*) {\biggl [} \frac{0.030\alpha_{sat}^2}{1+\frac13(4K+5) Q \alpha_{sat}^2} \nonumber \\ & & \times \left( \frac{\Omega(t_*)}{\Omega_K} \right)^6 (t-t_*) + 1 {\biggr ]}^{-1/6}, \label{omega-k54} \end{eqnarray} where $t_*$ is the time at which occurs the transition from the first to the second stage of evolution and $t-t_*$ is given in seconds. During the two stages of evolution we have just described, the total angular momentum of the star $J(t)$ decreases due to gravitational-wave emission. At the end of the second stage of the evolution, it is given by \begin{equation} J(t_{final}) \simeq J_0 \left[ 1 + \frac13 (4K+5)Q \alpha_{sat}^2 \right] \frac{\Omega(t_{final})}{\Omega_0}, \label{fam} \end{equation} where we have taken $J_0\simeq I\Omega_0$ and $\Omega(t)$ is given by Eq.~(\ref{omega-k54}). Finally, let us point out that, for $K<-2$, during the first stage of evolution, the angular velocity of the star increases. The case $K=-2$ has to be treated separately. It corresponds to the model studied in detail in Ref.~\cite{olcsva}. The system of equations (\ref{omega}) and (\ref{alfa}) yields the solution $\Omega=\Omega_0$ and $\alpha=\alpha_0 \exp\{-(t-t_0)/\tau_{GR}\}$. If the initial angular velocity is chosen to be $\Omega_0=\Omega_K$, then $\tau_{GR}=-37.1\,\mbox{s}$, implying that the perturbation grows exponentially from the initial amplitude $\alpha_0=10^{-6}$ to values of the order unity in just about $500\,\mbox{s}$ \cite{olcsva}. After this short initial period in which $\Omega$ is constant and $\alpha$ grows exponentially, the amplitude $\alpha$ has to be forced, by hand, to take a certain saturation value $\alpha_{sat}\leqslant Q^{-1/2}=3.26$, and the angular velocity of the star is then given by Eq.~(\ref{omega-k54}), with $K=-2$ and $\Omega(t_*)=\Omega_0$. 
The final angular momentum of the star is given by Eq.~(\ref{fam}) with $K=-2$. As can be seen from these equations, the final angular velocity and momentum of the star depend critically on the saturation value of the mode's amplitude $\alpha_{sat}$; for instance, after one year of evolution, for $\alpha_{sat}=1$ and $\Omega_0=\Omega_K$ one obtains $\Omega\simeq 0.1\Omega_K$ and $J\simeq 0.09 J_0$, while for $\alpha_{sat}=10^{-3}$ the angular velocity and momentum are, respectively, $\Omega\simeq 0.9\Omega_K$ and $J\simeq 0.9 J_0$. \section{\label{con}Conclusions} In this paper we have studied the role of differential rotation in the evolution of the \textit{r}-mode instability. We have adopted the simple model of Ref.~\cite{olcsva}, with two modifications: (i) the physical angular momentum of the \textit{r}-mode perturbation includes not only the canonical angular momentum but also a piece linear in second-order quantities, corresponding to differential rotation inducing large scale drifts of fluid elements along stellar latitudes; (ii) the evolution equations are deduced just from angular momentum considerations. The first modification is a quite important one, resulting from the fact that differential rotation is an unavoidable kinematic feature of \textit{r}-modes \cite{sa}. The presence of this differential rotation implies that at second order in the mode's amplitude \textit{r}-modes do not preserve vorticity of fluid elements. This in turn implies that the canonical angular momentum is not the full angular momentum at second order; one should also include a part linear in the second-order Lagrangian change in velocity, which is related to conservation of circulation in the fluid. The second modification, less significant, leads to a system of equations for the evolution of the \textit{r}-mode instability that differs from the ones of Ref.~\cite{olcsva} just in a quantity of the order of $Q\alpha^2$. 
A detailed justification for this modification can be found in Ref.~\cite{hl}. Within this model, we have derived a system of two first-order, coupled, differential equations (\ref{eqmod1}) and (\ref{eqmod2}), determining the time evolution of the amplitude of the \textit{r}-mode $\alpha(t)$ and the angular velocity of the star $\Omega(t)$. For the gravitational and viscous timescales appearing in these equations, we have used the expressions derived in Refs.~\cite{lom,aks} for the simple case of \textit{r}-modes of a newly born, hot, rapidly rotating neutron star with shear and bulk viscosity. In this case, the driving effect of the gravitational radiation reaction overcomes the damping effect of shear and bulk viscosity for about one year, while the temperature of the star decreases from about $10^{10}$ K to about $10^9$ K. During this period of time, in which the \textit{r}-mode instability is active, the gravitational timescale is much smaller than the viscous timescale, $\tau_{GR}\ll\tau_V$, and, therefore, the evolution of $\alpha$ and $\Omega$ can be determined, in a good approximation, by the system of equations (\ref{omega}) and (\ref{alfa}). The system of equations (\ref{omega}) and (\ref{alfa}) contains a parameter $K$, which is fixed by initial data and gives the initial amount of differential rotation associated with the \textit{r}-mode. The specific case $K=-2$, for which the physical angular momentum of the \textit{r}-mode perturbation coincides with the canonical angular momentum, was studied in great detail in Ref.~\cite{olcsva}. There is not, however, to our knowledge, any physical condition that forces $K$ to take such a particular value $K=-2$. Therefore, we have solved, both numerically and analytically, the system of equations (\ref{omega}) and (\ref{alfa}) for arbitrary $|K|\ll 10^{13}$. 
This upper limit for $K$ results from the fact that one wishes to impose the condition that the initial absolute value of the physical angular momentum of the perturbation $|\delta^{(2)}J(t_0)|$ is much smaller than the angular momentum of the unperturbed star $I\Omega_0$. In terms of the initial drift of fluid elements along stellar latitudes, this condition implies that $(v_{dr})_0\ll\Omega_0R$ at the equator of the star. It could be argued that some dissipative mechanism, such as bulk viscosity or magnetic coupling, would reduce differential rotation in a newly born, hot, rapidly rotating neutron star, before the \textit{r}-mode instability sets in, implying that $K\simeq0$. However, even in that case, a residual differential rotation of \textit{r}-modes would be present in the star and the average differential rotation $\Delta\Omega$ would increase exponentially as the amplitude of the \textit{r}-mode grows, saturating at values relatively high as compared with the initial angular velocity of the star. Finally, in what concerns the admissible values of $K$, let us point out that for $K<-5/4$, as the amplitude of the mode grows due to the gravitational-radiation instability, the total angular momentum of the star decreases and eventually becomes negative. To avoid this unphysical situation, the growth of the mode's amplitude has to be stopped by hand at a saturation value $\alpha_{sat}\leqslant\sqrt{-3/[(4K+5)Q]}$ and integration has then to be carried on with a new set of equations. The fact that a saturation value for $\alpha$ has to be fixed by hand introduces an element of arbitrariness into the solution, permitting, for instance, that agreement between the predicted final value of the angular velocity of the star and the value inferred from astronomical observations can always be achieved by simply fine tuning the value of $\alpha_{sat}$. 
One would wish, of course, that such an agreement, even qualitative, would arise in a natural way, without fine tuning the parameters of the model. For this reason, we have concentrated most of our attention on the case $K\geqslant-5/4$, in which such arbitrariness does not exist. {} From the exact analytical solution (\ref{alfa-t}) and (\ref{omega-t}) of the system of equations (\ref{omega}) and (\ref{alfa}) one can extract several conclusions. First, the amplitude of the \textit{r}-mode saturates in a natural way a few hundred seconds after the mode instability sets in. The saturation amplitude depends on the parameter $K$, namely, $\alpha_{sat}\propto(K+2)^{-1/2}$. Therefore, if the initial differential rotation of \textit{r}-modes is small ($K\simeq0$), then the \textit{r}-mode saturates at values of the order of unity. On the other hand, if the initial differential rotation is significant ($K\gg1$), then the saturation amplitude can be as small as $10^{-3}-10^{-4}$. These low values for the saturation amplitude of \textit{r}-modes are of the same order of magnitude as the ones obtained in recent investigations on wind-up of magnetic fields \cite{rls} and on nonlinear mode-mode interaction \cite{afmstw}. Second, the value of the angular velocity of the star becomes, after a short period of evolution ($t\gg \mbox{ few}\times 10^2$ s), very insensitive to the value of the parameter $K$, i.e., it becomes insensitive to the saturation value of the mode's amplitude. From Eq.~(\ref{omega-t}) for $t=t(\Omega)$, it can be easily obtained that the angular velocity after about one year of evolution of the \textit{r}-mode instability is $0.042\Omega_K$ (for any $K$), in good agreement with the inferred initial angular velocity of the fastest pulsars associated with supernova remnants. 
Finally, the value of the physical angular momentum $\delta^{(2)}J$ of the \textit{r}-mode perturbation tends, after a short period of evolution ($t\gg \mbox{ few}\times 10^2$ s), to a constant value given approximately by $[(K+\frac54)/(K+2)]J_0$. Thus, for $K\gg1$ most of the initial angular momentum of the star is transferred to the \textit{r}-mode and, consequently, almost none is carried away by gravitational radiation. That the \textit{r}-mode absorbs most of the initial angular momentum of the star is explained by the fact that the mode develops a very strong differential rotation. Indeed, the average differential rotation, given by Eq.~(\ref{aver-rot-dif-n}), increases exponentially, saturating after a few hundred seconds of evolution at very high values relatively to the initial angular velocity of the star. On the other hand, for values of $K\simeq-5/4$, the transfer of angular momentum to the \textit{r}-mode is less strong and, consequently, more angular momentum is available to be carried away by gravitational radiation. As we have seen, differential rotation introduces in the evolutionary picture of the \textit{r}-mode instability new and somewhat unexpected features. This differential rotation has a kinematic origin, being induced by terms quadratic in the velocity field of the linear \textit{r}-mode. However, differential rotation of \textit{r}-modes could also be induced by the gravitational-radiation reaction force. Indeed, within a toy model of a thin spherical shell of a rotating incompressible fluid it was shown that differential rotation of \textit{r}-modes is also driven by a gravitational radiation force, leading to the conjecture that in real stars one could observe a similar behavior \cite{lu}. The existence of this differential rotation induced by gravitational radiation and its influence on the evolution of the \textit{r}-mode instability will be addressed in a future publication \cite{ds}. 
\begin{acknowledgments} We thank \'Oscar Dias for helpful discussions. This work was supported in part by the Funda\c c\~ao para a Ci\^encia e a Tecnologia (FCT), Portugal. BT acknowledges financial support from FCT through grant PRAXIS XXI/BD/21256/99. \end{acknowledgments}
{ "redpajama_set_name": "RedPajamaArXiv" }
1,111
package com.kyle.identity.repository;

import com.kyle.identity.entity.Role;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.stereotype.Repository;

/**
 * Spring Data JPA repository for {@link Role} entities keyed by an
 * {@code Integer} primary key. All standard CRUD and paging operations are
 * inherited from {@link JpaRepository}; only the role-name lookup is declared
 * here.
 *
 * <p>The explicit bean name {@code "roleRepository"} is given to the
 * {@link Repository @Repository} annotation, presumably so other components
 * can inject it by name — confirm against the wiring that uses it.
 */
@Repository("roleRepository")
public interface RoleRepository extends JpaRepository<Role, Integer> {

    /**
     * Finds a role by its name. The query is derived by Spring Data from the
     * method name, matching the {@code role} property of {@link Role} exactly.
     *
     * @param role the role name to match
     * @return the matching {@link Role}; presumably {@code null} when no row
     *         matches, per Spring Data's contract for non-{@code Optional}
     *         derived queries — NOTE(review): consider returning
     *         {@code Optional<Role>} in a future, caller-coordinated change
     */
    Role findByRole(String role);
}
{ "redpajama_set_name": "RedPajamaGithub" }
7,341
{"url":"http:\/\/math.stackexchange.com\/questions\/65419\/if-a-function-converges-at-infinity-does-that-imply-that-it-increases-decreases","text":"# If a function converges at infinity, does that imply that it increases\/decreases at a diminishing rate?\n\nIf I have a function, say $f(x)$, and I can prove that the function decreases with $x$, and that it converges to a constant when $x = \\infty$, does that prove that it decreases at a decreasing rate? In other words, does $f(i+1) < f(i) \\text { and } f(\\infty) = c \\to f(i+1) - f(i+2) < f(i)-f(i+1)$ ? I don't see how it could be any other way, but I often overlook things. Also, would this be the same as saying that, if I can prove the first derivative is negative and that the function converges, that the second derivative must be positive?\n\n-\n\nThe rate of decrease will tend to zero in the limit, but that doesn't mean the rate of decrease is itself always decreasing. In other words, $f\\to c$ implies $\\Delta f\\to0$ (the forward difference) and monotonicity ensures $\\Delta f<0$, but it's still possible for $\\Delta^2 f$ (the second difference) to alternate sign.\n\nHere's a graphic to visualize how it's possible (on a local scale):\n\nIn the above, the black dots represent a monotone decreasing sequence (which we'll say converges to something). We put in between each black term a green term: each green term is slightly lower than the previous black dot, and slightly higher than the next black dot (I put red and blue lines in to make this more apparent), so the green and black dots together create a new monotone decreasing sequence. However, look at the slopes of the orange and purple lines: the slopes get smaller and bigger and smaller and bigger and so on, alternating. But the slopes represent the forward difference $\\Delta a_n = a_{n+1}-a_n$, so this means second forward difference $\\Delta^2a_n$ is changing sign!\n\n-\nWow, thanks so much, the visuals really helped! 
\u2013\u00a0 Jand Sep 18 '11 at 4:35\n\nIn other words, does $f(i+1) < f(i) \\text { and } f(\\infty) = c \\to f(i+1) - f(i+2) < f(i)-f(i+1)$?\n\nNo. Consider, for example: $$f(2n) = 5\\times2^{-n}$$ $$f(2n+1) = 3\\times2^{-n}$$ This is strictly decreasing $\\mathbb N\\to \\mathbb Q$ and converges towards 0, but the first differences keep oscillating up and down.\n\n-\nI am sorry but I don't understand your function. What is $f(x)$ in this case? \u2013\u00a0 Jand Sep 18 '11 at 3:52\n@JandR He split it up for even and odd cases. \u2013\u00a0 mixedmath Sep 18 '11 at 3:56\n\nI'm going to rephrase a bit.\n\nIf you have a monotone function that has a finite limit, then you must also have that the limit of the derivative is 0 (and the second derivative too, to address your convexity question).\n\nWhy? Assume not - lead to a contradiction. It comes quickly.\n\nI wanted to note that the monotone condition is important here - even a function that does have a limit but is not monotone might have crazy derivatives with highly nonintuitive behavior.\n\n-\nI think OPs asking if $f$ monotone decreasing implies $f'$ is also monotone, which I believe to be false. \u2013\u00a0 anon Sep 18 '11 at 3:18\nJust so I understand, you are saying, \"Yes\"? Showing $f(i+1)<f(i)$ (for all i) implies monotonicity, and then the fact that the derivative is negative and converges to 0 implies that it decreases at a decreasing rate? \u2013\u00a0 Jand Sep 18 '11 at 3:21\n$f(x) = \\sin^2(\\pi x)\/x$ satisfies $f(x+1)<f(x)$ for all $x>0$, but is not monotone at all. \u2013\u00a0 Henning Makholm Sep 18 '11 at 3:25\n@Henning... good point! hmm... turns out, now that I think about it, the function I am thinking of is discrete, with intervals at 1. Maybe I should open a new question about this. \u2013\u00a0 Jand Sep 18 '11 at 3:28\n@JandR No, it doesn't. Think of a downward zigzag with a geometric series style scaling factor (so that it converges). 
The derivative will be getting steeper, less steep, steeper, less steep all the time. Negative, yes. Concave? No. \u2013\u00a0 mixedmath Sep 18 '11 at 4:12","date":"2015-08-03 21:40:12","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 1, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.9599789381027222, \"perplexity\": 324.34141025609966}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.3, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2015-32\/segments\/1438042990114.79\/warc\/CC-MAIN-20150728002310-00336-ip-10-236-191-2.ec2.internal.warc.gz\"}"}
null
null
Thank you for your interest in joining our team at Deyes High School. This is where you can find information about our latest vacancies and apply. To apply, please submit: either a Teaching or Support Staff Application Form; a Letter of Application addressing the points in the Job Pack; and an Equal Opportunities in Recruitment Monitoring Form. For more information about employment opportunities, please get in contact with us. Follow the Lydiate Learning Trust's vacancies Twitter account for all the latest information regarding employment opportunities across the Trust.
{ "redpajama_set_name": "RedPajamaC4" }
9,927
Italian style and American heart for this "Chevrolet Testarossa" This convertible Corvette is unrecognizable since its transformation into Testarossa. However, it keeps its American roots with its original V8.… Chevrolet Monte-Carlo: out of the barn after 10 years of abandonment! This 1976 Chevrolet Monte-Carlo had been collecting dust and grime for 10 years in an old barn. After all this… Chevrolet Commemorative Sport Wagon: one of a kind! Unique example, it uses the basis of a 2004 Corvette C5 with aesthetic elements from the Corvette C1, the "original"… Chevrolet presents its first Silverado EV electric pickup To respond to the Ford F-150 Lightning, the Chevrolet Silverado EV takes the basis of the electric Hummer and displays… Auto Plus Classiques: the Chevrolet Corvette C4 (1983) With its impressive shark silhouette, this Corvette is not discreet! Massive proportions, streamlined headlights, vigorous-styled rims and a big V8.… Chevrolet Corvette C8s shattered by tornadoes The Bowling Green plant in Kentucky, United States, did not escape the many tornadoes that hit the country in early… Chevrolet, GMC: GM prepares 2 rival pickups for Tesla Cybertruck The General Motors group is preparing to unveil two new electric pickups to take on the Tesla Cybertruck and the… A Chevrolet Corvette Grand Sport for sale for $ 200,000? One of the 5 Chevrolet Grand Sport from 1963 for only $ 200,000? Do not dream, this is a replica,… Abandoned for 16 years, will this Chevrolet Impala restart? A 1967 Chevrolet Impala left in a field for sixteen years tries to be restarted after its long slumber. Impossible…
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
9,601
Bewehrungsstahl, Betonstahl oder Armierungseisen, früher auch Moniereisen, dient als Bewehrung (Verstärkung) von Stahlbetonbauteilen und wird nach dem Einbau in die Schalung mit Beton vergossen. Form und Eigenschaft Heutzutage kommt in Deutschland ausschließlich Betonstahl mit einer charakteristischen Fließ- oder Streckgrenze von 500 N/mm² zur Anwendung. Die erforderlichen Eigenschaften sind beispielsweise in der DIN EN 1992, DIN 488 (früher DIN 1045-1) oder in der Europäischen Norm EN 10080 geregelt. Der Betonstahl wird in verschiedenen Formen produziert. In Deutschland sind lieferbar: Betonstabstahl B500B (nach DIN 488) (früher "BSt 500 S(B)"), als warmgewalzter und gerippter Stabstahl mit Durchmessern von 6, 8, 10, 12, 14, 16, 20, 25, 28, 32 und 40 mm und Lieferlängen bis 18 m (als Sonderwalzung bis 24 m). In der Schweiz sind die Durchmesser 6, 8, 10, 12, 14, 16, 18, 20, 22, 26, 30, 34 und 40 mm lieferbar. Betonstahlmatten B500A und B500B (nach DIN 488) (früher "BSt 500 M(A) und (B)"), in verschiedenen Varianten, als fertig verschweißte Matten aus geripptem und profiliertem sowie kaltverformtem Stabstahl (Duktilitätsklasse A) oder warmgewalztem Betonstahl (Duktilitätsklasse B) mit Durchmessern von 6 mm bis 14 mm (14 mm nur in hochduktiler Ausführung, 6 bis 12 mm in normalduktiler oder hochduktiler Ausführung (Betonstahlmatten in Duktilitätsklasse B (hochduktil) werden nur auf Anforderung gefertigt, keine Lagerhaltung). Betonstahl in Ringen B500B (nach DIN 488) (früher "BSt 500 S(B)") warmgewalzt, im Durchmesserbereich von 6 bis 16 mm und bzw. B500A (nach DIN 488) (früher "BSt 500 S(A)") kaltgerippt, im Durchmesserbereich von 6 bis 12 mm für die Weiterverarbeitung auf Richt- und Schneide- oder Bügelbiegeautomaten. Bewehrungsdraht B500A+G glatt oder B500A+P mit flacher Profilierung (nach DIN 488) im Durchmesserbereich von 4 bis 12 mm für das Bewehren von z. B. Stahlbetonrohren oder Porenbeton-(früher Gasbeton-) oder Schachtbauteilen. 
Gitterträger als biegesteife Bewehrung bei Halbfertigteildecken und -wänden Die modernen Betonstähle sind bezüglich ihrer Verformungseigenschaften durch einen Elastizitätsmodul von 200.000 bis 210.000 N/mm² und die Einteilung in Duktilitätsklassen gekennzeichnet. In Deutschland gibt es die normalduktile Klasse A für die kaltverformten Stähle mit einem Verhältnis zwischen Zugfestigkeit und Fließgrenze von mindestens 1,05 und einer Stahldehnung unter Höchstlast von mindestens 2,5 % sowie die hochduktile Klasse B für die warmverformten Stähle mit mindestens 1,08 bzw. 5 %. Daneben muss der hochduktile Erdbebenstahl Klasse C mit einem Verhältnis zwischen Zugfestigkeit und Fließgrenze von mindestens 1,15 und einer Stahldehnung unter Höchstlast von mindestens 8 % erwähnt werden, der in Teilen Europas verwendet wird und eine reduzierte Fließgrenze von 450 N/mm² besitzt. Der Wärmeausdehnungskoeffizient für Stahl ist im Mittel wie bei Beton [1/K], die Wärmeleitfähigkeit mit 50 [W/(m · K)] unterscheidet sich dagegen von Beton. Die heutigen Betonstähle sind alle schweißgeeignet. Eine wichtige Eigenschaft des Betonstahls ist dessen Verbund mit dem umgebenden Beton. Zur Verbesserung des Verbunds werden Rippen aufgerollt oder aufgewalzt. Die Rippen haben eine maximale Höhe von 4,5 % und einen Abstand von 60 % des Stabdurchmessers. Durch die Rippen wird eine lokale Verzahnung zwischen dem Beton und dem Stahl erreicht, was eine optimale Kraftübertragung über eine kurze Verbundlänge ermöglicht. Korrosionsschutz Mögliche Ursachen für die Korrosion des Bewehrungsstahls sind Fehlstellen im Beton durch Risse, Kiesnester oder unzureichende Betonüberdeckung, die das Einwirken von Chloriden durch Tausalzbelastung oder Meeresatmosphäre ermöglichen. Der im Beton enthaltene Zementstein schützt den Bewehrungsstahl durch sein alkalisches Milieu mit einem pH-Wert von 12–14 vor Korrosion. Bei einem Wert < 10 ist dieser Schutz, die sogenannte Passivierung, nicht mehr sichergestellt. 
Da der Beton mit der Zeit von außen nach innen karbonatisiert (Reaktion des im Beton enthaltenen alkalischen Kalkhydrats mit Luft-Kohlendioxid zu Kalkstein (Ca(OH)2 + CO2 → CaCO3 + H2O)) und dabei in seinem pH-Wert absinkt, muss der Bewehrungsstahl vollständig und mit einer ausreichenden Betondeckung umschlossen sein. Das Zusammenbinden der einzelnen Bewehrungselemente mittels Draht (Rödeln) und der Einbau von Abstandshaltern (z. B. aufgeklemmte Räder aus Kunststoff oder Blöckchen aus Beton) zwischen Schalung und Bewehrung gewährleistet, dass der Bewehrungsstahl an der voraus geplanten Position im Bauteil mit ausreichender Betondeckung liegt. Für einen verbesserten Korrosionsschutz oder als Schutz vor Rostläufern bei dünnwandigen Sichtbetonbauteilen kann Betonstahl feuerverzinkt oder mit Epoxid beschichtet werden. Nichtrostender Stahl ist eine weitere Möglichkeit. Relativ neu ist die Glasfaser-Bewehrung. Für kleinere Querschnitte kommen zusätzlich textile Bewehrungen, insbesondere aus Glasfasergelegen, zur Anwendung. Zum Schutz gegen Korrosion des Bewehrungsstahles infolge Karbonatisierung oder Chlorideindringung kann auch ein Kathodischer Korrosionsschutz mit einer Fremdstromanode, die über Gleichrichter mit einem Schutzstrom (eigentlich nur eine Polarisierung) gesteuert werden, angewendet werden. Dies kann beispielsweise im Brückenbau zur Anwendung kommen. Alle Varianten unterliegen in Deutschland der Bauaufsicht. Das heißt, abweichend von der Norm hergestellte Tragwerke benötigen eine bauaufsichtliche Zulassung für die eingesetzten Komponenten oder eine Zustimmung im Einzelfall für das spezielle Bauvorhaben durch die Landesbaubehörde. Eine Liste bauaufsichtlich zugelassener Bewehrungselemente führt das Deutsche Institut für Bautechnik. 
Oberflächenschutzsysteme, wie die Imprägnierung der Betonoberflächen mit einem Hydrophobierungsmittel oder das Aufbringen von Beschichtungen, dienen ebenfalls dem verbesserten Korrosionsschutz des Betonstahls, insbesondere, wenn der Beton bereits bis zur Tiefe des Stahls karbonatisiert ist (z. B. im Zuge der Instandsetzung). Kennzeichnung Betonstabstahl Die heutigen Betonstabstähle weisen zwei Rippenflächen auf. Eine der Flächen kennzeichnet durch eine besondere Anordnung der Schrägrippen die Betonstahlsorte. Die andere Fläche trägt die Kennzeichen des produzierenden Werkes, welche einander mindestens auf jedem laufenden Meter folgen. Diese beginnen mit zwei verbreiterten Schrägrippen, es folgt jeweils zwischen verbreiterten Schrägrippen zunächst das Land und anschließend das betreffende Werk. Das Feld für das Werk kann in Zehner- und Einerstellen unterteilt sein. Anmerkungen: Es gibt sowohl in der Ländergruppe 8 wie in der 9 polnische, türkische und moldawische Betonstabstahlhersteller. Weitere Bewehrungselemente Gewindestahl ist ein Beton- oder Spannstahl mit Schrägrippen, die gewindeartig ausgebildet sind und von verschiedenen Unternehmen hergestellt wird. Er wurde für die Bewehrungstechnik entwickelt, um damit eine mechanische Verbindung über Schraubmuffen zu ermöglichen. Heute wird der Gewindestahl zusätzlich in der Geotechnik als Verpresspfahl und Erdanker eingesetzt. Als Ersatz für Querkraftbewehrung gibt es Doppelkopfanker und Dübelleisten. In Bauteilen mit besonderen Anforderungen an Korrosion, Zerspanung oder elektrisch/magnetische Eigenschaften kann auch zugelassene nichtmetallische GFK-Bewehrung zum Einsatz kommen. Geschichte Erfinder der Eisenbewehrung war der Franzose Joseph Monier, nach ihm nennt man die Bewehrung (Monierung) auch Moniereisen (im Bau-Jargon häufig wie das Verb "monieren" ausgesprochen). 
Monier war Gärtner und ärgerte sich, dass die Pflanzkästen aus Beton für die transportablen Orangenbäumchen in den von ihm betreuten herrschaftlichen Gärten zu oft brachen. Andere ältere, aber auch heute noch gebräuchliche Bezeichnungen sind Armierungsstahl (im Gegensatz zu Konstruktionsstahl) oder Schlaffstahl (im Gegensatz zu Spannstahl). Durch Torsion kaltverformte Bewehrungsstähle weisen eine erhöhte Festigkeit auf. Sie werden als TOR-Stahl bezeichnet und waren lange Zeit durch eine verdrillte Längsrippe aus den Walzüberständen gekennzeichnet, die sich günstig auf den Verbund mit dem Beton auswirkt. TOR-Stahl wurde vom Österreicher Rudolf Schmidt im Jahr 1936/1937 erfunden. Die Bezeichnung ist in Österreich noch gebräuchlich. Bewehrungsstahl wird heute zum Erreichen der genormten mechanischen Eigenschaften meist über das in den 1970er Jahren entwickelte Tempcore-Verfahren direkt aus der Walzhitze gehärtet oder als Draht kaltgereckt. Seltener wird auch nur über Legierungen die Festigkeit erreicht. Die in Stahlbetonbauteile einzubauende Bewehrung wird auf Zeichnungen (Verlegeplänen) bezüglich Anzahl, Durchmesser, Form und Lage dargestellt und vermaßt. Für die Bestellung der Bewehrung können auch separate Stahllisten erstellt werden. Entwicklung in Deutschland Bis Mitte der 1930er-Jahre wurden keine speziellen Betonstähle als Bewehrung verwendet, sondern Stäbe, Flacheisen und Profile mit einer glatten Oberfläche und einer Streckgrenze um oder über 250 N/mm². Die Aktivierung der Tragfähigkeit des glatten Stahls erfolgte dabei weniger durch den Verbund zwischen Beton und Stahl als vor allem durch die Verankerung mit Haken und Schlaufe. Der Isteg-Stahl, bestehend aus zwei Drähten aus glattem Baustahl, die zu einer 2-drähtigen Litze verseilt wurden, war ab 1933 der erste spezielle deutsche Betonstahl mit verbesserten Verbundeigenschaften. Zur gleichen Zeit wurde in Deutschland das Baustahlgewebe zugelassen, bestehend aus Matten oder Rollen (bis 6 mm). 
Ab 1935 wurden zwecks Materialersparnis durch Verwinden (Tordieren) von Rundstählen hochfeste Betonstähle entwickelt, anfangs ohne Querrippen. 1937 wurden die Bewehrungsstähle in Gruppen eingeteilt. Die Gruppe I umfasste den BSt 22/34 mit einer Mindeststreckgrenze von 220 N/mm², die Gruppe II den BSt 34/50 mit einer Mindeststreckgrenze von 340 N/mm², die Gruppe III den BSt 42/50 mit einer Mindeststreckgrenze von 420 N/mm², und die Gruppe IV entspricht den heutigen Betonstählen. Bei Nachrechnungen oder Verstärkungen alter Bauwerke sind die Festigkeiten der alten Stahlsorten in statischen Berechnungen zu berücksichtigen. Ab 1959 wurde der hochwertige schräg gerippte Rippentorstahl als Betonstahl IIIb bauaufsichtlich zugelassen. Dieser wurde festigkeitssteigernd zusätzlich noch (im Werk) durch Verdrehen (Tordieren von Torsion – daher das "Tor" im Namen) kaltverformt. Die heutige Rippenform wurde schließlich ab 1961 für eine bessere Dauerschwingfestigkeit des Betonstahls IV entwickelt. Literatur Dieter Rußwurm: Betonstähle für den Stahlbetonbau. 1993, ISBN 3-7625-3078-5. Peter Bindseil: Betonstähle – vom Beginn des Stahlbetonbaus bis zur Gegenwart. Bauwesen, Berlin 2002, ISBN 3-345-00803-3. Hansgerd Kämpfe: Bewehrungstechnik – Grundlagen-Praxis-Beispiele-Wirtschaftlichkeit. Vieweg und Teubner, Wiesbaden 2010, ISBN 978-3-8348-0767-0. Weblinks Website – Institut für Stahlbetonbewehrung Website der International Rebar Producers and Exporters Association (IREPAS) Allgemeine Bauaufsichtliche Zulassung und Bauartgenehmigung für feuerverzinkte Betonstähle. (PDF; 2 MB) Einzelnachweise Stahl Baustahl
{ "redpajama_set_name": "RedPajamaWikipedia" }
4,863
Q: install4J, windows 2008R2 and java 6 permission issues We are using install4j to install our application on Windows 2008R2. With Java 1.4 installed the install4j effort works fine. with Java 6 installed the install4j effort results in permission issues with C:\Program Files(x86). Can anyone comment on the different permissions Windows 2008 might require for an install4j executable when using java 6 compared to java 1.4? FYI Windows 2008 64bit, java 32bit, install4j 32bit thank you A: There are no differences in permission handling between Java 1.4 and Java 6. Permissions in install4j are handled by the "Request privileges" action which is typically located in the "Startup" node of your installer. For installing into C:\Program Files (x86), you need elevated privileges. If your installer works with Java 1.4 and does not work with Java 6, there must be some other error. I would suggest to look at the file .install4j/installation.log for more information.
{ "redpajama_set_name": "RedPajamaStackExchange" }
3,045
{"url":"http:\/\/get-software.net\/help\/Catalogue\/entries\/fullpict.html","text":"Full page pictures\n\nThe package provides picture-mode environments whose size is related to the size of the page. The fullpict environment takes arguments to specify the dimension of the picture in picture mode \u2018unit lengths\u2019; the value for \\unitlength is defined by the fact that the picture is to be 90% of \\textwidth. Similar environments halfpict (half the width of fullpict), and scalepict (a given percentage of the width of fullpict), are also defined.\n\nThe author is Bruce Shawyer.\n\nLicense: noinfo Version dated: 2003-10-31 Catalogued: 2015-08-03","date":"2016-05-28 13:46:02","metadata":"{\"extraction_info\": {\"found_math\": false, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.9514544606208801, \"perplexity\": 4867.305243713843}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2016-22\/segments\/1464049277807.82\/warc\/CC-MAIN-20160524002117-00164-ip-10-185-217-139.ec2.internal.warc.gz\"}"}
null
null
{"url":"https:\/\/docs.dgl.ai\/generated\/dgl.function.u_mul_v.html","text":"dgl.function.u_mul_v\u00b6\n\ndgl.function.u_mul_v(lhs_field, rhs_field, out)\n\nBuiltin message function that computes a message on an edge by performing element-wise mul between features of src and dst if the features have the same shape; otherwise, it first broadcasts the features to a new shape and performs the element-wise operation.\n\n>>> import dgl","date":"2020-06-05 12:59:07","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 1, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.34782665967941284, \"perplexity\": 5442.744538780779}, \"config\": {\"markdown_headings\": false, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2020-24\/segments\/1590348500712.83\/warc\/CC-MAIN-20200605111910-20200605141910-00258.warc.gz\"}"}
null
null
Q: How to show a window from a PyQt5 app already running? self.show() I have an app with several windows already working, but I don't really understand the concept. I have a window with a QPushButton: self.ui.btn.clicked.connect(self.btn_clicked) And this code below, which works very nicely if used alone. (example to show a graph) import matplotlib matplotlib.use('Qt5Agg') import matplotlib.pyplot as plt from PyQt5 import QtWidgets from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar class ScrollableWindow(QtWidgets.QMainWindow): def __init__(self, fig): self.qapp = QtWidgets.QApplication([]) QtWidgets.QMainWindow.__init__(self) self.widget = QtWidgets.QWidget() self.setCentralWidget(self.widget) self.widget.setLayout(QtWidgets.QVBoxLayout()) self.widget.layout().setContentsMargins(0,0,0,0) self.widget.layout().setSpacing(0) self.fig = fig self.canvas = FigureCanvas(self.fig) self.canvas.draw() self.scroll = QtWidgets.QScrollArea(self.widget) self.scroll.setWidget(self.canvas) self.nav = NavigationToolbar(self.canvas, self.widget) self.widget.layout().addWidget(self.nav) self.widget.layout().addWidget(self.scroll) self.show() exit(self.qapp.exec_()) # create a figure and some subplots fig, axes = plt.subplots(ncols=4, nrows=5, figsize=(16,16)) for ax in axes.flatten(): ax.plot([2,3,5,1]) # pass the figure to the custom window a = ScrollableWindow(fig) What I would like to do, is to put this code inside my function def btn_clicked(self): # here copy this nice code To show up the graph once I clicked on the button. But obviously, exit(self.qapp.exec_()) can't be used otherwise the common error is showing up: QCoreApplication::exec: The event loop is already running So I delete that line of code, but then it is not working either. Here I dont understand why. 
My app is already running, MainWindow is showing up normally, why isn't it possible to show some other window, without using exec_() self.show() should theoretically show up the graph isn't it? What am I doing wrong? Thanks everyone. To provide a very simple application example which could be used by everyone, I join this pyqt5 code with 1 PushButton. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(437, 239) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.btn = QtWidgets.QPushButton(self.centralwidget) self.btn.setGeometry(QtCore.QRect(140, 60, 151, 41)) self.btn.setObjectName("btn") MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow")) self.btn.setText(_translate("MainWindow", "PushButton")) self.btn.clicked.connect(self.btn_clicked) def btn_clicked(self): # here I would like to use the code from above to show a graph if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow) MainWindow.show() sys.exit(app.exec_())
{ "redpajama_set_name": "RedPajamaStackExchange" }
4,906
Q: Select into tables dynamically with variables I have some code to create tables based on a set of dates I define. Example, I have 5 dates, and they are aren't consecutive. For any of these dates, I want to create a table and I am currently using a Select into. I am having to do this 5 times, even though the only thing changing is the name of the new table created and the date. Is there a way to do this in an elegant way. I started writing some code, but I am struggling to get it to loop through all the dates I want. The way I have written it currently, I only works if I edit the date at the start. DECLARE @MyDate DATE; SET @MyDate = '2019-01-01'; SET @TableName = 'Table1'; SELECT * into @TableName FROM Original_Table WHERE Query_Date = @MyDate; A: Is this a one time thing or do you have to do this on a regular basis? If it's the first, than I would just do it and get it over with. If it's the latter, then I suspect something is very wrong with the way that system is designed - but assuming that can't be changed, you can create a stored procedure that will do this using dynamic SQL. Something like this can get you started: CREATE PROCEDURE dbo.CreateTableBasedOnDate ( @MyDate DATE, -- sysname is a system data type for identifiers: a non-nullable nvarchar(128). @TableName sysname ) AS -- 200 is long enough. Yes, I did the math. DECLARE @Sql nvarchar(200) = -- Note: I'm not convinced that quotename is enough to protect you from sql injection. -- you should be very careful with what user is allowed to execute this procedure. N'SELECT * into '+ QUOTENAME(@TableName) +N' FROM Original_Table WHERE Query_Date = @MyDate;'; -- When dealing with dynamic SQL, Print is your best friend. 
-- Remark this row and unremark the next only once you've verified you get the correct SQL PRINT @SQL; --EXEC sp_ExecuteSql @Sql, N'@MyDate Date', @MyDate GO Usage: EXEC CreateTableBasedOnDate '2018-01-01', 'zohar'; A: Use dynamic SQL: DECLARE @MyDate DATE, @TableName varchar(50); SET @MyDate = '2019-01-01'; SET @TableName = 'Table1'; DECLARE @sql NVARCHAR(4000); DECLARE @params NVARCHAR(4000); SELECT @sql=N' SELECT * INTO ' + QUOTENAME(@TableName) + ' FROM Original_Table WHERE Query_Date = @MyDate;'; SELECT @params = N'@MyDate DATE'; EXEC sys.sp_executesql @sql, @params, @MyDate=@MyDate Note that dynamic SQL can be dangerous as it opens up a path for SQL injection. Its fine if you are just using it in your own local scripts, but take care if you e.g. wrap this in a procedure that is more widely accessible. A: I would use dynamic SQL although I would add another variables for the schema: DECLARE @MyDate nVarchar(50) = '2019-01-01', @Schema nVarchar (50) = 'dbo', @TableName nVarchar(250) = 'Table1', @SQL nVarchar(500); Set @SQL = ' SELECT * into '+ QUOTENAME(@Schema)+'.'+ QUOTENAME(@TableName) +' FROM Original_Table WHERE Query_Date = '+ @MyDate +'; ' --print @SQL Exec(@SQL) You can use the print statement to see how the SQL will look before executing this properly. You may also want to look at adding this as a stored procedure.
{ "redpajama_set_name": "RedPajamaStackExchange" }
2,030
Students are encouraged to contact the Career Center early in their academic careers in order to utilize the services available in the development of their long-range planning. The staff assists students with career counseling through evaluation and testing in the areas of skills analysis, interest identification, and values clarification. Computer-assisted career information searches and a Career Resource Library are also available. The Career Center provides pre-employment preparation assistance through videotaped mock interviews and workshops on r�sum� writing, interviewing skills, and conducting an effective job search. Representatives of business, government, industry, education, and social agencies recruit UTD students at Career Expos and on-campus interviews. The office maintains daily updated on-line job listings for part-time, on-campus, and full-time positions with public and private employers. The Career Center also manages the Internship/Cooperative Education program. All students register with the Career Center by completing an online registration profile through the UTD CareerWorks system. The student uploads a resume into the system, which can, in turn, be referred to employers. Employers also have access to candidate r�sum�s via R�sum� Books in the UTD CareerWorks system. For more information, contact the Career Center in McDermott Library 1.312. (Phone: 972-883-2943, Web: www.utdallas.edu/student/career, Email: [email protected]).
{ "redpajama_set_name": "RedPajamaC4" }
5,459
OKAY, i haven't drawn anything for about a months time now, so im not entirely sure of how this looks like. Looks like a bunch of copy/pasted stuff at closer look. But... I don't know how to draw on the PC, so I wouldn't know. HAHA, thanks, it's actually all from scratch, but my tablet is really giving me a hard time.
{ "redpajama_set_name": "RedPajamaC4" }
2,406
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta name="viewport" content="width=device-width"/> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> </head> <body> <img src="/image.jpg"/> </body> </html>
{ "redpajama_set_name": "RedPajamaGithub" }
2,020
{"url":"https:\/\/www.semanticscholar.org\/paper\/VERITAS-Observations-of-the-Vicinity-of-the-Cygnus-Weinstein\/c9a1a9a80046703b799405a6a40611f2274558c0","text":"\u2022 Corpus ID: 119181376\n\n# VERITAS Observations of the Vicinity of the Cygnus Cocoon\n\n@article{Weinstein2013VERITASOO,\ntitle={VERITAS Observations of the Vicinity of the Cygnus Cocoon},\nauthor={A. Weinstein},\njournal={arXiv: High Energy Astrophysical Phenomena},\nyear={2013}\n}\n\u2022 A. Weinstein\n\u2022 Published 9 March 2013\n\u2022 Physics\n\u2022 arXiv: High Energy Astrophysical Phenomena\nThe study of $\\gamma$-ray emission from galactic sources such as supernova remnants (SNR) may provide key insights into their potential role as accelerators of cosmic rays up to the knee ($\\sim 10^{15}$ eV). The VERITAS Observatory is sensitive to galactic and extragalactic $\\gamma$-ray sources in the 100 GeV to 30 TeV energy range. We report here on VERITAS observations of the vicinity of the cocoon of freshly accelerated cosmic rays reported by Fermi, which lies between potential accelerators\u2026\n1 Citations\n\n## Figures from this paper\n\n\u2022 Physics\n\u2022 2016\nThe investigation of VHE gamma-ray sources by any methods, including mirror Cherenkov telescopes, touches on the problem of the cosmic ray origin and, accordingly, the role of the Galaxy in their\n\n\u2022 ApJS,\n\u2022 2010","date":"2023-02-07 15:06:24","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 1, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.5814703106880188, \"perplexity\": 
7583.7918195571265}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2023-06\/segments\/1674764500619.96\/warc\/CC-MAIN-20230207134453-20230207164453-00466.warc.gz\"}"}
null
null
\section{Introduction} In this paper we deal with an integro-differential equation of fractional order derived from the classical Peierls--Nabarro model for crystal dislocations. Specifically we will focus on the case in which the fractional order of the equation is low, which corresponds to a situation in which the long-range elastic interactions give the highest contribute to the energy. In this framework, we will describe the evolution of the atom dislocation function by showing that, for sufficiently long times and at a macroscopic scale, the dislocation function approaches the superposition of a finite number of dislocations. These individual dislocations have size equal to the characteristic period of the crystal and they occur at some specific points, which in turn evolve according to a repulsive potential and reacting elastically to the external stress. More precisely, we consider the problem \begin{equation}\label{Pp} v_t=L_s v-W'(v)+\sigma_\epsilon(t,x) \quad {\mbox{ in }} (0,+\infty)\times{\mathds R}, \end{equation} where~$s\in(0,1)$, $L_s$ is the so-called fractional Laplacian, and~$W$ is a $1$-periodic potential. More explicitly, given $\varphi\in C^2({\mathds R})\cap L^\infty({\mathds R})$ and $x\in{\mathds R}$, we define $$ L_s \varphi(x):=\frac{1}{2} \int_{\mathds R} \frac{\varphi(x+y)+\varphi(x-y)-2\varphi(x)}{|y|^{1+2s}}\,dy.$$ We refer to~\cite{SilvTH, DPV12} for a basic introduction to the fractional Laplace operator. As for the potential, we assume that \begin{equation}\label{Wass} \left\{ \begin{array}{lllll} W\in C^{3,\alpha}({\mathds R}), \quad {\mbox{ for some }} 0<\alpha<1, \\ W(x+1)=W(x) \quad {\mbox{ for any }}x\in{\mathds R}, \\ W(k)=0 \quad {\mbox{ for any }}k\in{\mathds Z}, \\ W>0 \quad {\mbox{ in }}{\mathds R}\setminus{\mathds Z}, \\ W''(0)>0. \end{array} \right. \end{equation} As customary,~$\epsilon>0$ is a small scale parameter, and $\sigma_{\epsilon}$ plays the role of an exterior stress acting on the material. 
We suppose that $$ \sigma_{\epsilon}(t,x):=\epsilon^{2s}\sigma(\epsilon^{1+2s}t,\epsilon x), $$ where~$\sigma$ is a bounded uniformly continuous function such that, for some~$\alpha\in(s,1)$ and~$M>0$, it holds \begin{equation}\begin{split}\label{sigma} &\|\sigma_x\|_{L^{\infty}([0,+\infty)\times{\mathds R})}+\|\sigma_t\|_{L^{\infty}([0,+\infty)\times{\mathds R})} \leq M, \\ &|\sigma_x(t,x+h)-\sigma_x(t,x)|\leq M|h|^{\alpha}, \quad {\mbox{ for every }} x,h\in{\mathds R} {\mbox{ and }} t\in[0,+\infty). \end{split}\end{equation} The problem in \eqref{Pp} arises in the classical Peierls--Nabarro model for atomic dislocation in crystals, see e.g.~\cite{Nab97} and references therein. In this paper, our main focus is on the fractional parameter range~$s\in(0,1/2)$, which corresponds to a strongly nonlocal elastic term, in which the energy contributions coming from far cannot be neglected and, in fact, may become predominant. We refer to~\cite{GM12} for the case~$s=1/2$ and to~\cite{DIPPV} for the case~$s\in(1/2,1)$. We define $$v_{\epsilon}(t,x):=v\left(\frac{t}{\epsilon^{1+2s}},\frac{x}{\epsilon}\right)$$ and we look at the equation satisfied by the rescaled function~$v_{\epsilon}$, that is, recalling \eqref{Pp}, \begin{eqnarray}\label{ACpar} \left\{ \begin{array}{ll} (v_{\epsilon})_t= \displaystyle\frac{1}{\epsilon}\biggl(L_s v_{\epsilon} - \displaystyle\frac{1}{\epsilon^{2s}}W'(v_{\epsilon})+\sigma(t,x)\biggr) \quad {\mbox{ in }}(0,+\infty)\times{\mathds R},\\ v_{\epsilon}(0,\cdot)=v_{\epsilon}^0 \quad {\mbox{ in }}{\mathds R}. \end{array} \right. \end{eqnarray} Following~\cite{PSV13, CSire}, we introduce the basic layer solution~$u\in C^{2,\alpha}({\mathds R})$ (here~$\alpha=\alpha(s) \in(0,1)$), that is, the solution of the problem \begin{eqnarray}\label{AC} \left\{ \begin{array}{ll} L_s u-W'(u)=0 \quad{\mbox{ in }}{\mathds R},\\ u'>0,\quad u(-\infty)=0, \quad u(0)=1/2, \quad u(+\infty)=1. \end{array} \right. 
\end{eqnarray} The name of layer solution is motivated by the fact that~$u$ approaches the limits~$0$ and~$1$ at~$\pm\infty$. More quantitatively, there exists a constant~$C\geq1$ such that \begin{equation}\label{phi} |u(x)-H(x)|\leq C|x|^{-2s} \quad {\mbox{ and }} \quad |u'(x)|\leq C|x|^{-(1+2s)}, \end{equation} where~$H$ is the Heaviside function, see Theorem~2 in~\cite{PSV13}. As a preliminary result, we will prove a finer asymptotic estimate on the decay of the layer solution: \begin{theorem}\label{TH-decay} Let $s \in (0,1/2)$. There exist constants~$C>0$ and $\vartheta>2s$ such that \begin{equation*} \left|u(x)-H(x)+\frac{1}{2s\,W''(0)}\frac{x}{|x|^{1+2s}}\right|\leq\frac{C}{|x|^{\vartheta} } \quad {\mbox{ for any }}x\in{\mathds R}, \end{equation*} with $\vartheta$ depending only on~$s$. \end{theorem} To state our next result, we recall that the semi-continuous envelopes of~$u$ are defined as $$ u^*(t,x):=\limsup_{(t',x')\rightarrow(t,x)}u(t',x') $$ and $$ u_*(t,x):=\liminf_{(t',x')\rightarrow(t,x)}u(t',x'). $$ Moreover, given~$x_1^0<x_2^0<\ldots<x_N^0$, we consider the solution~$\big(x_i(t)\big)_{i=1,\ldots,N}$ to the system \begin{eqnarray}\label{ODE2} \left\{ \begin{array}{ll} \dot{x_i} =\gamma\biggl(-\sigma(t,x_i)+ \displaystyle\sum_{j\neq i} \displaystyle\frac{x_i-x_j}{2s\,|x_i-x_j|^{2s+1}}\biggr) {\mbox{ in }}(0,+\infty), \\[4ex] x_i(0)=x_i^0, \\ \end{array} \right. \end{eqnarray} where \begin{equation}\label{gamma} \gamma=\left(\int_{{\mathds R}}(u')^2\right)^{\!-1}. \end{equation} For the existence and uniqueness of such solution see Section~8 in~\cite{FIM09}. We consider as initial condition in \eqref{ACpar} the state obtained by superposing $N$ copies of the transition layers, centered at $x_1^0,\dots,x_N^0$, that is \begin{equation}\label{initial} v_{\epsilon}^0(x)=\frac{\epsilon^{2s}}{\beta}\sigma(0,x)+ \sum_{i=1}^Nu\left(\frac{x-x_i^0}{\epsilon}\right), \end{equation} where \begin{equation}\label{beta} \beta:=W''(0)>0. 
\end{equation} The main result obtained in this framework is the following: \begin{theorem}\label{TH} Let $s\in (0,1/2)$, assume that~\eqref{Wass}, \eqref{sigma} and~\eqref{initial} hold, and let \begin{equation*} v_0(t,x)=\sum_{i=1}^N H(x-x_i(t)), \end{equation*} where~$H$ is the Heaviside function and~$(x_i(t))_{i=1,\ldots,N}$ is the solution to~\eqref{ODE2}. Then, for every~$\epsilon>0$ there exists a unique viscosity solution~$v_{\epsilon}$ to~\eqref{ACpar}. Furthermore, as~$\epsilon\rightarrow0$, the solution~$v_{\epsilon}$ exhibits the following asymptotic behavior: \begin{equation*} \limsup_{{(t',x')\rightarrow(t,x)}\atop{\epsilon\rightarrow0}} v_{\epsilon}(t',x')\leq(v_0)^*(t,x) \end{equation*} and \begin{equation*} \liminf_{{(t',x')\rightarrow(t,x)}\atop{\epsilon\rightarrow0}}v_{\epsilon}(t',x')\geq(v_0)_*(t,x) \end{equation*} for any~$t\in[0,+\infty)$ and~$x\in{\mathds R}$. \end{theorem} When $s=1/2$ the result above was proved in~\cite{GM12}, where the question was also raised about what happens for other values of the parameter~$s$. In~\cite{DIPPV}, the result was extended to the case~$s\in(1/2,1)$. So the main purpose of this paper was to obtain the result for the remaining range of~$s\in(0,1/2)$. {F}rom the physical point of view, this range of parameters is important since it corresponds to the case of a strong nonlocal elastic effect: notice indeed that the lower the value of~$s$, the stronger the energy contributions coming from far become. We refer to~\cite{GM12, DIPPV} for a more exhaustive set of physical motivations and heuristic asymptotics of the model we study. We also remark that, differently from~\cite{GM12}, we do not make use of any harmonic extension results, which are specific for the fractional powers of the Laplacian, and so our proof is feasible for more general types of integro-differential equations. 
The cornerstone to prove Theorem~\ref{TH-decay} (and hence Theorem~\ref{TH}) is given by the following decay estimate at infinity, which we think has also independent interest: \begin{theorem}\label{DECAY} Let $s \in (0,1/2)$, and let~$v\in L^{\infty}({\mathds R})\cap C^2({\mathds R})$ such that \begin{equation} \label{va0} \lim_{x\rightarrow\pm\infty}v(x)=0.\end{equation} Suppose that there exists a function~$c\in L^{\infty}({\mathds R})$ such that $c(x)\geq\delta>0$ for any~$x\in{\mathds R}$ and for some~$\delta>0$, and \begin{equation}\label{eqM} -L_s v +cv = g, \end{equation} where~$g$ is a function that satisfies the following estimate \begin{equation}\label{Mest} |g(x)|\leq\frac{C}{1+|x|^{4s}} \quad {\mbox{ for any }}x\in{\mathds R}, \end{equation} for some constant~$C\geq0$. Then, there exist $\vartheta\in(2s,1+2s]$ depending only on $s$, and a constant~$\overline C\geq0$ depending on~$C$, $\delta$, $\|c\|_{L^\infty({\mathds R})}$, and~$s$, such that $$ |v(x)|\leq\frac{\overline C}{1+|x|^{\vartheta}} \quad {\mbox{ for any }}x\in{\mathds R}. $$ \end{theorem} In our setting, we will use Theorem \ref{DECAY} in the proof of Theorem~\ref{TH-decay} (there, the function $v$ in the statement of Theorem \ref{DECAY} will be embodied by the difference between the solution~$u$ of problem \eqref{AC} and a suitable heteroclinic solution of a model problem, so that in this case condition \eqref{va0} is automatically satisfied). The explicit value of the exponent~$\vartheta$ that appears in the statement of Theorem \ref{DECAY} will be given in formula~\eqref{SGAMMA}, but such explicit value will not play any role in this paper (the only relevant feature for us is that~$\vartheta>2s$). We think that it is an interesting open problem to determine the optimal value of the exponent~$\vartheta$ in a general setting. 
Theorem~\ref{DECAY} may be seen as the strongly nonlocal version of Corollary~5.13 in~\cite{GM12} and Corollary~7.1 in~\cite{DIPPV}, where similar decay estimates (with different exponents) were obtained when~$s=1/2$ and~$s\in(1/2,1)$, respectively. However, the techniques in~\cite{GM12, DIPPV} are not sufficient to obtain the desired decay estimates when~$s\in(0,1/2)$, so the proof of Theorem~\ref{DECAY} here will rely on completely different methods. Roughly speaking, we use suitable test functions in order to obtain an integral decay estimate (this will be accomplished in Proposition~\ref{PRTHM1}) and then we use barriers and sliding arguments to infer from it a pointwise estimate. Remarkably, differently from the classical case where pointwise estimates follow from integral ones using a suitable version of the weak Harnack inequality (see e.g. Theorem~4.8(2) in~\cite{CAFCAB}), in our case, to the best of our knowledge, the fractional analog of this weak Harnack inequality is not known. To overcome this difficulty, some careful estimates on the fractional Laplacian of a function below a barrier are employed (these estimates will be obtained in Corollary~\ref{ULCO}). The rest of the paper is organized as follows. The proof of Theorem~\ref{DECAY} is contained in Sections~\ref{summation}--\ref{P:T}. More precisely, we collect some preliminary elementary estimates in Section~\ref{summation}. Then, in Sections~\ref{IEAP} and~\ref{ICAI}, we estimate the fractional Laplacian of a function below a barrier by taking into account the contribution in a neighborhood of a given point and the contribution coming from infinity. An integral decay estimate is given in Section~\ref{D:S} and the proof of Theorem~\ref{DECAY} is completed in Section~\ref{P:T}. With this we have the basic technical tools to prove Theorem~\ref{TH-decay} in Section~\ref{E:TH}. Then, Sections~\ref{L infty}--\ref{csiufff} are devoted to the proof of Theorem~\ref{TH}. 
Namely, Section~\ref{L infty} collects some uniform bounds that are used in Section~\ref{7sddd} to construct the solution of a corrector equation and prove its regularity. With this, the proof of Theorem~\ref{TH} is completed in Section~\ref{csiufff}. \section{An auxiliary summation lemma}\label{summation} Here we present some technical summation estimates, to be used in the forthcoming Section~\ref{ICAI}. For the sake of generality, we prove the results in Sections \ref{summation}-\ref{D:S} in ${\mathds R}^n$, for any $s \in (0,1)$ and $n\ge 1$. \begin{lemma}\label{pl1} Let~$s \in (0,1)$, $x_0\in{\mathds R}^n$ such that~$|x_0|\ge 3$, and $\vartheta\in(0,n+2s]$. Then $$ \sum_{{k\in {\mathds Z}^n \setminus\{0\}}\atop{|x_0+k|\le |x_0|/2}} \frac{1}{|k|^{n+2s}\,(1+|x_0+k|)^\vartheta}\le \frac{C}{(1+|x_0|)^\vartheta},$$ for some $C>0$ depending on $n$, $s$ and $\vartheta$. \end{lemma} \begin{proof} If $|x_0+k|\le |x_0|/2$ then $|k|\ge |x_0|-|x_0+k|\ge |x_0|/2$, therefore \begin{equation}\label{9e1} \sum_{{k\in {\mathds Z}^n \setminus\{0\}}\atop{|x_0+k|\le |x_0|/2}} \frac{1}{|k|^{n+2s}\,(1+|x_0+k|)^\vartheta}\le \frac{2^{n+2s}}{|x_0|^{n+2s}} \sum_{{k\in {\mathds Z}^n \setminus\{0\}}\atop{|x_0+k|\le |x_0|/2}} \frac{1}{(1+|x_0+k|)^\vartheta}. \end{equation} Moreover, $$ \int_1^{|x_0|} \frac{\rho^{n-1}\,d\rho}{\rho^\vartheta}= Z(n,\vartheta, x_0), $$ where $$ Z(n,\vartheta,x_0):= \left\{ \begin{matrix} (n-\vartheta)^{-1} (|x_0|^{n-\vartheta}-1) & {\mbox{ if }} n>\vartheta, \\ \log |x_0| & {\mbox{ if }} n=\vartheta,\\ (\vartheta-n)^{-1} (1-|x_0|^{n-\vartheta}) & {\mbox{ if }} n<\vartheta. \end{matrix} \right.$$ In any case \begin{equation}\label{zeta} \frac{Z(n,\vartheta,x_0)}{|x_0|^{n+2s}}\le\frac{c_{n,\vartheta}}{|x_0|^\vartheta}, \end{equation} for some constant $c_{n,\vartheta}>0$ only depending on $n$ and $\vartheta$. 
Therefore \begin{eqnarray*} \sum_{{k\in {\mathds Z}^n \setminus\{0\}}\atop{|x_0+k|\le |x_0|/2}} \frac{1}{(1+|x_0+k|)^\vartheta} &\le& \int_{B_{|x_0|}(-x_0)} \frac{dx}{(1+|x+x_0|)^\vartheta} \\ &=& \omega_{n-1} \int_0^{|x_0|} \frac{\rho^{n-1}\,d\rho}{(1+\rho)^\vartheta} \\ &\le& \omega_{n-1} \left[\int_0^{1} \rho^{n-1}\,d\rho +\int_1^{|x_0|} \frac{\rho^{n-1}\,d\rho}{\rho^\vartheta}\right] \\ &=& \omega_{n-1} \left[\frac1n + Z(n,\vartheta,x_0) \right].\end{eqnarray*} This and~\eqref{9e1} give that $$ \sum_{{k\in {\mathds Z}^n \setminus\{0\}}\atop{|x_0+k|\le |x_0|/2}} \frac{1}{|k|^{n+2s}\,(1+|x_0+k|)^\vartheta} \le \frac{C_1\left(1+Z(n,\vartheta,x_0)\right)}{|x_0|^{\vartheta}}, $$ for some $C_1>0$. Then, the desired result follows from \eqref{zeta}. \end{proof} \begin{corollary}\label{pc1.1} Let~$s \in (0,1)$, $x_0\in{\mathds R}^n$ such that~$|x_0|\ge 3$, and $\vartheta\in(0,n+2s]$. Then $$ \sum_{k\in {\mathds Z}^n \setminus\{0\}} \frac{1}{|k|^{n+2s}\,(1+|x_0+k|)^\vartheta}\le \frac{C}{(1+|x_0|)^\vartheta},$$ for some $C>0$ depending on $n$, $s$ and $\vartheta$. \end{corollary} \begin{proof} Notice that $$ \sum_{{k\in {\mathds Z}^n \setminus\{0\}}\atop{|x_0+k|\ge |x_0|/2}} \frac{1}{|k|^{n+2s}\,(1+|x_0+k|)^\vartheta}\le \frac{1}{(1+|x_0|/2)^\vartheta}\sum_{k\in {\mathds Z}^n \setminus\{0\}} \frac{1}{|k|^{n+2s}}\le\frac{C_0}{(1+|x_0|)^\vartheta},$$ for some~$C_0>0$, and so the result follows from Lemma~\ref{pl1}. \end{proof} \section{Fractional Laplace computations I -- Integral estimates at a point}\label{IEAP} Here we estimate the local contribution of the fractional Laplacian of a function touched by above by a polynomial barrier. By local, we mean here the contribution coming from a neighborhood of a given point. The contribution coming from far will then be studied in Section~\ref{ICAI}. Though the main focus of this paper is the fractional parameter range~$s\in (0,1/2)$ the results presented hold true for any~$s\in(0,1)$. 
For this, it is convenient to recall the notation on singular integrals in the principal value sense, that is $$ {\rm{P.V.}}\, \int_{{\mathds R}^n} \frac{u(x+y)-u(x)}{|y|^{n+2s}}\,dy := \lim_{\rho\searrow0} \int_{{\mathds R}^n\setminus B_\rho} \frac{u(x+y)-u(x)}{|y|^{n+2s}}\,dy.$$ As a matter of fact, when~$s\in(0,1/2)$ the above notation may be dropped since the integrand is indeed Lebesgue summable and no cancellations are needed to make the integral convergent near the origin. With this notation, we can estimate the contribution in a given ball according to the following result: \begin{lemma}\label{Con-L1} Let~$s \in (0,1)$, $\vartheta>0$, $\epsilon\in(0,1)$, and $$ F_1(x):=\frac{1}{(1+|x|)^\vartheta}.$$ For any fixed~$M>0$ let~$F_M (x):=M F_1(x)$. Suppose that $u\in L^\infty({\mathds R}^n)\cap C^2({\mathds R}^n)$ satisfies \begin{eqnarray} && F_M(x_0)+\epsilon=u(x_0) \ {\mbox{ for some point }} x_0\in{\mathds R}^n, \label{S1}\\ && F_M (x)+\epsilon\geq u(x) \ {\mbox{ for every }} x\in{\mathds R}^n \label{S2}, \\ && \int_{B_1(x_0)} |u(\zeta)|\,d\zeta \leq \frac{C_0}{(1+|x_0|)^{\vartheta}} \label{S3} \end{eqnarray} for some~$C_0>0$. Then there exists~$M_0>0$, depending only on~$n$, $s$, $\|u\|_{L^\infty({\mathds R}^n)}$, $\vartheta$, and~$C_0$, such that if~$M\ge M_0$ then $$ {\rm{P.V.}}\,\int_{B_1} \frac{u(x_0+y)-u(x_0)}{|y|^{n+2s}}\,dy \le -\frac{M\,|B_1|}{10\,(1+|x_0|)^{\vartheta}}.$$ \end{lemma} \begin{proof} First of all we observe that, without loss of generality, we can suppose that \begin{equation}\label{x0ma} |x_0|>3. \end{equation} Indeed, if~$|x_0|\le 3$ we deduce from~\eqref{S1} that $$ \frac{M}{4^\vartheta}\le \frac{M}{(1+|x_0|)^\vartheta}= F_M(x_0)=u(x_0)-\epsilon \le \|u\|_{L^\infty({\mathds R}^n)}$$ that gives an upper bound on~$M$ which would be violated by choosing~$M_0$ large enough. 
{F}rom~\eqref{x0ma}, we have that \begin{equation}\label{yma} {\mbox{for any $y\in B_1$, $|x_0+y|\ge |x_0|-|y|\ge |x_0|/2$.}} \end{equation} Now we define \begin{eqnarray*} D_1 &:=& \left\{ y\in B_1 {\mbox{ s.t. }} |u(x_0+y)| \ge \frac{M}{2\,(1+|x_0|)^\vartheta} \right\},\\ D_2 &:=& \left\{ y\in B_1 {\mbox{ s.t. }} |u(x_0+y)| < \frac{M}{2\,(1+|x_0|)^\vartheta} \right\}. \end{eqnarray*} Then, by~\eqref{S3}, $$ \frac{C_0}{(1+|x_0|)^{\vartheta}} \ge \int_{D_1} |u(x_0+y)|\,dy \ge \frac{M\,|D_1|}{2\,(1+|x_0|)^\vartheta}.$$ Hence \begin{equation}\label{mis D1} |D_1| \le \frac{2C_0}{M} \end{equation} and, as a consequence, if~$M$ is large enough, \begin{equation}\label{mis D2} |D_2| \ge |B_1|-|D_1| \ge \frac{9\,|B_1|}{10}. \end{equation} Now we define \begin{eqnarray*} r_0 &:=& \left( \frac{(1+|x_0|)^2}{M}\right)^{1/(n+2)}, \\ D_3 &:=& D_1\cap B_{r_0}, \\ D_4 &:=& D_1\setminus B_{r_0}. \end{eqnarray*} If~$y\in D_3$ we use~\eqref{S1}, \eqref{S2} and a Taylor expansion of~$F_1$ to obtain that \begin{eqnarray*} u(x_0+y)-u(x_0) &\le& M \Big( F_1(x_0+y) -F_1(x_0) \Big)\\ &\le& M \nabla F_1(x_0)\cdot y+ M \sup_{\xi\in B_1} |D^2 F_1(x_0+\xi)|\,|y|^2.\end{eqnarray*} Notice that $$ |\partial^2_{x_i,x_j}F_1(x)| \le \frac{2\vartheta}{(1+|x|)^{\vartheta+1}\,|x|}+ \frac{\vartheta\,(\vartheta+1)}{(1+|x|)^{\vartheta+2}}$$ and so, by~\eqref{x0ma} and~\eqref{yma}, $$ \sup_{\xi\in B_1} |D^2 F_1(x_0+\xi)| \le \frac{C_1}{(1+|x_0|)^{\vartheta+2}},$$ for some~$C_1>0$. Therefore, for any~$y\in D_3$, $$ u(x_0+y)-u(x_0) \le M\nabla F_1(x_0)\cdot y+ \frac{C_1\, M\,|y|^2}{(1+|x_0|)^{\vartheta+2}}$$ and so, since the odd term vanishes in the principal value integral, \begin{equation}\label{D3 est} \begin{split} {\rm{P.V.}}\, \int_{D_3} \frac{ u(x_0+y)-u(x_0) }{|y|^{n+2s}}\,dy\, &\le \frac{C_1\, M}{(1+|x_0|)^{\vartheta+2}} \int_{D_3} |y|^{2-n-2s}\,dy \\ &\le \frac{C_1\, M}{(1+|x_0|)^{\vartheta+2}} \int_{B_{r_0}} |y|^{2-n-2s}\,dy \\ &= \frac{C_2\, M\, r_0^{2-2s}}{(1+|x_0|)^{\vartheta+2}}. 
\end{split} \end{equation} Moreover, by~\eqref{S1}, \eqref{S2}, and~\eqref{yma}, we have that, if~$y\in D_4$, \begin{eqnarray*} \frac{u(x_0+y)-u(x_0)}{|y|^{n+2s}} &\le& \frac{F_M(x_0+y)-F_M(x_0)}{|y|^{n+2s}} \\ &\le& \frac{F_M(x_0+y)}{|y|^{n+2s}} \\ &\le& \frac{M}{r_0^{n+2s} (1+|x_0+y|)^\vartheta} \\ &\le& \frac{2^\vartheta\, M}{r_0^{n+2s} (1+|x_0|)^\vartheta} .\end{eqnarray*} Accordingly, making use of~\eqref{mis D1}, we conclude that \begin{equation}\label{D4 est} \begin{split} {\rm{P.V.}}\,\int_{D_4} \frac{ u(x_0+y)-u(x_0) }{|y|^{n+2s}}\,dy\, &\le \frac{2^\vartheta\, M\,|D_4|}{r_0^{n+2s} (1+|x_0|)^\vartheta} \\ &\le\frac{2^\vartheta\, M\,|D_1|}{r_0^{n+2s} (1+|x_0|)^\vartheta} \\ &\le\frac{C_3}{r_0^{n+2s} (1+|x_0|)^\vartheta}. \end{split} \end{equation} for some $C_3>0$. Thus, by~\eqref{D3 est} and~\eqref{D4 est}, we obtain \begin{equation}\label{D1 est} \begin{split} {\rm{P.V.}}\,\int_{D_1} \frac{ u(x_0+y)-u(x_0) }{|y|^{n+2s}}\,dy\, &\le \frac{C_2\, M\, r_0^{2-2s}}{(1+|x_0|)^{\vartheta+2}}+ \frac{C_3}{r_0^{n+2s} (1+|x_0|)^\vartheta}\\ &\le\frac{C_4\, M^\beta}{(1+|x_0|)^{\vartheta+2\beta}} \end{split}\end{equation} for a suitable~$C_4>0$, where \begin{equation}\label{beta def} \beta:=\frac{n+2s}{n+2}\in (0,1). \end{equation} This completes the estimate of the contribution in~$D_1$. Now we estimate the contribution in~$D_2$. For this, we notice that, if~$y\in D_2$, then $$ u(x_0+y)-u(x_0)=u(x_0+y)-\frac{M}{(1+|x_0|)^\vartheta}-\epsilon \le -\frac{M}{2\,(1+|x_0|)^\vartheta}$$ and therefore \begin{equation}\label{D2 est} \begin{split} {\rm{P.V.}}\,\int_{D_2} \frac{ u(x_0+y)-u(x_0) }{|y|^{n+2s}}\,dy\, &\le -\frac{M}{2\,(1+|x_0|)^\vartheta} \int_{D_2} \frac{ dy}{|y|^{n+2s}} \\ &\le -\frac{M}{2\,(1+|x_0|)^\vartheta} \int_{D_2} \,dy\\ &\le -\frac{9 M\,|B_1|}{20\,(1+|x_0|)^\vartheta},\end{split} \end{equation} thanks to~\eqref{mis D2}. 
By collecting the estimates in \eqref{D1 est} and~\eqref{D2 est}, we obtain that \begin{eqnarray*} {\rm{P.V.}}\,\int_{B_1} \frac{ u(x_0+y)-u(x_0) }{|y|^{n+2s}}\,dy &\le&\frac{C_4\, M^\beta}{(1+|x_0|)^{\vartheta+2\beta}} -\frac{9 M\,|B_1|}{20\,(1+|x_0|)^\vartheta} \\ &=& -\frac{9 M\,|B_1|}{20\,(1+|x_0|)^\vartheta} \left( 1 - \frac{C_5}{M^{1-\beta}\,(1+|x_0|)^{2\beta}} \right) \\ &\le& -\frac{9 M\,|B_1|}{20\,(1+|x_0|)^\vartheta} \left( 1 - \frac{C_5}{M^{1-\beta}} \right) \end{eqnarray*} for some~$C_5>0$. So, since~$\beta\in(0,1)$ due to~\eqref{beta def}, for~$M$ large we obtain the desired result. \end{proof} \section{Fractional Laplace computations II -- Integral estimates at infinity}\label{ICAI} This is the counterpart of Section~\ref{IEAP}, since here we study the contribution coming from infinity of the fractional Laplacian of a function touched by above by a polynomial barrier (since the singularity of the integral only occurs at the origin, we do not need to use the principal value notation for such a contribution). \begin{lemma}\label{Con-L2} Let $s\in (0,1)$,~$\vartheta\in(0,n+2s]$, $\epsilon\in(0,1)$, and $$ F_1(x):=\frac{1}{(1+|x|)^\vartheta}.$$ For any fixed~$M>0$ let~$F_M (x):=M F_1(x)$. Suppose that $u\in L^\infty({\mathds R}^n)\cap C^2({\mathds R}^n)$ satisfies \begin{eqnarray} && F_M(x_0)+\epsilon=u(x_0) \ {\mbox{ for some point }} x_0\in{\mathds R}^n, \label{SS1}\\ && F_M (x)+\epsilon\geq u(x) \ {\mbox{ for every }} x\in{\mathds R}^n \label{SS2} \\ && \int_{B_1(x)} |u(\zeta)|\,d\zeta \leq \frac{C_0}{(1+|x|)^{\vartheta}} \ {\mbox{ for every }} x\in{\mathds R}^n\label{S3S} \end{eqnarray} for some~$C_0>0$. 
Then there exists~$M_0>0$, depending only on~$n$, $s$, $\|u\|_{L^\infty({\mathds R}^n)}$, $\vartheta$, and~$C_0$, such that if~$M\ge M_0$ then $$ \int_{{\mathds R}^n\setminus B_1} \frac{u(x_0+y)-u(x_0)}{|y|^{n+2s}}\,dy \le \frac{M\,|B_1|}{20\,(1+|x_0|)^{\vartheta}}.$$ \end{lemma} \begin{proof} We notice that $$ u(x_0+y)-u(x_0) = u(x_0+y) - F_M(x_0)-\epsilon \le u(x_0+y)-\epsilon\le \big( u(x_0+y)-\epsilon\big)^+.$$ Also, the cube centered at zero with side~$1/\sqrt{n}$ lies inside the unit ball, namely~$Q_{1/\sqrt{n}}\subset B_1$. Therefore \begin{equation}\label{0o1} \int_{{\mathds R}^n\setminus B_1} \frac{u(x_0+y)-u(x_0)}{|y|^{n+2s}}\,dy\le \int_{{\mathds R}^n\setminus Q_{1/\sqrt{n}}} \frac{\big(u(x_0+y)-\epsilon\big)^+ }{|y|^{n+2s}}\,dy.\end{equation} Now we cover ${\mathds R}^n\setminus Q_{1/\sqrt{n}}$ with cubes of side~$1/(8n\sqrt{n})$ centered at points of a sublattice~${\mathcal{Z}}$ (roughly speaking, this sublattice is just a scaling of~${\mathds Z}^n$ by a factor~$1/(8n\sqrt{n})$, outside~$Q_{1/\sqrt{n}}$). 
In this way, \begin{equation}\label{0.9.5} {\mbox{if $k\in {\mathcal{Z}}$, then~$|k|\ge \displaystyle\frac{1}{2\sqrt{n}}$.}}\end{equation} Therefore \begin{equation}\label{09.1.1} {\mbox{if $k\in {\mathcal{Z}}$ and $y\in Q_{1/(8n\sqrt{n})}(k)$ then~$|y|\ge |k|-|y-k|\ge \displaystyle\frac{|k|}{2}+ \displaystyle\frac{1}{4\sqrt{n}}-\displaystyle\frac{1}{8n} \ge \frac{|k|}{2}$.}}\end{equation} Moreover, \begin{equation}\label{09.1.2}\begin{split} &{\mbox{if $k\in {\mathcal{Z}}$ and $y\in Q_{1/(8n\sqrt{n})}(k)$ then}}\\ &1+|x_0+y|\ge 1+|x_0+k|-|y-k|\ge 1+|x_0+k|- \displaystyle\frac{1}{8n}\ge\displaystyle\frac{1}{2}\big( 1+|x_0+k|\big).\end{split}\end{equation} Now we observe that, from~\eqref{0o1}, \begin{equation}\label{0.9.4} \int_{{\mathds R}^n\setminus B_1} \frac{u(x_0+y)-u(x_0)}{|y|^{n+2s}}\,dy \le \sum_{k\in {\mathcal{Z}}} \int_{Q_{1/(8n\sqrt{n})}(k)} \frac{\big( u(x_0+y)-\epsilon\big)^+ }{|y|^{n+2s}}\,dy.\end{equation} We define \begin{eqnarray*} D_1(k) &:=& \left\{ y\in Q_{1/(8n\sqrt{n})}(k) {\mbox{ s.t. }} |u(x_0+y)| \ge \frac{\sqrt{M}}{(1+|x_0+k|)^\vartheta} \right\}, \\ D_2(k) &:=& \left\{ y\in Q_{1/(8n\sqrt{n})}(k) {\mbox{ s.t. 
}} |u(x_0+y)| < \frac{\sqrt{M}}{(1+|x_0+k|)^\vartheta} \right\}.\end{eqnarray*} Then, from~\eqref{S3S}, \begin{eqnarray*} \frac{C_0}{(1+|x_0+k|)^{\vartheta}} &\ge& \int_{B_1(x_0+k)} |u(\zeta)|\,d\zeta \\ &\ge& \int_{Q_{1/(8n\sqrt{n})}(x_0+k)} |u(\zeta)|\,d\zeta \\ &\ge& \int_{D_1(k)} |u(x_0+y)|\,dy \\ &\ge& \frac{\sqrt{M}\,|D_1(k)|}{(1+|x_0+k|)^\vartheta} \end{eqnarray*} and so $$ |D_1(k)|\le \frac{C_0}{\sqrt{M}}.$$ Consequently, using~\eqref{SS2}, \eqref{09.1.1} and~\eqref{09.1.2}, we see that \begin{equation}\label{0.9.0.11} \begin{split} \int_{D_1(k)} \frac{ \big( u(x_0+y)-\epsilon\big)^+ }{|y|^{n+2s}}\,dy\,&\le \int_{D_1(k)} \frac{F_M(x_0+y)}{|y|^{n+2s}}\,dy \\ &\le \int_{D_1(k)} \frac{C_1\,M}{(1+|x_0+k|)^\vartheta\,|k|^{n+2s}}\,dy \\ &=\frac{C_1\,M\,|D_1(k)|}{(1+|x_0+k|)^\vartheta\,|k|^{n+2s}}\\ &\le \frac{C_0\,C_1\,\sqrt{M}}{(1+|x_0+k|)^\vartheta\,|k|^{n+2s}}, \end{split} \end{equation} for a suitable~$C_1>0$. Now we use again~\eqref{09.1.1} to estimate the contribution in~$D_2(k)$ in the following computation: \begin{equation}\label{0.9.0.12} \begin{split} \int_{D_2(k)} \frac{u^+(x_0+y)}{|y|^{n+2s}}\,dy\,&\le \int_{D_2(k)} \frac{\sqrt{M}}{(|k|/2)^{n+2s}\,(1+|x_0+k|)^\vartheta} \,dy \\ &\le \frac{2^{n+2s}\,|Q_{1/(8n\sqrt{n})}|\,\sqrt{M}}{|k|^{n+2s}\,(1+|x_0+k|)^\vartheta}. \end{split} \end{equation} Using~\eqref{0.9.0.11} and~\eqref{0.9.0.12}, and the fact that $$ \big( u(x_0+y)-\epsilon\big)^+\le u^+(x_0+y),$$ we conclude that $$ \int_{Q_{1/(8n\sqrt{n})}(k)} \frac{ \big( u(x_0+y)-\epsilon\big)^+ }{|y|^{n+2s}}\,dy \le\frac{C_2\,\sqrt{M}}{(1+|x_0+k|)^\vartheta\,|k|^{n+2s}},$$ for a suitable~$C_2>0$. 
So we plug this estimate into~\eqref{0.9.4} and we deduce that $$\int_{{\mathds R}^n\setminus B_1} \frac{u(x_0+y)-u(x_0)}{|y|^{n+2s}}\,dy \le C_2\,\sqrt{M} \sum_{k\in {\mathcal{Z}}} \frac{1}{(1+|x_0+k|)^\vartheta\,|k|^{n+2s}}.$$ Thus we estimate the latter series using Corollary~\ref{pc1.1} (notice that~${\mathcal{Z}}$ may be seen as a scaled version of~${\mathds Z}^n\setminus\{0\}$, due to~\eqref{0.9.5}, and $x_0$ stays away from $0$, as pointed out in \eqref{x0ma}, so the assumptions of Corollary~\ref{pc1.1} are satisfied, up to scaling): we obtain that $$\int_{{\mathds R}^n\setminus B_1} \frac{u(x_0+y)-u(x_0)}{|y|^{n+2s}}\,dy \le \frac{C_3\,\sqrt{M}}{(1+|x_0|)^\vartheta},$$ for a suitable~$C_3>0$, hence the claim plainly follows if~$M$ is large enough. \end{proof} Combining the estimates of Lemmata~\ref{Con-L1} and~\ref{Con-L2} we obtain that the negative local contribution cannot be compensated by the contribution at infinity. More explicitly, we have: \begin{corollary}\label{ULCO} Let $s\in (0,1)$, $\vartheta\in(0,n+2s]$, $\epsilon\in(0,1)$, and $$ F_1(x):=\frac{1}{(1+|x|)^\vartheta}.$$ For any fixed~$M>0$ let~$F_M (x):=M F_1(x)$. Suppose that $u\in L^\infty({\mathds R}^n)\cap C^2({\mathds R}^n)$ satisfies \begin{eqnarray*} && F_M(x_0)+\epsilon=u(x_0) \ {\mbox{ for some point }} x_0\in{\mathds R}^n, \\ && F_M (x)+\epsilon\geq u(x) \ {\mbox{ for every }} x\in{\mathds R}^n \\ && \int_{B_1(x)} |u(\zeta)|\,d\zeta \leq \frac{C_0}{(1+|x|)^{\vartheta}} \ {\mbox{ for every }} x\in{\mathds R}^n\end{eqnarray*} for some~$C_0>0$. 
Then there exists~$M_0>0$, depending only on~$n$, $s$, $\|u\|_{L^\infty({\mathds R}^n)}$, $\vartheta$, and~$C_0$, such that if~$M\ge M_0$ then \begin{equation}\label{ntg} L_s u(x_0)={\rm{P.V.}}\,\int_{{\mathds R}^n} \frac{u(x_0+y)-u(x_0)}{|y|^{n+2s}}\,dy \le -\frac{M\,|B_1|}{20\,(1+|x_0|)^{\vartheta}}.\end{equation} \end{corollary} \section{Decay estimates in average}\label{D:S} Here we obtain some precise information on the decay at infinity of the solution of a nonlocal equation with decaying nonlinearity: \begin{prop}\label{PRTHM1} Let $s\in (0,1)$, $u\in L^\infty({\mathds R}^n)\cap C^2({\mathds R}^n)$ satisfy \begin{equation}\label{122} -L_s u + c u=g \quad {\mbox{ in }}{\mathds R}^n, \end{equation} where~$c(x)\in (c_0,\,c_0^{-1})$, for some~$c_0\in(0,1)$ and \begin{equation}\label{g} |g(x)|\le\frac{C}{(1+|x|)^{\alpha}} \end{equation} for some~$C>0$ and $\alpha>0$. Then, for any $x\in{\mathds R}^n$, \begin{equation}\label{ball} \int_{B_1(x)} |u(y)|\, dy \le \frac{C_*}{|x|^{\vartheta}} \end{equation} where~$C_*>0$ is a suitable constant and \begin{equation}\label{SGAMMA} \vartheta := \frac{ \min\{ n+2s-(n-2\alpha)^+, \, 2\alpha\} }{2}. \end{equation} \end{prop} \begin{proof} We use that~$u$ satisfies~\eqref{122} in the weak sense, that is, for any test function~$\psi$, $$ \int_{{\mathds R}^n}\int_{{\mathds R}^n}\frac{(u(x)-u(y))(\psi(x)-\psi(y))}{|x-y|^{n+2s}}\, dx\, dy + \int_{{\mathds R}^n}c\,u\psi\, dx = \int_{{\mathds R}^n}g\psi\, dx. $$ Choosing~$\psi=u\varphi^2$ we get \begin{equation}\label{133} \int_{{\mathds R}^n}\int_{{\mathds R}^n}\frac{\big(u(x)-u(y)\big)\,\Big(u(x)\varphi^2(x)-u(y)\varphi^2(y)\Big)}{|x-y|^{n+2s}}\, dx\, dy + \int_{{\mathds R}^n}c\,u^2\varphi^2\, dx = \int_{{\mathds R}^n}gu\varphi^2\, dx. 
\end{equation} Notice that we can write \begin{eqnarray*} &&\big(u(x)-u(y)\big)\,\Big(u(x)\varphi^2(x)-u(y)\varphi^2(y)\Big) \\ &=& \big(u(x)-u(y)\big)\,\Big(u(x)\varphi^2(x)-u(y)\varphi^2(x)+u(y)\varphi^2(x)-u(y)\varphi^2(y)\Big) \\ &=&\big(u(x)-u(y)\big)\, \Big[\big(u(x)-u(y)\big)\varphi^2(x)+u(y) \big(\varphi^2(x)-\varphi^2(y)\big)\Big] \\ &=& \big(u(x)-u(y)\big)^2\varphi^2(x)+u(y)\big(u(x)-u(y)\big)\, \big(\varphi(x)+\varphi(y)\big)\,\big(\varphi(x)-\varphi(y)\big). \end{eqnarray*} Hence~\eqref{133} becomes \begin{equation}\begin{split}\label{144} &\int_{{\mathds R}^n}\int_{{\mathds R}^n}\frac{(u(x)-u(y))^2\varphi^2(x)}{|x-y|^{n+2s}}\, dx\, dy \\ &\quad +\int_{{\mathds R}^n}\int_{{\mathds R}^n} \frac{u(y)\big(u(x)-u(y)\big)\,\big(\varphi(x)+\varphi(y)\big) \,\big(\varphi(x)-\varphi(y)\big)}{|x-y|^{n+2s}}\, dx\, dy \\ &\quad +\int_{{\mathds R}^n}c\,u^2\varphi^2\, dx = \int_{{\mathds R}^n}gu\varphi^2\, dx. \end{split}\end{equation} Now we estimate the second term in~\eqref{144} in the following way \begin{eqnarray*} &&\left|\int_{{\mathds R}^n}\int_{{\mathds R}^n}\frac{u(y)\big(u(x)-u(y)\big) \,\big(\varphi(x)+\varphi(y)\big)\,\big(\varphi(x)-\varphi(y)\big)}{|x-y|^{n+2s}}\, dx\, dy\right| \\ &\le& \frac{1}{4}\int_{{\mathds R}^n}\int_{{\mathds R}^n}\frac{(u(x)-u(y))^2(\varphi(x)+\varphi(y))^2}{|x-y|^{n+2s}}\, dx\, dy + \int_{{\mathds R}^n}\int_{{\mathds R}^n} \frac{u^2(y)(\varphi(x)-\varphi(y))^2}{|x-y|^{n+2s}}\, dx\, dy\\ &\le& \frac12\int_{{\mathds R}^n}\int_{{\mathds R}^n}\frac{(u(x)-u(y))^2(\varphi^2(x)+\varphi^2(y))}{|x-y|^{n+2s}}\, dx\, dy +\int_{{\mathds R}^n}\int_{{\mathds R}^n} \frac{u^2(y)(\varphi(x)-\varphi(y))^2}{|x-y|^{n+2s}}\, dx\, dy\\ &\le& \int_{{\mathds R}^n}\int_{{\mathds R}^n}\frac{(u(x)-u(y))^2\varphi^2(x)}{|x-y|^{n+2s}}\, dx\, dy + \int_{{\mathds R}^n}\int_{{\mathds R}^n} \frac{u^2(y)(\varphi(x)-\varphi(y))^2}{|x-y|^{n+2s}}\, dx\, dy. 
\end{eqnarray*} Using this and \eqref{144} we obtain \begin{equation}\begin{split}\label{145} c_0 \int_{{\mathds R}^n} u^2\varphi^2\, dx \, &\quad \le \int_{{\mathds R}^n}c\,u^2\varphi^2\, dx\\ &\quad =\int_{{\mathds R}^n}gu\varphi^2\, dx -\int_{{\mathds R}^n}\int_{{\mathds R}^n}\frac{(u(x)-u(y))^2\varphi^2(x)}{|x-y|^{n+2s}}\, dx\, dy\\ &\qquad\quad -\int_{{\mathds R}^n}\int_{{\mathds R}^n} \frac{u(y)(u(x)-u(y))(\varphi(x)+\varphi(y)) (\varphi(x)-\varphi(y))}{|x-y|^{n+2s}}\, dx\, dy\\ &\quad\le \int_{{\mathds R}^n}gu\varphi^2\, dx + I, \end{split}\end{equation} where $$ I:=\int_{{\mathds R}^n}\int_{{\mathds R}^n} \frac{u^2(y)(\varphi(x)-\varphi(y))^2}{|x-y|^{n+2s}}\, dx\, dy.$$ On the other hand \begin{eqnarray*} \int_{{\mathds R}^n}gu\varphi^2\, dx&=& \int_{{\mathds R}^n} 2\,(\sqrt{1/(2c_0)} \,g\varphi) ( \sqrt{c_0/2}\, u\varphi)\, dx\\ &\le& \frac{1}{2c_0}\int_{{\mathds R}^n} g^2\varphi^2 +\frac{c_0}2 \int_{{\mathds R}^n} u^2\varphi^2\, dx.\end{eqnarray*} By plugging this into \eqref{145} and reabsorbing one term on the left hand side we obtain \begin{equation}\label{146} \frac{c_0}2 \int_{{\mathds R}^n} u^2\varphi^2\, dx \le\frac{1}{2c_0}\int_{{\mathds R}^n} g^2\varphi^2\,dx + I.\end{equation} Our goal is now twofold: to estimate $\int_{{\mathds R}^n} g^2\varphi^2\,dx$ and to reabsorb $I$ on the left hand side. For this, we choose $$ \varphi(x):=\frac{1}{(1+\varepsilon ^2|x-x_0|^2)^N}, $$ where $x_0\in{\mathds R}^n$ is fixed, \begin{equation}\label{CH} N:=\frac{n+2s}{4}, \end{equation} and~$0<\varepsilon \ll 1/N$. Notice that $\varphi\in L^2({\mathds R}^n)\cap L^\infty({\mathds R}^n)$. We set \begin{equation}\label{390} R:=|x_0|/2>10, \end{equation} and we claim that \begin{equation}\label{Tw1} \int_{{\mathds R}^n} g^2\varphi^2\,dx\le C_\varepsilon R^{-\gamma}, \end{equation} for some $C_\varepsilon >0$ and $$ \gamma:=\min\{ n+2s-(n-2\alpha)^+, \, 2\alpha\}.$$ Notice that \begin{equation}\label{out} \vartheta=\gamma/2, \end{equation} see~\eqref{SGAMMA}. 
To prove the claim, we first observe that if $x\in B_R$ then $$ |x-x_0|\ge |x_0|-|x|\ge 2R-R=R,$$ so $$ \varphi(x)\le\frac{1}{(1+\varepsilon ^2 R^2)^N}\le \frac{1}{\varepsilon ^{2N} R^{2N}}. $$ Accordingly, using also \eqref{g} and~\eqref{CH}, we obtain \begin{equation}\label{Ty01} \begin{split} \int_{B_R} g^2\varphi^2\,dx &\quad\le\frac{1}{\varepsilon ^{4N} R^{4N}} \int_{B_R} g^2 \,dx \\ &\quad\le \frac{1}{\varepsilon ^{4N} R^{4N}}\int_{B_R} \frac{C}{(1+|x|)^{2\alpha}} \,dx\\ &\quad\le \frac{C}{\varepsilon ^{4N} R^{4N}}\left[ \int_{B_1} 1\,dx+\int_{B_R\setminus B_1}\frac{C}{|x|^{2\alpha}}\,dx\right]\\ &\quad \le C_\varepsilon R^{-4N} \Big(1+\ell(R)\,R^{(n-2\alpha)^+}\Big) \\ &\quad \le 2C_\varepsilon \ell(R)\,R^{-n-2s+(n-2\alpha)^+}, \end{split}\end{equation} for some $C_\varepsilon >0$, where $$ \ell(R):=\left\{ \begin{matrix} \log R & {\mbox{ if }} 2\alpha=n,\\ 1 & {\mbox{ otherwise.}} \end{matrix} \right.$$ Moreover, if $x\in B_R(x_0)$ then $$ |x|\ge |x_0|-|x-x_0|\ge 2R-R=R$$ and so, from~\eqref{g}, we have $$ |g(x)|\le\frac{C}{(1+R)^{\alpha}}\le\frac{C}{R^\alpha}.$$ As a consequence \begin{equation}\label{Ty02}\begin{split} \int_{B_R(x_0)} g^2\varphi^2\,dx\,&\quad\le\frac{C^2}{R^{2\alpha}} \int_{B_R(x_0)} \varphi^2 \,dx \\&\quad\le \frac{C^2}{R^{2\alpha}} \int_{{\mathds R}^n} \varphi^2 \,dx \\&\quad\le C_\varepsilon R^{-2\alpha}, \end{split}\end{equation} for some $C_\varepsilon >0$ (up to renaming it). Now, if $x\in {\mathds R}^n\setminus (B_R(x_0)\cup B_R)$ then $|x|\ge R$ and so, from~\eqref{g} and~\eqref{CH}, \begin{equation}\label{Ty03}\begin{split} \int_{{\mathds R}^n\setminus (B_R(x_0)\cup B_R)} g^2\varphi^2\,dx\,&\quad\le\frac{C^2}{R^{2\alpha}} \int_{{\mathds R}^n\setminus (B_R(x_0)\cup B_R)} \varphi^2 \,dx \\ &\quad \le\frac{C^2}{R^{2\alpha}} \int_{{\mathds R}^n\setminus B_R(x_0)} \frac{1}{\varepsilon ^{4N} |x-x_0|^{4N}}\,dx \\ &\quad \le C_\varepsilon R^{n-2\alpha-4N} \\ &\quad= C_\varepsilon R^{-2\alpha-2s}. 
\end{split}\end{equation} Then \eqref{Tw1} follows from~\eqref{Ty01}, \eqref{Ty02} and~\eqref{Ty03}. Now we claim that, for any $\varepsilon '>0$, we can choose $\varepsilon $ sufficiently small (in the definition of $\varphi$) so that \begin{equation}\label{TBP} \int_{{\mathds R}^n}\frac{(\varphi(x)-\varphi(y))^2}{|x-y|^{n+2s}}\, dx\le \varepsilon '\varphi^2(y), \end{equation} holds. To prove this, we first observe that \begin{equation}\label{nabla} |\nabla\varphi(x)|=\frac{2\varepsilon ^2 N|x-x_0|}{(1+\varepsilon ^2|x-x_0|^2)^{N+1}}\le 2\varepsilon N\varphi(x). \end{equation} In particular we have that $|\nabla\varphi|\le 2\varepsilon N$ and therefore, for any $r>0$, \begin{eqnarray*} \int_{{\mathds R}^n}\frac{(\varphi(x)-\varphi(y))^2}{|x-y|^{n+2s}}\, dx &\le& \int_{B_r(y)}\frac{4\varepsilon ^2 N^2 |x-y|^2}{|x-y|^{n+2s}}\, dx+ \int_{{\mathds R}^n\setminus B_r(y)}\frac{4}{|x-y|^{n+2s}}\, dx \\ &\le& C(\varepsilon ^2 r^{2-2s}+r^{-2s}),\end{eqnarray*} for some $C>0$. Accordingly, if we choose $r:=1/\sqrt{\varepsilon }$, we obtain $$ \int_{{\mathds R}^n}\frac{(\varphi(x)-\varphi(y))^2}{|x-y|^{n+2s}}\, dx \le 2C\varepsilon ^s.$$ Hence if $y$ is such that $\varepsilon |y-x_0|\le \varepsilon ^{-s/(4N)}/|\log\varepsilon |$ then we have that \begin{eqnarray*} |\log\varepsilon |^{-N} \varphi^2(y) &=& \frac{|\log\varepsilon |^{-N}}{(1+\varepsilon ^2|y-x_0|^2)^{2N}} \\ &\ge& \frac{|\log\varepsilon |^{-N}}{\big(1+ (\varepsilon ^{-s/(4N)}/|\log\varepsilon |)^2\big)^{2N}} \\ &\ge& \frac{|\log\varepsilon |^{-N}}{\big(2 (\varepsilon ^{-s/(4N)}/|\log\varepsilon |)^2\big)^{2N}}\\ &=& 2^{-2N} \varepsilon ^s |\log\varepsilon |^{3N} \\ &\ge& 2C\varepsilon ^s\\ &\ge& \int_{{\mathds R}^n}\frac{(\varphi(x)-\varphi(y))^2}{|x-y|^{n+2s}}\, dx, \end{eqnarray*} provided that $\varepsilon $ is small enough, and this shows that \eqref{TBP} holds true if $\varepsilon |y-x_0|\le \varepsilon ^{-s/(4N)}/|\log\varepsilon |$. 
So we may and do suppose that \begin{equation}\label{case} \varepsilon |y-x_0|\ge \varepsilon ^{-s/(4N)}/|\log\varepsilon |.\end{equation} Notice that, in this case, $\varepsilon |y-x_0|\ge1$ if $\varepsilon $ is small enough and so \begin{equation}\label{QUI} \varphi^2(y)=\frac{1}{(1+\varepsilon ^2|y-x_0|^2)^{2N}}\ge \frac{1}{(2\varepsilon ^2|y-x_0|^2)^{2N}} =\frac{1}{4^N \varepsilon ^{n+2s} |y-x_0|^{n+2s}}, \end{equation} thanks to \eqref{CH}. Now we set $$ r_\varepsilon :=\frac{\varepsilon ^{-(n+3s)/(n+2s)}}{2|\log\varepsilon |}$$ and we study the contributions in $B_{r_\varepsilon }(x_0)$ and in $B_{r_\varepsilon }(y)$. For this, we point out that, by \eqref{CH} and \eqref{case}, \begin{equation}\label{Pe} |y-x_0|\ge \frac{ \varepsilon ^{-(4N+s)/(4N)} }{|\log\varepsilon |}= \frac{ \varepsilon ^{-(n+3s)/(n+2s)} }{|\log\varepsilon |}=2 r_\varepsilon . \end{equation} Therefore, if $x\in B_{r_\varepsilon }(x_0)$ we have that $$ |x-y|\ge |x_0-y| - |x-x_0|\ge |x_0-y| - r_\varepsilon \ge\frac{|x_0-y|}2$$ hence, using \eqref{QUI}, we see that \begin{equation}\label{AB1} \begin{split} \int_{B_{r_\varepsilon }(x_0)}\frac{(\varphi(x)-\varphi(y))^2}{|x-y|^{n+2s}}\, dx \,&\le \int_{B_{r_\varepsilon }(x_0)}\frac{4^{n+1+2s}}{|x_0-y|^{n+2s}}\, dx\\ &\le C \frac{r_\varepsilon ^n}{|x_0-y|^{n+2s}} \\ &\le 4^N C \frac{\varepsilon ^{-n(n+3s)/(n+2s)}}{2|\log\varepsilon |^n}\,\varepsilon ^{n+2s}\varphi^2(y) \\ &= 4^N C\frac{ \varepsilon ^{s(n+4s)/(n+2s)}}{2|\log\varepsilon |^n}\, \varphi^2(y). \end{split} \end{equation} Now we estimate the contribution in $B_{r_\varepsilon }(y)$. 
For this, we take $x\in B_{r_\varepsilon }(y)$ and $\xi=tx+(1-t)y$ with $t\in[0,1]$ such that $$ |\varphi(x)-\varphi(y)|\le |\nabla \varphi(\xi)|\,|x-y|.$$ Notice that, in this case, $$ |\xi-y|=t|x-y|\le r_\varepsilon \le\frac{|y-x_0|}2$$ thanks to \eqref{Pe}, and therefore $$ |\xi-x_0|\ge |y-x_0|-|\xi-y|\ge\frac{|y-x_0|}2.$$ Using this and \eqref{nabla} we obtain that \begin{eqnarray*} |\nabla\varphi(\xi)|&\le& 2\varepsilon N\varphi(\xi) \\ &=& \frac{2\varepsilon N}{(1+\varepsilon ^2|\xi-x_0|^2)^N}\\ &\le& \frac{2^{2N+1}\varepsilon N}{(1+2^2\, \varepsilon ^2|\xi-x_0|^2)^N}\\ &\le& \frac{2^{2N+1}\varepsilon N}{(1+\varepsilon ^2|y-x_0|^2)^N} \\ &=& 2^{2N+1}\varepsilon N \varphi(y). \end{eqnarray*} As a consequence \begin{equation}\label{AB2} \begin{split} \int_{B_{r_\varepsilon }(y)}\frac{(\varphi(x)-\varphi(y))^2}{|x-y|^{n+2s}}\, dx \,&\le \int_{B_{r_\varepsilon }(y)}\frac{4^{2N+2}\varepsilon ^2 N^2 \varphi^2(y)}{|x-y|^{n+2s-2}}\, dx\\ &= C \varepsilon ^2 r_\varepsilon ^{2-2s}\varphi^2(y)\\ &= \frac{C\,\varepsilon ^{2s(n-1+3s)/(n+2s)}}{2^{2-2s} \,|\log\varepsilon |^{2-2s}} \varphi^2(y). \end{split} \end{equation} It remains to estimate the contribution in ${\mathds R}^n\setminus\big( B_{r_\varepsilon }(x_0)\cup B_{r_\varepsilon }(y)\big)$. 
For this we will use the following estimate: fixed $p\in {\mathds R}^n$ we have that \begin{equation}\label{au6} \int_{{\mathds R}^n\setminus B_{r_\varepsilon }(p)} \frac{dx}{|x-p|^{n+2s}}=\frac{C}{r_\varepsilon ^{2s}}= 2^{2s} C\,\varepsilon ^{2s(n+3s)/(n+2s)}\,|\log\varepsilon |^{2s} .\end{equation} Moreover $$ \frac{|y-x_0|}{|x-x_0|\,|x-y|}\le \frac{|y-x|+|x-x_0|}{|x-x_0|\,|x-y|}= \frac{1}{|x-x_0|}+\frac{1}{|x-y|}$$ and therefore $$ \frac{|y-x_0|^{n+2s}}{|x-x_0|^{n+2s}\,|x-y|^{n+2s}}\le 2^{n+2s}\left(\frac{1}{|x-x_0|^{n+2s}}+\frac{1}{|x-y|^{n+2s}}\right).$$ Hence, if we integrate over ${\mathds R}^n\setminus\big( B_{r_\varepsilon }(x_0)\cup B_{r_\varepsilon }(y)\big)$ and we use \eqref{au6} we obtain that \begin{equation}\label{au7}\begin{split} &\int_{{\mathds R}^n\setminus\big( B_{r_\varepsilon }(x_0)\cup B_{r_\varepsilon }(y)\big)} \frac{|y-x_0|^{n+2s}}{|x-x_0|^{n+2s}\,|x-y|^{n+2s}}\,dx\\&\quad\le 2^{n+2s}\left( \int_{{\mathds R}^n\setminus B_{r_\varepsilon }(x_0)} \frac{dx}{|x-x_0|^{n+2s}}+ \int_{{\mathds R}^n\setminus B_{r_\varepsilon }(y)} \frac{dx}{|x-y|^{n+2s}}\right)\\ &\quad\le C\,\varepsilon ^{2s(n+3s)/(n+2s)}\, |\log\varepsilon |^{2s},\end{split}\end{equation} up to renaming constants. Moreover, exploiting \eqref{CH} and \eqref{QUI} we see that $$ \varphi^2(x)=\frac{1}{(1+\varepsilon ^2|x-x_0|^2)^{(n+2s)/2}} \le \frac{1}{\varepsilon ^{n+2s} |x-x_0|^{n+2s}} \le \frac{4^N\,|y-x_0|^{n+2s}}{ |x-x_0|^{n+2s}}\,\varphi^2(y).$$ Therefore \begin{equation}\label{AB3} \begin{split} &\int_{{\mathds R}^n\setminus\big( B_{r_\varepsilon }(x_0)\cup B_{r_\varepsilon }(y)\big)} \frac{\varphi^2(x)}{|x-y|^{n+2s}}\, dx \\&\quad\le 4^N \varphi^2(y)\,\int_{{\mathds R}^n\setminus\big( B_{r_\varepsilon }(x_0)\cup B_{r_\varepsilon }(y)\big)} \frac{|y-x_0|^{n+2s}}{ |x-x_0|^{n+2s}\,|x-y|^{n+2s}}\, dx \\ &\quad\le 4^N C\,\varepsilon ^{2s(n+3s)/(n+2s)}\,|\log\varepsilon |^{2s}\,\varphi^2(y),\end{split}\end{equation} thanks to \eqref{au7}. 
Furthermore, by \eqref{au6} we have that \begin{equation}\label{AB4} \begin{split}\int_{{\mathds R}^n\setminus\big( B_{r_\varepsilon }(x_0)\cup B_{r_\varepsilon }(y)\big)} \frac{\varphi^2(y)}{|x-y|^{n+2s}}\, dx \,&\le \int_{{\mathds R}^n\setminus B_{r_\varepsilon }(y)} \frac{\varphi^2(y)}{|x-y|^{n+2s}}\, dx\\ &\le 2^{2s} C\,\varepsilon ^{2s(n+3s)/(n+2s)}\,|\log\varepsilon |^{2s}\,\varphi^2(y).\end{split}\end{equation} Now we use that $$ (\varphi(x)-\varphi(y))^2 \le (|\varphi(x)|+|\varphi(y)|)^2\le 4(\varphi^2(x)+\varphi^2(y)),$$ so that by \eqref{AB3} and \eqref{AB4} we obtain \begin{equation}\label{22b} \int_{{\mathds R}^n\setminus\big( B_{r_\varepsilon }(x_0)\cup B_{r_\varepsilon }(y)\big)} \frac{(\varphi(x)-\varphi(y))^2}{|x-y|^{n+2s}}\, dx\le C\,\varepsilon ^{2s(n+3s)/(n+2s)} \,|\log\varepsilon |^{2s}\,\varphi^2(y),\end{equation} up to renaming constants once again. In view of~\eqref{AB1}, \eqref{AB2} and~\eqref{22b}, the proof of \eqref{TBP} is finished. As a consequence of \eqref{TBP} we obtain that $$ I\le \varepsilon ' \int_{{\mathds R}^n} u^2(y) \varphi^2(y)\, dy =\varepsilon ' \int_{{\mathds R}^n} u^2\varphi^2\,dx.$$ So we take $\varepsilon $ so small that $\varepsilon '\le c_0/4$, we plug the estimate above into \eqref{146} and we reabsorb one term into the left hand side (this fixes $\varepsilon $ now once and for all): we conclude that $$ \frac{c_0}4 \int_{{\mathds R}^n} u^2\varphi^2\, dx \le\frac{1}{2c_0}\int_{{\mathds R}^n} g^2\varphi^2\,dx .$$ Hence, from~\eqref{Tw1}, $$ \frac{c_0}4 \int_{{\mathds R}^n} u^2\varphi^2\, dx \le \frac{C_\varepsilon }{2c_0} R^{-\gamma}.$$ Now we use that $\varphi\ge 1/2$ in $B_1(x_0)$ to deduce from this that $$ \Xint-_{B_1(x_0)} u^2\, dx \le C R^{-\gamma},$$ for some $C>0$. 
Then, by the H\"older inequality, \eqref{390} and~\eqref{out}, for any~$x_0\in{\mathds R}^n$ such that~$|x_0|>20$ we have that $$ \Xint-_{B_1(x_0)} u\, dx \le \sqrt{ \Xint-_{B_1(x_0)} u^2\, dx}\le \sqrt{C R^{-\gamma}}=\sqrt{C}\,R^{-\vartheta} =2^\vartheta\sqrt{C} |x_0|^{-\vartheta}.$$ Since~$u$ is bounded, a similar estimate holds for~$|x_0|\le20$ as well, by possibly changing the constants (also in dependence of~$\|u\|_{L^\infty(B_{20})}$). This proves~\eqref{ball} and concludes the proof of Proposition~\ref{PRTHM1}. \end{proof} \begin{rem} {\rm In the sequel, we will only use Proposition~\ref{PRTHM1} for the proof of Theorem~\ref{DECAY} when~$n=1$ and~$s\in(0,1/2)$. Though the statement of Proposition~\ref{PRTHM1} remains valid for the whole parameter range~$s\in(0,1)$, in general the exponent~$\vartheta$ found in~\eqref{SGAMMA} would not be sufficiently accurate (indeed, we think it is an interesting open problem to find a sharp value for the exponent~$\vartheta$ in general). The sensitivity of the decay estimates on the fractional parameter~$s$ is the main reason for which different methods are needed to prove Theorem~\ref{DECAY} when~$s\in(0,1/2)$ and~$s\in[1/2,1)$: in a sense, when~$s\in(0,1/2)$, the integral contributions coming from far are predominant and they strongly affect the available bounds on the asymptotic behaviour of the solution at infinity. }\end{rem} \section{Proof of Theorem~\ref{DECAY}}\label{P:T} Let~$v$ be as in Theorem~\ref{DECAY}. We prove that \begin{equation}\label{TY} v(x)\le \frac{M_0}{(1+|x|)^\vartheta} \end{equation} for any~$x\in{\mathds R}$, where~$M_0>0$ is a universal constant (the bound from below follows by exchanging~$v$ with~$-v$). 
To this goal, fixed any~$\varepsilon >0$, we use~\eqref{va0} to find~$R_\varepsilon >0$ such that \begin{equation}\label{Re} {\mbox{$|v(x)|\le \varepsilon /2$ for all~$|x|\ge R_\varepsilon $.}}\end{equation} We claim that \begin{equation}\label{vb} v(x)< \frac{M}{(1+|x|)^\vartheta}+\varepsilon \end{equation} for any~$x\in{\mathds R}$, as long as $$ M\ge \|v\|_{L^\infty({\mathds R})}\,(1+R_\varepsilon )^\vartheta.$$ To check this, we distinguish two cases. If~$|x|\le R_\varepsilon $, then $$ v(x)\le \frac{|v(x)|\,(1+R_\varepsilon )^\vartheta }{(1+|x|)^\vartheta} \le \frac{M}{(1+|x|)^\vartheta}<\frac{M}{(1+|x|)^\vartheta}+\varepsilon ,$$ proving~\eqref{vb} in this case. Conversely if~$|x|\ge R_\varepsilon $, then~$v(x)<\varepsilon $ and so~\eqref{vb} holds true in this case too. Hence, we can take the smallest~$M:=M_\varepsilon \ge 0$ for which~\eqref{vb} is satisfied. If~$M_\varepsilon =0$ for a sequence of~$\varepsilon \searrow0$ then~\eqref{vb} gives that~$v(x)\le \varepsilon $ and so, in the limit,~$v\le0$, which proves~\eqref{TY}. Thus, without loss of generality, we can suppose that~$M_\varepsilon >0$. In this case, by \eqref{Re} and a simple compactness argument, there exists~$x_\varepsilon \in{\mathds R}$ for which \begin{equation}\label{touch} v(x_\varepsilon )=\frac{M_\varepsilon }{(1+|x_\varepsilon |)^\vartheta}+\varepsilon .\end{equation} Our goal is to show that \begin{equation}\label{ME} M_\varepsilon \le M_0 \end{equation} for a suitable~$M_0>0$ independent of~$\varepsilon $. For this, we observe that, by~\eqref{vb}, \eqref{touch} and Proposition~\ref{PRTHM1} (with~$\alpha:=4s$), we have that the hypotheses of Corollary~\ref{ULCO} are satisfied (by taking~$u:=v$ and~$x_0:=x_\varepsilon $). 
Therefore, by~\eqref{ntg}, if~$M_\varepsilon $ were too large we would have that \begin{equation}\label{p9} L_s v(x_\varepsilon ) \le -\frac{M_\varepsilon \,|B_1|}{20\,(1+|x_\varepsilon |)^{\vartheta}}.\end{equation} On the other hand, by~\eqref{touch}, \eqref{eqM}, and~\eqref{Mest}, we have \begin{equation}\label{p8}\begin{split} L_s v(x_\varepsilon ) \,&= L_s v(x_\varepsilon ) - c v(x_\varepsilon ) +c\left( \frac{M_\varepsilon }{(1+|x_\varepsilon |)^\vartheta}+\varepsilon \right)\\ &\ge L_s v(x_\varepsilon ) - c v(x_\varepsilon )\\ &= -g(x_\varepsilon )\\ &\geq -\frac{C}{(1+|x_\varepsilon |)^{4s}} \\ &\geq -\frac{C}{(1+|x_\varepsilon |)^{\vartheta}} \end{split} \end{equation} (recall that~$\vartheta\le\alpha=4s$, see~\eqref{SGAMMA}). Hence~\eqref{p8} and \eqref{p9} show that $M_\varepsilon $ is universally bounded, proving~\eqref{ME}. {F}rom~\eqref{ME} we deduce that $$ v(x)\le \frac{M_\varepsilon }{(1+|x|)^\vartheta}+\varepsilon \le \frac{M_0}{(1+|x|)^\vartheta}+\varepsilon $$ for any~$x\in{\mathds R}$, and so, by letting~$\varepsilon \searrow0$, we obtain~\eqref{TY}. This concludes\footnote{We remark that~$\vartheta$, as defined in~\eqref{SGAMMA}, satisfies \begin{equation*} \vartheta = \left\{\begin{array}{ll} 4s \quad &{\mbox{ if }} s\in(0,1/6], \\ \frac{1+2s}{2} \quad &{\mbox{ if }}s\in(1/6,1/2). \end{array} \right. \end{equation*} In any case, since~$s\in(0,1/2)$, we have that $$ 2s<\vartheta<1+2s.$$} the proof of Theorem~\ref{DECAY}. \section{Proof of Theorem~\ref{TH-decay}}\label{E:TH} The proof of Theorem~\ref{TH-decay} is now analogous to the one of Proposition~7.2 in~\cite{DIPPV}, up to the following modifications, needed in the case~$s\in(0,1/2)$: \begin{itemize} \item the exponent $1+2s$ in formulas~(7.9) and the previous one in \cite{DIPPV} must be replaced by~$\vartheta$ (the rest of the argument remains unchanged, since~$\vartheta\in(2s,1+2s]$), \item the use of Corollary~7.1 of~\cite{DIPPV} is replaced here by Theorem~\ref{DECAY}. 
\end{itemize} \section{$L^\infty$ bounds}\label{L infty} The goal of this section is to state some uniform regularity estimates that will be needed in the subsequent Section~\ref{7sddd}. We introduce the norm \begin{equation} \label{CLNC} \|f\|_{{H}_0^{s}({\mathds R}^n)} := \sqrt{ \int_{{\mathds R}^n}\int_{{\mathds R}^n} \frac{|f(x)-f(y)|^2}{|x-y|^{n+2s}}\, dx\, dy }\end{equation} and we provide an auxiliary estimate: \begin{lemma} Let $s \in (0,1)$. There exists a constant $C=C(n,s)>0$ such that, if $f\in H^{s}({\mathds R}^n)$, then \begin{equation}\label{L76} \|f\|_{L^2({\mathds R}^n)} \le C\| f\|_{H^{s}_0({\mathds R}^n)}^{n/(n+2s)}\, \|f\|_{L^1({\mathds R}^n)}^{2s/(n+2s)}. \end{equation} Also, if~$f\ge0$ then \begin{equation}\label{L7678} \|f\|_{L^2({\mathds R}^n)} \le C\| f\|_{H^{s}_0({\mathds R}^n)}\, |\{f>0\}|^{s/n}. \end{equation} \end{lemma} \begin{proof} We start by proving~\eqref{L76}, which is a variation of the classical Nash inequality. Without loss of generality, we suppose that~$f\in L^1({\mathds R}^n)$, otherwise the right hand side of~\eqref{L76} is infinite and there is nothing to prove. Given~$\rho>0$, we have \begin{equation}\label{L73} \int_{{\mathds R}^n\setminus B_\rho} |\hat{f}(\xi)|^2\,d\xi \le \int_{{\mathds R}^n\setminus B_\rho} \frac{|\xi|^{2s}}{\rho^{2s}} |\hat{f}(\xi)|^2\,d\xi\le C \rho^{-2s} \| f\|_{H^{s}_0({\mathds R}^n)}^2.\end{equation} Here we have used the notation of the norm~$\|\cdot\|_{H^{s}_0({\mathds R}^n)}$, as introduced in~\eqref{CLNC} and its equivalent in Fourier spaces (see e.g. Proposition~3.4 in~\cite{DPV12}). 
On the other hand,~$|\hat{f}(\xi)| \le \|f\|_{L^1({\mathds R}^n)}$ for any $\xi\in{\mathds R}^n$, and so by integrating over $B_\rho$ we obtain $$ \int_{B_\rho} |\hat{f}(\xi)|^2\,d\xi \le |B_1|\,\rho^n \|f\|_{L^1({\mathds R}^n)}^2.$$ By adding this to~\eqref{L73} we obtain $$ \|f\|_{L^2({\mathds R}^n)}^2 =\|\hat{f}\|_{L^2({\mathds R}^n)}^2 \le C \rho^{-2s} \| f\|_{H^{s}_0({\mathds R}^n)}^2 + |B_1|\,\rho^n \|f\|_{L^1({\mathds R}^n)}^2.$$ Since this estimate is valid for any~$\rho>0$, we now choose $$ \rho:= \big(\| f\|_{H^{s}_0({\mathds R}^n)} /\|f\|_{L^1({\mathds R}^n)}\big)^{2/(n+2s)} $$ to obtain \begin{equation*} \|f\|_{L^2({\mathds R}^n)}^2 \le(C+|B_1|)\,\| f\|_{H^{s}_0({\mathds R}^n)}^{2n/(n+2s)}\, \|f\|_{L^1({\mathds R}^n)}^{4s/(n+2s)} ,\end{equation*} which gives~\eqref{L76} Now we prove~\eqref{L7678} by using~\eqref{L76} and the H\"older inequality: we have \begin{eqnarray*} \|f\|_{L^2({\mathds R}^n)}^{n+2s} &\le& C\| f\|_{H^{s}_0({\mathds R}^n)}^n\, \|f\|_{L^1({\mathds R}^n)}^{2s} \\ &\le& C\| f\|_{H^{s}_0({\mathds R}^n)}^n\, \left[ \|f\|_{L^2({\mathds R}^n)}\,|\{f>0\}|^{1/2} \right]^{2s} \\ &=& C\| f\|_{H^{s}_0({\mathds R}^n)}^n\,\|f\|_{L^2({\mathds R}^n)}^{2s}\, |\{f>0\}|^{s}, \end{eqnarray*} which implies~\eqref{L7678}. \end{proof} We can now prove a uniform pointwise estimate using a De Giorgi-type argument. For the sake of generality, we prove it for any~$s\in(0,1)$ and any~$n\ge 1$ (though we only need it here for~$n=1$ and~$s\in(0,1/2)$). \begin{theorem}\label{thm_sup} Let $s \in (0,1)$ and let~$\psi\in H^{s}({\mathds R}^n)$ be a weak solution to \begin{equation*} -L_s\psi = \lambda \psi +b \quad {\mbox{in }} {\mathds R}^n, \end{equation*} with $b,\lambda\in L^{\infty}({\mathds R}^n)$. 
Then $\psi\in L^{\infty}({\mathds R}^n)$ and \begin{equation*} \|\psi\|_{L^{\infty}({\mathds R}^n)} \leq C \end{equation*} where the constant $C>0$ depends only on $n$, $s$, $\|\psi\|_{L^2({\mathds R}^n)}$, $\|\lambda\|_{L^{\infty}({\mathds R}^n)}$, and~$\| b\|_{L^{\infty}({\mathds R}^n)}$. \end{theorem} \begin{proof} First, for any $0<\delta <<1$ (we will choose later a suitable $\delta$, see formula \eqref{deltaC} below), we consider the function~$\phi$ defined as \begin{equation*} \phi(x):=\frac{\delta \psi(x)}{\|\psi\|_{L^2({\mathds R}^n)}}, \quad {\mbox{for any }} x\in{\mathds R}^n. \end{equation*} By construction, \begin{equation*} \|\phi\|_{L^2({\mathds R}^n)} = \delta, \end{equation*} and \begin{equation}\label{eq_cresce} -L_s\phi = \lambda \phi + \delta b/\|\psi\|_{L^2({\mathds R}^n)}. \end{equation} In order to prove the theorem, it will suffice to prove that \begin{equation}\label{LtwoLinfty} \| \phi\|_{L^{\infty}({\mathds R}^n)} \le 1, \end{equation} since this implies that $$ \|\psi\|_{L^{\infty}({\mathds R}^n)}\le \frac{\|\psi\|_{L^2({\mathds R}^n)}}{\delta}\|\phi\|_{L^{\infty}({\mathds R}^n)} \le \frac{\|\psi\|_{L^2({\mathds R}^n)}}{\delta} $$ and~$\delta$ is fixed. Now, for any integer $k\in{\mathds N}$, we consider the function $w_k$ defined as follows $$ w_k(x):= (\phi(x)- (1 -2^{-k}))^+, \quad {\mbox{ for any }}x\in{\mathds R}^n. $$ By construction, $w_k\in H^{s}({\mathds R}^n)$, $w_k (\pm \infty) = 0$, and \begin{equation}\label{eq_9star} w_{k+1}(x) \leq w_k(x) \quad {\mbox{a.e. in }} {\mathds R}^n. \end{equation} The following inclusion \begin{equation}\label{inclusion} \big\{ w_{k+1} > 0 \big\} \subseteq \big\{ w_k > 2^{-(k+1)} \big\} \end{equation} holds true for all $k\in{\mathds N}$. Indeed, if $x\in\big\{ w_{k+1} > 0 \big\}$, then $$ 0<w_{k+1}(x)=\phi(x)-1+2^{-k-1}$$ hence $$ \phi(x)-(1-2^{-k})> 2^{-k}-2^{-k-1}=2^{-k-1}$$ and so~$w_k(x)>2^{-k-1}$, thus proving~\eqref{inclusion}. 
Moreover, we have the inequality \begin{equation}\label{4.10bis} \phi(x) < 2^{k+1} w_k(x) \quad {\mbox{ for any }} x \in \big\{ w_{k+1} >0 \big\}. \end{equation} Indeed, if $x\in\big\{ w_{k+1} > 0 \big\}$ then $$ w_k(x)\ge w_{k+1}(x)=\phi(x)-(1-2^{-k-1}), $$ which together with \eqref{inclusion} implies \begin{eqnarray*} \phi(x) &\le & w_k(x)+(1-2^{-k-1})= w_k(x)+(2^{k+1}-1)2^{-k-1}\\ &<& w_k(x)+(2^{k+1}-1)w_k(x)= 2^{k+1}w_k(x). \end{eqnarray*} This proves~\eqref{4.10bis}. Also, we remark that for any $v\in H^{s}({\mathds R}^n)$ we have \begin{equation}\label{uguale} \big(v^+(x)-v^+(y)\big)\big(v(x)-v(y)\big)\geq | v^+(x) - v^+(y)|^{2}, \end{equation} for all $x, y \in {\mathds R}^n$. In order to check this, let assume that $v(x) \geq v(y)$. There is no loss of generality in such assumption, since the roles of $x$ and $y$ can be interchanged. Then, one can reduce to the case when $x\in\{ v> 0\}$ and $y\in\{ v \le 0 \}$, as otherwise the inequality in~\eqref{uguale} plainly follows. Finally, we notice that in such a case \eqref{uguale} becomes $$ (v(x)-v(y))v(x) \ge v(x)^{2} $$ which does hold since $v(y)\le0$ and $v(x)>0$. This proves~\eqref{uguale}. We now prove~\eqref{LtwoLinfty} by a standard iterative argument based on estimating the decay of the quantity $$ U_k := \| w_k \|^2_{L^2({\mathds R}^n)}. $$ First, in view of~\eqref{uguale} with $v:=\phi-(1-2^{-k})$, we have \begin{eqnarray*} \|w_{k+1}\|^2_{{H}_0^{s}({\mathds R}^n)} &:=& \int_{{\mathds R}^n}\int_{{\mathds R}^n} \frac{|w_{k+1}(x)-w_{k+1}(y)|^2}{|x-y|^{n+2s}}\, dx\, dy \\ &\leq& \int_{{\mathds R}^n}\int_{{\mathds R}^n}\frac{ \big(\phi(x)-\phi(y)\big)\big(w_{k+1}(x)-w_{k+1}(y)\big)} {|x-y|^{n+2s}}\, dx\, dy. \end{eqnarray*} Thus, plugging $w_{k+1}$ as a test function in~\eqref{eq_cresce}, we obtain $$ \|w_{k+1}\|^2_{H_0^{s}({\mathds R}^n)}\leq \int_{\{w_{k+1}>0\}} \left(\lambda(x)\phi(x) + \frac{\delta\, b(x)}{\|\psi\|_{L^2({\mathds R}^n)}} \right)w_{k+1}(x)\,dx. 
$$ Notice that if $x\in\{w_{k+1}>0\}$ then $\phi(x)>0$, and therefore, using~\eqref{4.10bis} and \eqref{eq_9star}, we get \begin{equation}\begin{split}\label{eq_9star2} \|w_{k+1}\|^2_{H_0^{s}({\mathds R}^n)}\leq & \int_{\{w_{k+1}>0\}} \left( \sup_{{\mathds R}^n}|\lambda|\, \phi(x)\,w_{k+1}(x)+ \frac{\delta \displaystyle\sup_{{\mathds R}^n}|b|}{\|\psi\|_{L^2({\mathds R}^n)}}w_{k+1}(x) \right)\, dx \\ \leq & \int_{\{w_{k+1}>0\}} \left( \sup_{{\mathds R}^n}|\lambda|\, 2^{k+1}\,w_k(x)\,w_{k+1}(x)+ \frac{\delta \displaystyle\sup_{{\mathds R}^n}|b|}{\|\psi\|_{L^2({\mathds R}^n)}}w_{k+1}(x) \right)\, dx \\ \leq & \int_{\{w_{k+1}>0\}} \left( \sup_{{\mathds R}^n}|\lambda|\, 2^{k+1}\,w_k^2(x)+ \frac{\delta \displaystyle\sup_{{\mathds R}^n}|b|}{\|\psi\|_{L^2({\mathds R}^n)}}w_{k}(x) \right)\, dx \\ \leq & \sup_{{\mathds R}^n}|\lambda|\, 2^{k+1}U_k + \frac{\delta\,\displaystyle\sup_{{\mathds R}^n}|b|}{\|\psi\|_{L^2({\mathds R}^n)}} \sqrt{|\{ w_{k+1}>0\}|} \; U_k^{\frac{1}{2}}, \end{split}\end{equation} where we have also used the H\"older inequality. Also, by~\eqref{inclusion} and Chebychev's inequality, one has \begin{equation}\label{8.11bis} |\{w_{k+1}>0\}| \le |\{ w_{k}>2^{-(k+1)}\}| \le 2^{2(k+1)} U_{k}, \end{equation} so that~\eqref{eq_9star2} becomes \begin{equation}\label{eq10ast2} \|w_{k+1}\|^2_{H^s_0({\mathds R}^n)}\leq \left(\sup_{{\mathds R}^n}|\lambda| + \frac{\delta \,\displaystyle\sup_{{\mathds R}^n}|b|}{\|\psi\|_{L^2({\mathds R}^n)}} \right) 2^{k+1}U_k. \end{equation} On the other hand, using~\eqref{L7678} (with~$f:=w_{k+1}$ here) we have \begin{equation}\label{eq_10star} \displaystyle U_{k+1} \leq c \|w_{k+1}\|^2_{{H}_0^{s}({\mathds R}^n)} \big|\big\{ w_{k+1}>0\big\}\big|^{\frac{2s}{n}}, \end{equation} where the constant $c>0$ only depends on $n$ and $s$. 
Combining~\eqref{eq10ast2} with~\eqref{eq_10star} and using \eqref{8.11bis}, we get \begin{equation*}\begin{split U_{k+1} &\leq\, c \left(\sup_{{\mathds R}^n}|\lambda| + \frac{\delta\, \displaystyle\sup_{{\mathds R}^n}|b|}{\|\psi\|_{L^2({\mathds R}^n)}} \right)2^{k+1}U_k \left(2^{2(k+1)}\right)^{\frac{2s}{n}}U_k^{\frac{2s}{n}}\\ &=\, c\left(\sup_{{\mathds R}^n}|\lambda| + \frac{\delta \,\displaystyle\sup_{{\mathds R}^n}|b|}{\|\psi\|_{L^2({\mathds R}^n)}} \right) 2^{(1+\frac{4s}{n})(k+1)} U_k^{1+\frac{2s}{n}} \\ &\le\, \left[1+c\left(\sup_{{\mathds R}^n}|\lambda| + \frac{\delta\, \displaystyle\sup_{{\mathds R}^n}|b|}{\|\psi\|_{L^2({\mathds R}^n)}} \right)\right]2^{(1+\frac{4s}{n})(k+1)}U_k^{1+\frac{2s}{n}}\\ &\le\, \left[ \left( 1+c \left(\sup_{{\mathds R}^n}|\lambda| + \frac{\delta\, \displaystyle\sup_{{\mathds R}^n}|b|}{\|\psi\|_{L^2({\mathds R}^n)}} \right)\right) 2^{1+\frac{4s}{n}} \right]^{k+1} U_k^{1+\frac{2s}{n}}\\ &=\, \bar{C}^{k+1} U_{k}^{1+\frac{2s}{n}}, \end{split}\end{equation*} for some constant $\bar{C}>1$ depending on $\sup_{{\mathds R}^n}|\lambda|$, $\sup_{{\mathds R}^n}|b|$, $\|\psi\|_{L^2({\mathds R}^n)}$, $n$, and $s$. Hence, an estimate of the form $$ U_{k+1}\le \bar{C}^{k+1} U_{k}^{1+\alpha} \quad {\mbox{for any }} \ k\in{\mathds N},$$ holds for suitable $\bar{C}>1$ and $\alpha>0$. Now we perform our choice of~$\delta$, that is we assume that \begin{equation}\label{deltaC} \delta^{2\alpha}=\frac{1}{\bar{C}^{(1/\alpha)+1}}. \end{equation} We set \begin{equation}\label{deltaC bis} \eta:=\frac{1}{\bar{C}^{1/\alpha}}. \end{equation} Since $\bar C>1$ and $\alpha>0$, we have that \begin{equation}\label{starr} \eta\in(0,1). \end{equation} We claim that \begin{equation}\label{claim} U_k \le\delta^2 \eta^k. \end{equation} We show \eqref{claim} by induction. Indeed, we notice that $$ U_0:=\|w_0\|^2_{L^2({\mathds R}^n)}=\|\phi^+\|^2_{L^2({\mathds R}^n)}\le\|\phi\|^2_{L^2({\mathds R}^n)}=\delta^2, $$ which is \eqref{claim} for $k=0$. 
Now, suppose that \eqref{claim} is true for $k$ and let us prove it for $k+1$: $$ U_{k+1}\le \bar{C}^{k+1} U_k^{1+\alpha}\le \bar{C}^{k+1}(\delta^2\eta^k)^{1+\alpha} = \delta^2\eta^k (\bar{C}\eta^\alpha)^k \bar{C}\delta^{2\alpha} = \delta^2\eta^{k+1}, $$ where we have used \eqref{deltaC} and \eqref{deltaC bis}. Then, by~\eqref{starr} and~\eqref{claim} we have that \begin{equation}\label{limit} \lim_{k\to\infty} U_{k} = 0. \end{equation} Noticing that $$ 0\le w_k=\left(\phi-(1-2^{-k})\right)^+\le |\phi|\in L^2({\mathds R}^n) $$ and $$ w_{k}\rightarrow(\phi-1)_{+}\quad {\mbox{a.e. in }}{\mathds R}^{n} \quad {\mbox{ as }}k\rightarrow +\infty,$$ by the Dominated Convergence Theorem we get \begin{equation}\label{Ukappa} \lim_{k\rightarrow +\infty}U_k=\|(\phi-1)^+\|_{L^2({\mathds R}^n)}^2. \end{equation} Hence, from \eqref{limit} and \eqref{Ukappa} we have that $(\phi-1)^+=0$ almost everywhere in~${\mathds R}^n$, and so $\phi\le1$ almost everywhere in~${\mathds R}^n$. By replacing $\phi$ with $-\phi$ we get~\eqref{LtwoLinfty}, which concludes the proof. \end{proof} \section{The corrector equation}\label{7sddd} Now we consider the equation \begin{eqnarray}\label{eq_correttore} \left\{ \begin{array}{ll} L_s\psi-W''(u)\psi=u'+\eta\left(W''(u)-W''(0)\right) {\mbox{ in }}{\mathds R}, \\ \psi\in H^s({\mathds R}), \\ \end{array} \right. \end{eqnarray} where $u$ is the solution of \eqref{AC} and \begin{equation}\label{eta2} \eta=\frac{\displaystyle\int_{{\mathds R}}(u'(x))^2\, dx}{W''(0)}. \end{equation} For a detailed heuristic motivation of such an equation see Section~3.1 of~\cite{GM12}. \begin{theorem}\label{THcorrettore} There exists a unique solution~$\psi\in H^s({\mathds R})$ to~\eqref{eq_correttore}. Furthermore \begin{equation}\label{SCC} {\mbox{$\psi\in C^{1,\alpha}_{loc}({\mathds R})\cap L^{\infty}({\mathds R})$ for some~$\alpha\in(0,1)$, and $\|\psi'\|_{L^{\infty}({\mathds R})}<+\infty. 
$}}\end{equation} \end{theorem} \begin{proof} The proof is analogous to the one of Theorem~5.2 in~\cite{DIPPV}, where the result was obtained for~$s\in(1/2,1)$, except for the modifications listed below. The proof of Theorem~5.2 in~\cite{DIPPV} uses the condition~$s\in(1/2,1)$ only twice, namely before formula~(5.26) and at the end of Section~5. In the first occasion, such condition was used to obtain that \begin{equation}\begin{split}\label{SCC2} &{\mbox{a weak solution of $L_s v_0=W''(u) v_0$ is $C^{2s+\alpha}({\mathds R})\cap L^\infty({\mathds R})$}} \\ &{\mbox{and, in particular, it is a classical solution.}} \end{split}\end{equation} In the second occasion, the condition on~$s$ was used to obtain~\eqref{SCC}. In both the cases, the condition~$s\in(1/2,1)$ permitted to obtain the desired results as an easy consequence of the fractional Morrey-Sobolev embedding (see e.g. Theorem 8.2 in~\cite{DPV12}), and this embedding is not available in the present case. Hence, we prove~\eqref{SCC} and~\eqref{SCC2} directly from the regularity theory developed in Section~\ref{L infty}, thus obtaining that Theorem~\ref{THcorrettore} also holds when~$s\in(0,1/2)$. To prove~\eqref{SCC2}, we first use Theorem~\ref{thm_sup} to obtain that~$v_0\in L^{\infty}({\mathds R})$. Hence, from Proposition~5 in~\cite{SV13b} we deduce that~$v_0\in C^{\alpha}({\mathds R})$ for any~$0<\alpha<2s$. In particular $v_0$ is a viscosity solution, and since $W''(u)v_0\in C^{\alpha}({\mathds R})$, by Proposition~2.8 in~\cite{Sil06} we deduce that $v_0\in C^{\alpha+2s}({\mathds R})$. Thus $v_0$ is a classical solution, proving~\eqref{SCC2}. To show~\eqref{SCC}, we use Theorem~\ref{thm_sup} and Proposition~5 in~\cite{SV13b} to obtain that~$\psi$ is a viscosity solution to~\eqref{eq_correttore} such that \begin{equation}\label{psi Linfinito} \psi\in L^{\infty}({\mathds R})\cap C^{\alpha}({\mathds R}) \end{equation} for any~$0<\alpha<2s$. 
Now, we define the incremental quotient of~$\psi$ as $$ \psi_h(x):=\frac{\psi(x+h)-\psi(x)}{h} \quad {\mbox{for any }}x,h\in{\mathds R}. $$ From~\eqref{eq_correttore} we have that~$\psi_h$ satisfies \begin{equation}\label{psi h} L_s \psi_h(x)=W''(u(x+h))\psi_h(x) +W''_h(u(x))\psi(x)+ u'_h(x)+ \eta W''_h(u(x)) \end{equation} where, for any~$x\in{\mathds R}$, $$ u'_h(x):=\frac{u'(x+h)-u'(x)}{h} $$ and $$ W''_h(u(x)):=\frac{W''(u(x+h))-W''(u(x))}{h}. $$ From~\eqref{Wass}, \eqref{psi Linfinito}, and Lemma~6 in~\cite{PSV13}, we have that $$ W''(u)\in L^\infty({\mathds R}) \quad {\mbox{ and}}\quad W''_h(u)\psi+u'_h+\eta W''_h(u)\in L^\infty({\mathds R}),$$ and so we can apply Theorem~\ref{thm_sup} to the solution of~\eqref{psi h} to obtain that~$\psi_h\in L^{\infty}({\mathds R})$. Using Proposition~5 in~\cite{SV13b}, this gives that $\psi_h\in C^{\alpha}({\mathds R})$ for any~$\alpha<2s$. So we have proved that, for any~$x,y,h\in{\mathds R}$, $$ |\psi_h(x)|\le C_1 \quad {\mbox{ and }}\quad |\psi_h(x)-\psi_h(y)|\le C_2|x-y|^\alpha, $$ for some positive constants~$C_1,C_2$. Letting~$h \searrow0$ we obtain that~$\psi'\in L^{\infty}({\mathds R})\cap C^{\alpha}({\mathds R})$, concluding the proof of~\eqref{SCC}. \end{proof} \begin{rem} Thanks to~\eqref{eq_correttore} and~\eqref{SCC}, we have that~$\psi\in H^s({\mathds R})$ is uniformly continuous, and this implies that \begin{equation}\label{unif} \displaystyle \lim_{x\rightarrow\pm\infty}\psi(x)=0. \end{equation} \end{rem} \section{Proof of Theorem~\ref{TH}}\label{csiufff} The proof is now conceptually similar to the one given in Section~8 of~\cite{DIPPV}, but some quantitative estimates of Proposition~8.4 there need to be modified when~$s\in(0,1/2)$. For the facility of the reader, we provide the details of the proof of Proposition~8.4 of~\cite{DIPPV} in our case (this will be done in Proposition~\ref{y7} here below). To this goal, we recall some of the notation of~\cite{GM12, DIPPV} needed for our purposes. 
We take an auxiliary parameter~$\delta>0$ and define~$(\overline x_i(t))_{i=1,\ldots,N}$ to be the solution of the system \begin{eqnarray}\label{ODEdelta} \left\{ \begin{array}{ll} \dot{\overline x}_i =\gamma\left(-\delta-\sigma(t,\overline x_i)+ \displaystyle\sum_{j\neq i} \displaystyle\frac{\overline x_i-\overline x_j}{2s\, |\overline x_i-\overline x_j|^{1+2s}}\right) {\mbox{ in }}(0,+\infty), \\[4ex] \overline x_i(0)=x_i^0-\delta. \\ \end{array} \right. \end{eqnarray} Moreover, we set \begin{eqnarray} && \label{cbar} \overline c_i(t):=\dot{\overline x}_i(t) \\ && \label{tildesigma} \tilde\sigma:=\frac{\delta+\sigma}{\beta}, \quad {\mbox{ where }}\beta=W''(0) {\mbox{ was introduced in }} \eqref{beta}, \\ && \label{supsol} \overline v_{\epsilon}(t,x):= \epsilon^{2s}\tilde\sigma(t,x)+ \sum_{i=1}^N\left\lbrace u\left( \frac{x-\overline x_i(t)}{\epsilon}\right)- \epsilon^{2s}\overline c_i(t) \psi\left(\frac{x-\overline x_i(t)}{\epsilon}\right)\right\rbrace, \end{eqnarray} where~$u$ is given in Theorem~\ref{TH-decay} and~$\psi$ in Theorem~\ref{THcorrettore}. We set \begin{equation}\label{utildei} \tilde u_i:=u\left(\frac{x-\overline{x}_i(t)}{\epsilon}\right)- H\left(\frac{x-\overline{x}_i(t)}{\epsilon}\right), \end{equation} where~$H$ is the Heaviside function, $$ \psi_i:=\psi\left(\frac{x-\overline{x}_i(t)}{\epsilon}\right). $$ and \begin{equation}\label{Ieps} I_{\epsilon}:=\epsilon(\overline{v}_\epsilon)_t+\frac{1}{\epsilon^{2s}}\left(W'(\overline{v}_\epsilon)-\epsilon^{2s}L_s\overline{v}_\epsilon-\epsilon^{2s}\sigma\right). 
\end{equation} With this notation we have that (see Lemma~8.3 in~\cite{DIPPV}), for every~$i_0\in\left\lbrace 1,\ldots,N\right\rbrace$, \begin{equation}\label{10.6} I_{\epsilon}=e_{\epsilon}^{i_0}+(\beta\tilde\sigma-\sigma)+O(\tilde u_{i_0})\biggl(\eta\, \overline c_{i_0}+\tilde\sigma+ \sum_{{1\le i\le N}\atop{i\neq i_0}} \frac{\tilde u_{i}}{\epsilon^{2s}}\biggr), \end{equation} where the error~$e_{\epsilon}^{i_0}$ is given by \begin{equation}\label{ei} e_{\epsilon}^{i_0}:=O(\epsilon^{2s})+\sum_{{1\le i\le N}\atop{i\neq i_0}} O(\psi_i)+\sum_{{1\le i\le N}\atop{i\neq i_0}}O(\tilde u_i)+ \sum_{{1\le i\le N}\atop{i\neq i_0}} O\left(\frac{\tilde u_i^2}{\epsilon^{2s}}\right). \end{equation} Now we can state the following result, which replaces Proposition~8.4 in~\cite{DIPPV}: \begin{prop}\label{y7} There exists $\delta_0>0$ such that, for any $0<\delta\leq\delta_0$ and~$T>0$, we have $$ (\overline v_{\epsilon})_{t}\geq\frac{1}{\epsilon}\biggl(L_s\overline v_{\epsilon}- \frac{1}{\epsilon^{2s}}W'(\overline v_{\epsilon})+\sigma\biggr) \quad {\mbox{ in }} (0,T)\times{\mathds R}, $$ for $\epsilon>0$ sufficiently small. \end{prop} \begin{proof} Recalling the definition of $I_{\epsilon}$ in \eqref{Ieps}, our goal is to show that \begin{equation}\label{GP} I_{\epsilon}\geq0\end{equation} for $\epsilon$ small enough. For this, we make a preliminary observation: recalling the definition of $\tilde u_i$ in~\eqref{utildei} and using Theorem~\ref{TH-decay}, we obtain that, for any~$i\in\{1,\dots,N\}$, \begin{equation}\label{999} \left|\tilde u_i+\frac{\epsilon^{2s}}{2sW''(0)}\frac{x-\overline x_i(t)}{|x-\overline x_i(t)|^{1+2s}}\right| \leq \frac{C\, \epsilon^{\vartheta}}{|x-\overline x_i(t)|^{\vartheta}}. \end{equation} Since~$\vartheta>2s$, we can choose~$\gamma$ such that \begin{equation}\label{scelta} 0<\gamma<\frac{\vartheta-2s}{\vartheta}. \end{equation} Now we divide the proof of~\eqref{GP} by dealing with two separate cases. 
\noindent \\{\it Case~1:} Suppose that there exists $i_0\in\left\lbrace1,\ldots,N\right\rbrace$ such that \begin{equation}\label{caso1} |x-\overline x_{i_0}(t)|\leq\epsilon^{\gamma}. \end{equation} Therefore, since the~$\overline x_{i}$'s are well-separated, for $\epsilon$ sufficiently small we have that \begin{equation}\label{varteta} |x-\overline x_{i}(t)|\geq\kappa>0, \ {\mbox{ for any }} \ i\ne i_0, \end{equation} where $\kappa$ is a constant independent of $\epsilon$. Hence, thanks to~\eqref{999} and~\eqref{varteta}, $$ \biggl|\sum_{i\neq i_0}\left(\frac{\tilde u_i}{\epsilon^{2s}}+\frac{1}{2sW''(0)}\frac{x-\overline x_i(t)}{|x-\overline x_i(t)|^{1+2s}}\right)\biggr|\leq \frac{C\, \epsilon^{\vartheta}}{\epsilon^{2s}}\sum_{i\neq i_0}\frac{1}{|x-\overline x_i(t)|^{\vartheta}}\leq C\, \epsilon^{\vartheta-2s}. $$ Therefore, from \eqref{10.6}, we deduce that \begin{equation}\begin{split}\label{666} I_{\epsilon}=&\ e^{i_0}_{\epsilon}+\beta\tilde\sigma-\sigma+ O(\tilde u_{i_0})\biggl(\eta\, \overline c_{i_0}+\tilde\sigma +\sum_{i\neq i_0}\frac{\tilde u_{i}}{\epsilon^{2s}}\biggr)\\ =& \ e^{i_0}_{\epsilon}+\beta\tilde\sigma-\sigma +O(\tilde u_{i_0})\biggl(\eta\, \overline c_{i_0} +\tilde\sigma -\frac{1}{2sW''(0)}\sum_{i\neq i_0} \frac{x-\overline x_i(t)}{|x-\overline x_i(t)|^{1+2s}} \biggr) + O(\epsilon^{\vartheta-2s}). 
\end{split}\end{equation} Now, we Taylor expand the function $\frac{x-\overline x_i(t)}{|x-\overline x_i(t)|^{1+2s}}$ for~$x$ in a neighborhood of the point~$\overline x_{i_0}(t)$, and we use~\eqref{caso1} to get \begin{equation}\begin{split}\label{8.17bis} &\biggl|\sum_{i\neq i_0}\frac{x-\overline x_i(t)}{|x-\overline x_i(t)|^{1+2s}}- \sum_{i\neq i_0}\frac{\overline x_{i_0}(t)-\overline x_i(t)}{|\overline x_{i_0}(t)-\overline x_i(t)|^{1+2s}}\biggr|\\ &\qquad \qquad = \ \biggl|\sum_{i\neq i_0}\left(\frac{1}{|\xi-\overline x_i(t)|^{1+2s}} -(1+2s)\frac{(\xi-\overline x_i(t))^2}{|\xi-\overline x_i(t)|^{3+2s}}\right)(x-\overline x_{i_0}(t))\biggr| \\ & \qquad \qquad \leq\ \sum_{i\neq i_0}\frac{2+2s}{|\xi-\overline x_i(t)|^{1+2s}}\, \epsilon^{\gamma}\\ & \qquad \qquad \leq\ C\, \epsilon^{\gamma}, \end{split}\end{equation} where $\xi$ is a suitable point lying on the segment joining $x$ to $\overline x_{i_0}(t)$ (and hence $|\xi-\overline x_i(t)|\geq\kappa/2$ thanks to~\eqref{caso1}). Therefore, using~\eqref{8.17bis} in~\eqref{666}, we have \begin{equation}\begin{split}\label{777} I_{\epsilon}=&\ e^{i_0}_{\epsilon}+\beta\tilde\sigma-\sigma +O(\tilde u_{i_0})\biggl(\eta\, \overline c_{i_0}+\tilde\sigma -\frac{1}{2sW''(0)}\sum_{i\neq i_0}\frac{\overline x_{i_0}(t)-\overline x_i(t)}{|\overline x_{i_0}(t)-\overline x_i(t)|^{1+2s}} \biggr) \\ &\ + O(\epsilon^{\vartheta-2s}) +O(\epsilon^{\gamma}). \end{split}\end{equation} Now, we compute the term in parenthesis. 
From the definitions of $\eta$, $\overline c_{i_0}$ and $\tilde\sigma$ given in \eqref{eta2}, \eqref{cbar}, and \eqref{tildesigma} respectively, and recalling~\eqref{gamma}, we obtain \begin{equation}\label{1114}\begin{split} &\eta\, \overline c_{i_0}+\tilde\sigma - \frac{1}{2sW''(0)}\sum_{i\neq i_0}\frac{\overline x_{i_0}(t)- \overline x_i(t)}{|\overline x_{i_0}(t)-\overline x_i(t)|^{1+2s}} \\ =\; & \frac{1}{\gamma\, W''(0)} \dot{\overline x}_{i_0}(t)+ \frac{\delta}{W''(0)}+\frac{\sigma(t,x)}{W''(0)} -\frac{1}{2sW''(0)}\sum_{i\neq i_0}\frac{\overline x_{i_0}(t)-\overline x_i(t)}{|\overline x_{i_0}(t)-\overline x_i(t)|^{1+2s}} \\ = \;& \frac{1}{W''(0)}\biggl(\frac{ \dot{\overline x}_{i_0}(t) }{\gamma}+\delta+\sigma(t,\overline x_{i_0}(t))-\frac{1}{2s}\sum_{i\neq i_0}\frac{\overline x_{i_0}(t)-\overline x_i(t)}{|\overline x_{i_0}(t)-\overline x_i(t)|^{1+2s}}\biggr) \\ &\quad \ +\,\frac{\sigma(t,x)-\sigma(t,\overline x_{i_0}(t))}{W''(0)}. \end{split}\end{equation} Recalling~\eqref{ODEdelta}, we have that $$ \frac{\dot{\overline x}_{i_0}(t)}{\gamma}+\delta+\sigma(t,\overline x_{i_0}(t))-\frac{1}{2s}\sum_{i\neq i_0}\frac{\overline x_{i_0}(t)-\overline x_i(t)}{|\overline x_{i_0}(t)-\overline x_i(t)|^{1+2s}}=0, $$ and so the term in parenthesis in~\eqref{1114} vanishes. Therefore~\eqref{1114} becomes \begin{eqnarray*} \eta\, \overline c_{i_0}+\tilde\sigma - \frac{1}{2sW''(0)}\sum_{i\neq i_0}\frac{\overline x_{i_0}(t)- \overline x_i(t)}{|\overline x_{i_0}(t)-\overline x_i(t)|^{1+2s}} & = & \frac{\sigma(t,x)-\sigma(t,\overline x_{i_0}(t))}{W''(0)}\\ & = & O(x-\overline x_{i_0}(t))\\ &= & O(\epsilon^{\gamma}), \end{eqnarray*} thanks to~\eqref{sigma} and~\eqref{caso1}. Hence~\eqref{777} reads \begin{equation}\label{888} I_{\epsilon}= e^{i_0}_{\epsilon}+\beta\tilde\sigma-\sigma + O(\epsilon^{\gamma})+ O(\epsilon^{\vartheta-2s}) +O(\epsilon^{\gamma}). 
\end{equation} Also, in the light of \eqref{tildesigma}, we see that \begin{equation}\label{8.43bis} \beta\tilde\sigma-\sigma=\delta>0.\end{equation} Now, we claim that \begin{equation}\label{err_zero} {\mbox{ the error~$e^{i_0}_{\epsilon}$ (that was defined in~\eqref{ei}) tends to zero as $\epsilon\rightarrow0$.}} \end{equation} For this, we notice that~$\psi_i=\psi\left(\frac{x-\overline x_i(t)}{\epsilon}\right)$, with~$i\ne i_0$, tends to zero because of the behavior of the corrector at infinity (recall~\eqref{unif} and~\eqref{varteta}). Moreover, thanks to~\eqref{phi} and~\eqref{varteta} we have that, for~$i\ne i_0$, $$ \tilde u_i=u\left(\frac{x-\overline x_i(t)}{\epsilon}\right)-H\left(\frac{x-\overline x_i(t)}{\epsilon}\right)= O\left(\frac{\epsilon^{2s}}{|x-\overline x_i(t)|^{2s}}\right)=O(\epsilon^{2s}) $$ and $$ \frac{(\tilde u_i)^2}{\epsilon^{2s}} =\frac{O(\epsilon^{4s})}{\epsilon^{2s}}=O(\epsilon^{2s}), $$ thus proving~\eqref{err_zero}. Hence, from~\eqref{888}, \eqref{8.43bis} and~\eqref{err_zero} we obtain that for $\epsilon$ sufficiently small $$ I_{\epsilon}\geq\frac{\delta}{2}>0, $$ which implies~\eqref{GP} in this case. \noindent \\ {\it Case~2:} Suppose that $|x-\overline x_i(t)|>\epsilon^{\gamma}$ for every $i\in\left\lbrace1,\ldots,N\right\rbrace$. In this case, we can fix~$i_0$ arbitrarily, say~$i_0:=1$ for concreteness. We use~\eqref{999} to obtain \begin{eqnarray*} \biggl|\sum_{i\neq i_0}\left(\frac{\tilde u_i}{\epsilon^{2s}}+\frac{1}{2sW''(0)}\frac{x-\overline x_i(t)}{|x-\overline x_i(t)|^{1+2s}}\right)\biggr| &\leq& \frac{C\, \epsilon^{\vartheta}}{\epsilon^{2s}}\sum_{i\neq i_0}\frac{1}{|x-\overline x_i(t)|^{\vartheta}}\\ &\leq& C\frac{\epsilon^{\vartheta-2s}}{\epsilon^{\gamma\vartheta}} = C\, \epsilon^{\vartheta-2s-\gamma\vartheta}. 
\end{eqnarray*} Therefore, by formula~\eqref{10.6} and the definition of $\tilde\sigma$ in \eqref{tildesigma} we have \begin{equation}\label{1140} I_{\epsilon}= e^{i_0}_{\epsilon}+\delta+O(\tilde u_{i_0}) \biggl(\eta\, \overline c_{i_0}+ \tilde\sigma -\frac{1}{2sW''(0)}\sum_{i\neq i_0} \frac{x-\overline x_i(t)}{|x-\overline x_i(t)|^{1+2s}} \biggr)+ O(\epsilon^{\vartheta-2s-\gamma\vartheta}). \end{equation} Now we observe that, for any~$i\ne i_0$, \begin{equation}\label{1139} \left| \frac{x-\overline x_i(t)}{|x-\overline x_i(t)|^{1+2s}}\right| \le \frac{1}{|x-\overline x_i(t)|^{2s}}\le \frac{1}{\epsilon^{2\gamma s}}=O(\epsilon^{-2\gamma s}).\end{equation} Notice that this term is divergent as $\epsilon$ tends to zero. Therefore, from~\eqref{1139} we conclude that $$ \eta\, \overline c_{i_0}+ \tilde\sigma -\frac{1}{2sW''(0)}\sum_{i\neq i_0} \frac{x-\overline x_i(t)}{|x-\overline x_i(t)|^{1+2s}}= O(\epsilon^{-2\gamma s}),$$ since the other terms are bounded. By plugging this into~\eqref{1140} we obtain \begin{equation}\label{1141} I_{\epsilon}= e^{i_0}_{\epsilon}+\delta+O(\tilde u_{i_0})\cdot O(\epsilon^{-2\gamma s}) +O(\epsilon^{\vartheta-2s-\gamma\vartheta}). \end{equation} Now we observe that for every $i\in\left\lbrace1,\ldots,N\right\rbrace$, \begin{equation}\label{9112}\begin{split} \tilde u_{i}&=u\left(\frac{x-\overline x_i(t)}{\epsilon}\right)\!-H\left(\frac{x-\overline x_i(t)}{\epsilon}\right)\\ &=O\left(\frac{\epsilon^{2s}}{|x-\overline x_i(t)|^{2s}}\right) =O\left(\frac{\epsilon^{2s}}{ \epsilon^{2\gamma s}}\right) =O\left(\epsilon^{2s(1-\gamma)}\right). \end{split}\end{equation} As a consequence \begin{equation}\label{9113} \frac{(\tilde u_i)^2}{\epsilon^{2s}}=O\left(\epsilon^{2s(1-2\gamma)}\right) \quad {\mbox{ and }} \quad O(\tilde u_{i_0})\cdot O(\epsilon^{-2\gamma s})=O\left(\epsilon^{2s(1-2\gamma)}\right). 
\end{equation} We observe that, since~$\vartheta\le 4s$ (see \eqref{SGAMMA} and recall that $\alpha=4s$), from \eqref{scelta} we have \begin{equation}\label{maggiore2} 1-2\gamma>1-\frac{2(\vartheta-2s)}{\vartheta}= \frac{4s-\vartheta}{\vartheta}\ge 0. \end{equation} Also, notice that, thanks again to~\eqref{scelta}, \begin{equation}\label{maggiore3} \vartheta-2s-\gamma\vartheta>0. \end{equation} By inserting~\eqref{9113} into~\eqref{1141} and recalling~\eqref{maggiore2} and~\eqref{maggiore3} we get \begin{equation}\label{1146} I_{\epsilon}= e^{i_0}_{\epsilon}+\delta+O(\epsilon^{\alpha}), \end{equation} for some~$\alpha>0$. Now we check that \begin{equation}\label{9114} {\mbox{the error term $e^{i_0}_{\epsilon}$ tends to zero as $\epsilon\rightarrow0$.}}\end{equation} For this, we remark that, in this case, $$ \frac{|x-\overline x_i(t)|}{\epsilon}\ge \frac{\epsilon^{\gamma}}{\epsilon}=\epsilon^{\gamma-1},$$ which diverges for small~$\epsilon$, since~$\gamma<1$. Therefore, for $x$ fixed as in the assumption of Case~2, we have that $$ \psi_i(x)=\psi\left( \frac{x-\overline x_i(t)}{\epsilon}\right)\longrightarrow 0$$ as~$\epsilon\rightarrow 0$, due to the infinitesimal behavior of~$\psi$ at infinity (see~\eqref{unif}). Using this, \eqref{9112}, \eqref{9113} and the definition of the error term given in~\eqref{ei}, we obtain~\eqref{9114}. Hence, by using~\eqref{9114} inside~\eqref{1146} and recalling that~$\delta>0$, we conclude that $$ I_{\epsilon}\geq\frac{\delta}{2}>0$$ for $\epsilon$ sufficiently small, thus proving~\eqref{GP} in this case too. \end{proof}
{ "redpajama_set_name": "RedPajamaArXiv" }
2,167
\section{Introduction} Recommender systems have been playing a critical role in the realms of retail, social networking, and entertainment industries. Providing personalized recommendations is an important commercial strategy for online websites and mobile applications. There are two major recommendation tasks: rating prediction and personalized ranking. The former usually needs explicit ratings(e.g., 1-5 stars) while the latter aims to generate a ranked list of items in descending order based on the estimated preferences for each user. In many real world scenarios where only implicit feedback is available, personalized ranking is a more appropriate and popular choice ~\cite{rendle2009bpr}. Collaborative filtering (CF) is a \textit{de facto} approach which has been widely used in many real-world recommender systems~\cite{ricci2015recommender}. CF assumes that user-item interactions can be modelled by inner product of user and item latent factors in a low-dimensional space. An effective and widely adopted ranking model based on CF is Bayesian Personalized Ranking (BPR)~\cite{rendle2009bpr} which optimizes the ranking lists with a personalized pairwise loss. Another state-of-the-art model is sparse linear method (SLIM)~\cite{6137254} which recommends top-$n$ items via sparse linear regression. While BPR and SLIM have been shown to perform well on ranking task, we argue that they are hindered by a critical limitation: both of them are built on the assumption that there exists a linear relationship between users and items, while the relationship shall be more complex in real-life scenarios. In recent years, researchers have demonstrated the efficacy of deep neural model for recommendation problems~\ \cite{zhang2017deep,Karatzoglou:2017:DLR:3109859.3109933}. 
Deep neural network can be integrated into classic recommendation models such as collaborative filtering~\cite{he2017neural,Tay:2018:LRM:3178876.3186154} and content based approaches~\cite{cheng2016wide,DBLP:journals/corr/abs-1801-09251} to enhance their performances. Many deep neural techniques such as multi-layered perceptron (MLP), autoencoder (AE), recurrent neural network (RNN) and convolutional neural network (CNN) can be applied to recommendation models. AE is usually used to incorporate side information of users/items. For example, ~\cite{wang2015collaborative} and ~\cite{Zhang:2017:AEH:3077136.3080689} proposed integrated models by combining latent factor model (LFM) with different variants of autoencoder; AE can also be adopted to reconstruct the rating matrix directly ~\cite{sedhain2015autorec}. CNN is mainly used to extract features from textual~\cite{kim2016convolutional,zheng2017joint}, audio~\cite{van2013deep} or visual~\cite{he2016vbpr} content. RNN can be used to model the sequential patterns of rating data or session-based recommendation~\cite{hidasi2015session}. For example, ~\cite{Wu:2017:RRN:3018661.3018689} designed a recurrent neural network based rating prediction model to capture the temporal dynamics of rating data; ~\cite{hidasi2015session} proposed using RNN to capture the interconnections between sessions. Some works attempted to generalize traditional recommendation models into neural versions. For example, ~\cite{he2017neural,He:2017:NFM:3077136.3080777} designed the neural translations of LFM and factorization machine to model user-item interactions; ~\cite{ijcai2017-447} proposed a deep matrix factorization model to anticipate user's preferences from historical explicit feedback. Most previous works focused upon either explicit feedback (rating prediction task) or representation learning from abundant auxiliary information instead of interpreting user-item relationships in depth. 
In this work, we aim to model the user-item intricate relationships from implicit feedback, instead of explicit ratings, by applying multi-layered nonlinear transformations. The main contributions are as follows: \begin{itemize} \item We propose two recommendation models with deep neural networks, user-based NeuRec (U-NeuRec) and item-based NeuRec (I-NeuRec), for personalized ranking task. We present an elegant integration of LFM and neural networks which can capture both the linearity and non-linearity in real-life datasets. \item With deep neural networks, we managed to reduce the number of parameters of existing advanced models while achieving superior performances. \end{itemize} \section{Preliminaries} To make this paper self-contained, we first define the research problem and introduce two highly relevant previous works. \subsection{Problem Statement} Let $M$ and $N$ denote the total number of users and items in a recommender system, so we have a $M \times N$ interaction matrix $X \in \mathcal{R}^{M \times N}$. We use low-case letter $u \in \{1,...,M\}$ and $i \in \{1,...,N\}$ to denote user $u$ and item $i$ respectively, and $X_{ui}$ represents the preference of user $u$ to item $i$. In our work, we will use two important vectors: $X_{u*}$ and $X_{*i}$. $X_{u*} = \{X_{u1},...,X_{uN}\}$ denotes user $u$'s preferences toward all items; $X_{*i} = \{X_{1i},...,X_{Mi}\}$ means the preferences for item $i$ received from all users in the system. We will focus on recommendation with implicit feedback here. Implicit feedback such as, click, browse and purchase is widely accessible and easy to collect. We set $X_{ui}$ to $1$ if the interaction between user $u$ and item $i$ exists, otherwise, $X_{ui}=0$. Here, $0$ does not necessarily mean user $u$ dislikes item $i$, it may also mean that the user does not realize the existence of item $i$. \subsection{Latent Factor Model} Latent factor model (LFM) is an effective methodology for model-based collaborative filtering. 
It assumes that the user-item affinity can be derived from low-dimensional representations of users and items. Latent factor method has been widely studied and many variants have been developed~\cite{Koren:2009:MFT:1608565.1608614,koren2008factorization,Zhang:2017:AEH:3077136.3080689,Salakhutdinov:2007:PMF:2981562.2981720}. One of the most successful realizations of LFM is matrix factorization. It factorizes the interaction matrix into two low-rank matrices with the same latent space of dimensionality $k$ ($k$ is much smaller than $M$ and $N$), such that user-item interactions are approximated as inner product in that space \begin{equation} X_{ui} = U_u \cdot V_i \end{equation} where $U \in \mathcal{R}^{M \times k}$ is the user latent factor and $V \in \mathcal{R}^{N \times k}$ is the item latent factor. With this low rank approximation, it compresses the original matrix down to two smaller matrices. \subsection{Sparse Linear Method} SLIM~\cite{6137254} is a sparse linear model for top-$n$ recommendation. It aims to learn a sparse aggregation coefficient matrix $S \in \mathcal{R}^{N \times N}$. $S$ is reminiscent of the similarity matrix in item-based neighbourhood CF (itemCF)~\cite{1167344}, but SLIM learns the similarity matrix as a least squares problem rather than determines it with predefined similarity metrics (e.g., cosine, Jaccard etc.). It finds the optimal coefficient matrix $S$ by solving the following optimization problem \begin{equation*} \begin{aligned} \underset{S}{min}\parallel X - XS\parallel_F^2 + \lambda \parallel S \parallel_F^2 + \mu \parallel S\parallel_1 \\ s.t. S\geq 0, diag(S) = 0 \end{aligned} \label{slim} \end{equation*} The constraints are intended to avoid trivial solutions and ensure positive similarities. The $\ell_1$ norm is adopted to introduce sparsity to matrix $S$. SLIM can be considered as a special case of LFM with $X \Leftrightarrow U$ and $S \Leftrightarrow V$. 
SLIM is demonstrated to outperform numerous models in terms of top-$n$ recommendation. Nevertheless, we argue that it has two main drawbacks: (1) From the definition, the size of $S$ is far larger than the two latent factor models, that is, $N \times N \gg (N \times k + M \times k )$, which also results in higher model complexity. Even though it can be improved via feature selection by first learning an itemCF model, this sacrifices model generalization as it heavily relies on other pre-trained recommendation models; (2) SLIM assumes that there exists a strong linear relationship between the interaction matrix and $S$. However, this assumption does not necessarily hold. Intuitively, the relationship shall be far more complex in real world applications due to the dynamicity of user preferences and item changes. In this work, we aim to address these two problems. Inspired by LFM and recent advances of deep neural network on recommendation tasks, we propose employing a deep neural network to tackle the above disadvantages by introducing non-linearity to top-$n$ recommendations. \section{Proposed Methodology} In this section, we present a novel nonlinear model based on neural network for top-$n$ recommendation and denote it by \textbf{NeuRec}. Unlike SLIM which directly applies linear mapping on the interaction matrix $X$, NeuRec first maps $X$ into a low-dimensional space with multi-layer neural networks. This transformation not only reduces the parameter size, but also incorporates non-linearity to the recommendation model. Then the user-item interaction is modeled by inner product in the low-dimensional space. Based on this approach, we further devise two variants, namely, U-NeuRec and I-NeuRec. \subsection{User-based NeuRec} For user-based NeuRec, we first get the high-level dense representations from the rows of $X$ with feed-forward neural networks. Note that $X$ is constructed with training data, so there are no leakages of test data in this model.
Let $W_j$ and $b_j$, $j=\{1,...,L\}$ ($L$ is the number of layers) denote the weights and biases of layer $j$. For each user, we have \begin{equation*} \begin{aligned} h_1(X_{u*}) &= f(W_1 X_{u*} + b_1) \\ h_j(X_{u*}) &= f(W_j h_{j-1} + b_j) \\ h_L(X_{u*}) &= f(W_L h_{L-1} + b_L) \end{aligned} \end{equation*} where $f(\cdot)$ is a non-linear activation function such as $sigmoid$, $tanh$ or $relu$. The dimension of output $h_L(X_{u*})$ is usually much smaller than original input $X_{u*}$. Suppose the output dimension is $k$ (we reuse the latent factor size $k$ here), we have an output $h_L(X_{u*}) \in \mathcal{R}^{k}$ for each user. Same as latent factor models, we define an item latent factor $Q_i \in \mathcal{R}^{k}$ for each item, and consider $h_L(X_{u*})$ as user latent factor. The recommendation score is computed by the inner product of these two latent factors \begin{equation} \hat{X_{ui}} = h_L(X_{u*}) \cdot Q_i \label{unfm} \end{equation} To train this model, we minimize the regularized squared error in the following form \begin{equation} \underset{W*,Q*, b*}{min}\sum_{ u, i }(X_{ui} -\hat{X_{ui}})^2 + \lambda ( \parallel W \parallel_F^2 + \parallel Q \parallel_F^2) \label{opt} \end{equation} Here, $\lambda$ is the regularization rate. We adopt the Frobenius norm to regularize weight $W$ and item latent factor $Q$. Since parameter $Q$ is no longer a similarity matrix but latent factors in a low-dimensional space, the constraints in SLIM and $\ell_1$ norm can be relaxed. For optimization, we apply the Adam algorithm~\cite{kingma2014adam} to solve this objective function. Figure \ref{nlrec}(left) illustrates the architecture of U-NeuRec. \begin{figure*} \centering \includegraphics[width=0.85\textwidth]{neurec.png} \caption{Illustration of User-based NeuRec (left) and item-based NeuRec(right). 
Both of them have two parts: a multi-layer perceptron with $X_{u*}$ (or $X_{*i}$) as input and item (or user) latent factor.} \label{nlrec} \vspace{-4mm} \end{figure*} \subsection{Item-based NeuRec} Likewise, we use the column of $X$ as input and learn a dense representation for each item with a multi-layered neural network \begin{align} h_1(X_{*i}) &= f(W_1 X_{*i} + b_1) \\ h_j(X_{*i}) &= f(W_j h_{j-1} + b_j) \\ h_L(X_{*i}) &= f(W_L h_{L-1} + b_L) \end{align} Let $P_u$ denote the user latent factor for user $u$, then the preference score of user $u$ to item $i$ is computed by \begin{equation} \hat{X_{ui}} = P_u \cdot h_L(X_{*i}) \label{infm} \end{equation} We also employ a regularized squared error as the training loss. Thus, the objective function of item-based NeuRec is formulated as \begin{equation} \underset{W*,P*, b*}{min}\sum_{ u, i }(X_{ui} -\hat{X_{ui}})^2 + \lambda ( \parallel W \parallel_F^2 + \parallel P \parallel_F^2) \end{equation} the optimal parameters can be learned with the Adam Optimizer as well. The architecture of I-NeuRec is illustrated in Figure \ref{nlrec}(right). \subsection{Dropout Regularization} Dropout~\cite{srivastava2014dropout} is an effective regularization technique for neural networks. It can reduce the co-adaptation between neurons by randomly dropping some neurons during training. Unlike traditional dropout which is usually applied on hidden layers, here, we propose applying the dropout operation on the input layer $X_{u*}$ or $X_{*i}$ (We found that the improvement of applying the dropout on hidden layers is subtle in our case). By randomly dropping some historical interactions, we could prevent the model from learning the identity function and increase the robustness of NeuRec. \subsection{Relation to LFM and SLIM} In this section, we shed some light on the relationships between NeuRec and LFM / SLIM. NeuRec can be regarded as a neural integration of LFM and sparse linear model.
NeuRec utilizes the concepts of latent factor in LFM. The major difference is that either item or user latent factors of NeuRec are learned from the rating matrix with deep neural network. In addition, NeuRec also manages to capture both negative and positive feedback in an integrated manner with rows or columns of $X$ as inputs. To be more precise, U-NeuRec is a neural extension of SLIM. If we set $f$ to the identity function and enforce $W$ to be a uniform vector of 1 and omit the biases, we have $h_L(X_{u*})^T \Leftrightarrow X_{u*}$. Hence, U-NeuRec will degrade to a SLIM with $S \Leftrightarrow Q$. Note that the sparsity and non-negativity constraints are dropped. I-NeuRec has no direct relationship with SLIM. Nonetheless, it can be viewed as a symmetric version of U-NeuRec. Since the objective functions of NeuRec and SLIM are similar, the complexities of these two models are linear in the size of the interaction matrix. Yet, NeuRec has fewer model parameters. \subsection{Pairwise Learning Approach} NeuRec can be boiled down to a pairwise training scheme with Bayesian log loss. \begin{equation} \begin{multlined} \underset{\Theta}{min} \sum_{(u,i^+, i^-)} - log(\sigma(\hat{X}_{ui^+} - \hat{X}_{ui^-})) + \Omega(\Theta) \label{pairwise} \end{multlined} \end{equation} where $\Theta$ denotes the model parameters, $\Theta = \{W*,Q*, b*\}$ for U-NeuRec, and $\Theta = \{W*, P*, b*\}$ for I-NeuRec; $\Omega$ is Frobenius regularization; $i^+$ and $i^-$ represent observed and unobserved items respectively. The above pairwise method is intended to maximize the difference between positive items and negative items. However, previous studies have shown that optimizing this pairwise loss does not necessarily lead to the best ranking performance~\cite{Zhang:2013:OTC:2484028.2484126}.
To overcome this issue, we adopt a non-uniform sampling strategy: in each epoch, we randomly sample $t$ items from negative samples for each user, calculate their ranking score and then treat the item with the highest rank as the negative sample. The intuition behind this algorithm is that we shall rank all positive samples higher than negative samples. \section{Experiments} In this section, we conduct experiments on four real-world datasets and analyze the impact of hyper-parameters. \subsection{Experimental Setup} \subsubsection{Datasets Description} We conduct experiments on four real-world datasets: Movielens HetRec, Movielens 1M, FilmTrust and Frappe. The two Movielens datasets\footnote{https://grouplens.org/datasets/movielens/} are collected by GroupLens research\cite{Harper:2015:MDH:2866565.2827872}. Movielens HetRec is released in HetRec 2011\footnote{http://recsys.acm.org/2011 }. It consists of $855598$ interactions from $10109$ movies and $2113$ users. They are widely used as benchmark datasets for evaluating the performance of recommender algorithms. FilmTrust is crawled from a movie sharing and rating website by Guo et al.~\cite{guo2013novel}. Frappe~\cite{baltrunas2015frappe} is an Android application recommendation dataset which contains around a hundred thousand records from $957$ users on over four thousand mobile applications. The interactions of these four datasets are binarized with the approach introduced in Section 2.1. \subsubsection{Evaluation Metrics} To appropriately evaluate the overall performance for the ranking task, the evaluation metrics include Precision and Recall with different cut-off values (e.g., P@5, P@10, R@5 and R@10), Mean Average Precision (MAP), Mean Reciprocal Rank (MRR) and Normalized Discounted Cumulative Gain (NDCG).
These metrics are used to evaluate the quality of recommendation lists regarding different aspects~\cite{liu2009learning,shani2011evaluating}: Precision, Recall and MAP are used to evaluate the recommendation accuracy, as they only consider the hit numbers and ignore the rank positions; MRR and NDCG are two rank-aware measures with which higher ranked positive items are prioritized, thus they are more suitable for assessing the quality of ranked lists. We omit the details for brevity. \subsection{Implementation Details} We implemented our proposed model based on Tensorflow\footnote{https://www.tensorflow.org/} and tested it on an NVIDIA TITAN X Pascal GPU. All models are learned with mini-batch Adam. We do grid search to determine the hyper-parameters. For all the datasets, we implement a five-hidden-layer neural network with constant structure for the neural network part of NeuRec and use \textit{sigmoid} as the activation function. For ML-HetRec, we set the neuron number of each layer to $300$, latent factor dimension $k$ to $50$ and dropout rate to $0.03$; For ML-1M, neuron number is set to $300$, $k$ is set to $50$, and dropout rate is set to $0.03$. The neuron size for FilmTrust is set to $150$ and $k$ is set to $40$. We do not use dropout for this dataset; For Frappe, neuron size is set to $300$, $k$ is set to $50$ and dropout rate is set to $0.03$. We set the learning rate to $1e-4$ for ML-HetRec, ML-1M and Frappe. The learning rate for FilmTrust is $5e-5$. For ML-HetRec, ML-1M and FilmTrust, we set the regularization rate to $0.1$, and that for Frappe is set to $0.01$. For simplicity, we adopt the same parameter setting for the pairwise training method. We use 80\% user-item pairs as training data and hold out 20\% as the test set, and estimate the performance based on five random train-test splits.
\subsection{Results and Discussions} Since NeuRec is designed to overcome the drawbacks of LFM and \textbf{SLIM}, so they are two strong baselines for comparison to demonstrate if our methods can overcome their disadvantages. Specifically, we choose \textbf{BPRMF}~\cite{rendle2009bpr}, a personalized ranking algorithm based on matrix factorization, as the representative of latent factor model. Similar to~\cite{6137254}, we adopt neighbourhood approach to accelerate the training process of SLIM. For fair comparison, we also report the results of \textbf{mostPOP} and two neural network based models: \textbf{GMF} and \textbf{NeuMF}~\cite{he2017neural}, and follow the configuration proposed in ~\cite{he2017neural}. The recent work DMF~\cite{ijcai2017-447} is tailored for explicit datasets and not suitable for recommendations on implicit feedback, so it is unfair to compare our method with it. \subsubsection{Parameter Size} The parameter size of SLIM is $N \times N$, while I-NeuRec has $S_{nn} + k \times M$ parameters and U-NeuRec has $S_{nn} + k \times N$. $S_{nn}$ is the size of the neural network. Usually, our model can reduce the number of parameters largely (up to 10 times). \subsubsection{Overall Comparisons} \begin{table*}[t!] 
\centering \label{result} \begin{tabular}{l|cccccc} \toprule \midrule \multicolumn{7}{c}{\textbf{MOVIELENS HetRec}} \\ \midrule Methods& Precision@5 & Precision@10 & Recall@5 & Recall@10 & MAP & MRR \\ \midrule mostPOP & $0.455 \pm 0.002$ & $0.403 \pm 0.003$ & $0.042 \pm 0.001$ & $ 0.070 \pm 0.001$ & $0.181 \pm 0.001$ & $0.651 \pm 0.004$\\ BPRMF & $0.537 \pm 0.002$ & $0.486 \pm 0.001$& $0.052 \pm 0.001$ & $0.090 \pm 0.001 $ &$0.246 \pm 0.001$ & $0.713 \pm 0.001$ \\ GMF & $0.540\pm0.002 $& $ 0.487\pm 0.001 $ &$ 0.053 \pm 0.001$ &$ 0.090\pm0.001 $ & $0.248 \pm 0.001$& $ 0.719\pm 0.005$ \\ SLIM & $0.528 \pm 0.002 $& $0.465 \pm 0.002$ &$0.055 \pm 0.001$ &$0.090 \pm 0.001$ & $ 0.227 \pm 0.001$& $0.755 \pm 0.001$ \\ NeuMF & $0.535\pm0.006 $& $ 0.485\pm 0.004 $ &$ 0.053 \pm 0.001$ &$ 0.091\pm0.001 $ & $0.248 \pm 0.002$& $ 0.722\pm 0.006$ \\ \midrule I-NeuRec & $\textbf{0.603} \pm \textbf{0.004}$ & $\textbf{0.542} \pm \textbf{0.003}$ & $\textbf{0.060} \pm \textbf{0.001}$ & $\textbf{0.101} \pm \textbf{0.001}$ & $\textbf{0.278} \pm \textbf{0.002}$ &$\textbf{0.772} \pm \textbf{0.006}$ \\ U-NeuRec & $\underline{0.601}\pm 0.004 $& $\underline{0.538}\pm 0.004 $& $\underline{0.059}\pm 0.001 $ & $\underline{0.098}\pm 0.002 $ &$\underline{0.271}\pm 0.002$ & $\underline{0.768}\pm 0.003$ \\ \midrule \multicolumn{7}{c}{\textbf{MOVIELENS 1M}} \\ \midrule Methods& Precision@5 & Precision@10 & Recall@5 & Recall@10 & MAP & MRR \\ \midrule mostPOP& $0.210 \pm 0.001$& $0.182 \pm 0.002$ & $0.041 \pm 0.001$ & $0.066 \pm 0.001 $ & $0.102 \pm 0.001 $& $0.392 \pm 0.004 $\\ BPRMF & $0.354 \pm 0.003 $& $0.307 \pm 0.001$& $0.078 \pm 0.001 $ & $0.130 \pm 0.001$ & $0.199 \pm 0.001$ & $0.572 \pm 0.003 $ \\ GMF & $0.367\pm0.001 $& $ 0.316\pm 0.001 $ &$ 0.081 \pm 0.001$ &$ 0.134\pm0.001 $ & $0.201 \pm 0.001$& $ 0.589\pm 0.006$ \\ SLIM &$0.340 \pm 0.004 $ & $0.291 \pm 0.002$& $0.091 \pm 0.001$ & $0.148 \pm 0.001$ & $0.198 \pm 0.001$ & $0.585 \pm 0.003$ \\ NeuMF & $0.367\pm 0.004 $& $ 0.319\pm 0.002$ &$ 
0.081 \pm0.002 $ &$ 0.135\pm 0.002$ & $ 0.208 \pm 0.002 $& $ 0.586\pm0.002 $ \\ \midrule I-NeuRec &$\underline{0.414} \pm 0.001$ & $\underline{0.359} \pm 0.001$ & $\underline{0.100} \pm 0.001$ & $\underline{0.161} \pm 0.001$& $\underline{0.242} \pm 0.001$& $\underline{0.636} \pm 0.003$ \\ U-NeuRec & $\textbf{0.419} \pm \textbf{0.002}$ & $\textbf{0.362} \pm \textbf{0.003}$ & $\textbf{0.103} \pm\textbf{0.001}$ & $\textbf{0.165} \pm \textbf{0.002}$ & $\textbf{0.245} \pm \textbf{0.002}$& $\textbf{0.650} \pm \textbf{0.003}$ \\ \midrule \multicolumn{7}{c}{\textbf{FILMTRUST}} \\ \midrule Methods& Precision@5 & Precision@10 & Recall@5 & Recall@10 & MAP & MRR \\ \midrule mostPOP & $0.418 \pm 0.004$ & $0.350 \pm 0.002 $ & $0.397 \pm 0.008$& $\underline{0.631} \pm 0.004 $ & $0.489 \pm 0.002$ & $ 0.618 \pm 0.004$\\ BPRMF & $0.412 \pm 0.005$& $0.347 \pm 0.000$ & $0.391 \pm 0.009 $& $0.613 \pm 0.007$& $0.476 \pm 0.004$&$0.600 \pm 0.007 $ \\ GMF & $0.393\pm0.004 $& $ 0.342\pm 0.003 $ &$ 0.393 \pm 0.004$ &$ 0.608\pm0.002 $ & $0.481 \pm 0.004$& $ 0.613\pm 0.008$ \\ SLIM & $ \underline{0.431} \pm 0.002$ & $\underline{0.352} \pm 0.002$ & $\underline{0.422} \pm 0.005$ & $0.625 \pm 0.003$ & $\underline{0.507} \pm 0.003$ & $\underline{0.647} \pm 0.002$ \\ NeuMF & $0.413\pm 0.003 $& $ 0.350\pm0.003$ &$ 0.392\pm 0.002$ &$ 0.626 \pm 0.007$ & $ 0.483\pm0.001$& $ 0.609\pm0.005 $ \\ \midrule I-NeuRec & $ 0.421\pm0.005 $ & $ 0.347\pm0.002 $ & $0.405 \pm 0.011$ &$ 0.619\pm0.005 $ & $ 0.491\pm0.008 $ & $ 0.621\pm0.012 $ \\ U-NeuRec & $\textbf{0.441} \pm \textbf{0.003}$ & $\textbf{0.358} \pm \textbf{0.002}$ & $\textbf{0.446} \pm \textbf{0.004}$& $\textbf{0.654} \pm \textbf{0.007} $ & $\textbf{0.530} \pm \textbf{0.006}$ & $\textbf{0.667} \pm \textbf{0.008}$ \\ \midrule \multicolumn{7}{c}{\textbf{FRAPPE}} \\ \midrule Methods& Precision@5 & Precision@10 & Recall@5 & Recall@10 & MAP & MRR \\ \midrule mostPOP &$0.034 \pm 0.001$ & $0.026 \pm 0.001$ &$0.054 \pm 0.001$& $0.075 \pm 0.001$ & $0.041 \pm 
0.002$& $0.115 \pm 0.001$ \\ BPRMF & $0.055 \pm 0.003$ & $0.052 \pm 0.003$ & $0.059 \pm 0.002$ & $0.095 \pm 0.005$& $0.052 \pm 0.002$ & $0.134 \pm 0.005$ \\ GMF & $0.055 \pm 0.004$ & $0.043 \pm 0.002 $ & $0.066 \pm 0.005$& $0.095 \pm 0.006$& $0.094 \pm 0.001$&$0.151 \pm 0.001$ \\ SLIM & $0.089 \pm 0.003$ & $0.064 \pm 0.001$ & $0.065 \pm 0.003$ & $0.092 \pm 0.003$& $\underline{0.108} \pm 0.003$ & $\underline{0.195} \pm 0.003$ \\ NeuMF & $0.072\pm 0.002 $& $ 0.056\pm0.002 $ &$ \underline{0.076}\pm 0.002$ &$ \textbf{0.105}\pm\textbf{0.004}$ & $ 0.104\pm 0.002$& $ 0.174\pm 0.004$ \\ \midrule I-NeuRec & $\textbf{0.106} \pm \textbf{0.003}$ & $\textbf{0.075} \pm \textbf{0.001}$ & $\textbf{0.078} \pm \textbf{0.003}$& $\underline{0.102} \pm 0.005 $ & $\textbf{0.125} \pm \textbf{0.004}$ & $\textbf{0.211} \pm \textbf{0.006}$ \\ U-NeuRec & $\underline{0.093}\pm 0.006 $ & $\underline{0.068}\pm 0.003 $ & $0.067\pm 0.007 $ & $0.094\pm 0.006 $ & $0.107 \pm 0.004$ & $0.185\pm 0.002$ \\ \midrule \bottomrule \end{tabular} \caption{Precision@5, Precision@10, Recall@5, Recall@10, MAP and MRR comparisons on Movielens HetRec, Movielens 1M, FilmTrust, Frappe. Best performance is in boldface and second best is underlined. I-NeuRec and U-NeuRec are models proposed by us.} \vspace{-3mm} \label{resultsss} \end{table*} Table \ref{resultsss} and Figure 2 summarize the overall performance of baselines and NeuRec. From the comparison, we can observe that our methods constantly achieve the best performances on these four datasets not only in terms of prediction accuracy but also ranking quality. Higher MRR and NDCG mean that our models can effectively rank the items user preferred in top positions. Performance gains of NeuRec over the best baseline are: Movielens HetRec ($8.61\%$), Movielens 1M ($12.29\%$), FilmTrust ($3.43\%$), Frappe ($8.93\%$). The results of I-NeuRec and U-NeuRec are very close and better than competing baselines. 
The subtle difference between U-NeuRec and I-NeuRec might be due to the distribution differences of user historical interactions and item historical interactions (or the number of users and items). We found that the improvement of NeuMF over GMF are not significant, which might be due to the overfitting caused by the use of dual embedding spaces~\cite{Tay:2018:LRM:3178876.3186154}. Although the improvements of pairwise based U-NeuRec and I-NeuRec are subtle (in Tables 2 and 3), they are still worth being investigated. From the results, we observe that U-NeuRec is more suitable for pairwise training. In U-NeuRec, positive item and negative item are represented by two independent vectors $Q_{i^+}$ and $Q_{i^-}$, while in I-NeuRec, they need to share the same network with input $X_{*i^+}$ or $X_{*i^-}$. Therefore, the negative and positive samples will undesirably influence each other. \begin{table}[h] \centering \label{my-label} \begin{tabular}{c|cccc} \toprule & \multicolumn{1}{c}{ML HetRec} & \multicolumn{1}{c}{ML 1M} & \multicolumn{1}{c}{FilmTrust} & \multicolumn{1}{c}{FRAPPE} \\ \midrule P@5 & 0.521 & 0.347 & 0.418 & 0.038\\ P@10 & 0.473 & 0.303 & 0.349 & 0.032 \\ R@5 & 0.047 & 0.077 & 0.402 & 0.054 \\ R@10 & 0.082 & 0.128 & 0.630 & 0.086 \\ MAP &0.227 & 0.194 &0.492 & 0.076\\ MRR & 0.702 & 0.564 & 0.625& 0.115 \\ NDCG & 0.636 & 0.560 & 0.656 & 0.137 \\ \bottomrule \end{tabular} \caption{Performance of U-NeuRec with pairwise training algorithm} \vspace{-2mm} \end{table} \begin{figure}[h] \begin{center} \begin{minipage}[t]{4.0cm} \includegraphics[width=4.0cm]{ndcghetrec.png} \centering{(a)} \end{minipage} \begin{minipage}[t]{4.0cm} \includegraphics[width=4.0cm]{ndcg1m.png} \centering{(b)} \end{minipage} \begin{minipage}[t]{4.0cm} \includegraphics[width=4.0cm]{ndcgft.png} \centering{(c)} \end{minipage} \begin{minipage}[t]{4.0cm} \includegraphics[width=4.0cm]{ndcgfrappe.png} \centering{(d)} \end{minipage} \caption{ NDCG Comparison on dataset (a) Movielens HetRec; 
(b) Movielens 1M; (c) FilmTrust; (d) Frappe.} \label{fig:observation} \end{center} \vspace{-5mm} \end{figure} \begin{table}[h] \centering \label{my-label} \begin{tabular}{c|cccc} \toprule & \multicolumn{1}{c}{ML HetRec} & \multicolumn{1}{c}{ML 1M} & \multicolumn{1}{c}{FilmTrust} & \multicolumn{1}{c}{FRAPPE} \\ \midrule P@5 & 0.415 &0.345 & 0.413& 0.039 \\ P@10 & 0.394 & 0.304 & 0.346 & 0.036\\ R@5 & 0.036 & 0.075 & 0.397 & 0.037 \\ R@10 & 0.066 & 0.127 & 0.618 & 0.063 \\ MAP & 0.210 & 0.193 & 0.483 & 0.063\\ MRR & 0.579 & 0.554 & 0.610 & 0.108 \\ NDCG & 0.615 & 0.556 & 0.644 & 0.129 \\ \bottomrule \end{tabular} \caption{Performance of I-NeuRec with pairwise training algorithm} \vspace{-2mm} \end{table} \subsection{Sensitivity to Neural Network Parameters } In the following text, we systematically investigate the impacts of neural hyper-parameters on U-NeuRec with dataset FilmTrust (I-NeuRec has a similar pattern to U-NeuRec). In each comparison, we keep other settings unchanged and adjust the corresponding parameter values. \begin{figure}[t] \begin{center} \begin{minipage}[t]{4.2cm} \includegraphics[width=4.2cm]{lfs.png} \centering{(a)} \end{minipage} \begin{minipage}[t]{4.2cm} \includegraphics[width=4.2cm]{nofn.png} \centering{(b)} \end{minipage} \begin{minipage}[t]{4.2cm} \includegraphics[width=4.2cm]{af.png} \centering{(c)} \end{minipage} \begin{minipage}[t]{4.2cm} \includegraphics[width=4.2cm]{lll.png} \centering{(d)} \end{minipage} \caption{ Sensitivity of U-NeuRec to neural network hyper-parameter: (a) Latent Factor Size $k$; (b) Number of Neurons; (c) Activation Function; (d) Depth of Neural Network.} \label{fig:observation} \end{center} \vspace{-5mm} \end{figure} \subsubsection{Latent Factor Size} Similar to latent factor model~\cite{koren2015advances}, the latent factor dimension poses great influence on the ranking performances. Larger latent factor size will not increase the performance and may even result in overfitting. 
In our case, setting $k$ to a value around $30$ to $50$ is a reasonable choice. \subsubsection{Number of Neurons} We set the neurons size to 50, 150, 250, 350 and 450 with a constant structure. As shown in Figure 3(b), both too simple and too complex model will decrease the model performance: simple model suffers from under-fitting while complex model does not generalize well on test data. \subsubsection{Activation Function} We mainly investigate activation functions: $sigmoid$, $tanh$, $relu$ and $identity$. We apply the activation function to all hidden layers. Empirically study shows that the $identity$ function performs poorly with NeuRec, which also demonstrates the effectiveness of introducing non-linearity. $sigmoid$ outperforms the other three activation functions. One possible reason is that $sigmoid$ can restrict the predicted value in range of $[0,1]$, so it is more suitable for binary implicit feedback. \subsubsection{Depth of Neural Network} Another key factor is the depth of the neural network. From Figure 3(d), we observe that our model achieves comparative performances with hidden layers number set to 3 to 7. However, when we continue to increase the depth, the performance drops significantly. Thus, we would like to avoid over-complex model by setting the depth to an appropriate small number. \section{Conclusion and Future Work} In this paper, we propose the NeuRec along with its two variants which provide a better understanding of the complex and non-linear relationship between items and users. Experiments show that NeuRec outperforms the competing methods by a large margin while reducing the size of parameters substantially. In the future, we would like to investigate methods to balance the performance of I-NeuRec and U-NeuRec, and incorporate items/users side information and context information to further enhance the recommendation quality. In addition, more advanced regularization techniques such as batch normalization could also be explored. 
\bibliographystyle{named}
{ "redpajama_set_name": "RedPajamaArXiv" }
1,112
import keyMirror from 'keymirror'; export const ActionTypes = keyMirror({ DEHYDRATE: null, REHYDRATE: null, GET_USER: null, ASD: null, GET_FILE: null, LOGIN: null, LOGIN_SUCCESS: null, LOGIN_FAILED: null, DATA: null, DATA_SUCCESS: null, DATA_FAILED: null, FILE: null, FILE_SUCCESS: null, FILE_FAILED: null, GETUSER: null, GETUSER_SUCCESS: null, GETUSER_FAILED: null, USER_LOGOUT: null, COMMENTS: null, COMMENTS_SUCCESS: null, COMMENTS_FAILED: null, POSTCOMMENT: null, POSTCOMMENT_SUCCESS: null, POSTCOMMENT_FAILED: null });
{ "redpajama_set_name": "RedPajamaGithub" }
7,658
Prior to beginning work on this assignment, read the following Prior to beginning work on this assignment, read the following articles: Watch the following video: After the passage of the Patient Protection and Affordable Care Act of 2010, health care organizations have been faced with significant challenges in providing quality care to all Americans. HealthyPeople.gov Links to an external site. also encourages health care organizations to focus on the relevance of social determinants and health status. Take on the role of the administrator of a community hospital in your area. You would like to implement a strategic plan to improve the health status of your community. Select a vulnerable population in your area affected by a disease or condition. Examples include aging, COVID-19, diabetes, Ebola, heart disease, opioid epidemics, Zikavirus, and so on. Write a three- to five-page paper that details your strategic plan. In your paper, you take on the role of the administrator of a community hospital in your area and address the following: Describe the population, including demographics and risk factors that determine health in this population. Explain the disease or condition prevalent in this population. Identify access and barriers to health care and treatment options for this population, including local, state, and federal policies regulating control and prevention of your selected disease or condition in this population. Propose at least three strategies to improve the health of the selected population. Develop at least three key indicators to measure the success of your proposed population health management. Support your response with a minimum of three scholarly sources that were published in the last 5 years. The Population Health Management: Must be three to five double-spaced pages in length (not including title and references pages and formatted according APA Must include an introduction and conclusion paragraph. 
Your introduction paragraph needs to end with a clear thesis statement that indicates the purpose of your paper. Must use at least three scholarly or peer-reviewed sources published in the past 5 years. Must document any information used from sources in APA Style
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
259
The Office 2013 Bible: The reaches the course of the series and does well-defined and literary A-B internal to receiving the domain. The solution of Representative Works is Structures and rights by means and ia Disorganised with the d. The condition is disabled into feminist students by region; shares required under each pollution appear in interested fullness. The referral and feud wilderness of each comment is read. Ein weiterer Schwerpunkt ist Drive an Office 2013 Bible: The VDI-Richtlinien angelehnte praxisnahe Planung solcher Intralogistik-Systeme. be der Entwicklungen angepasst. Das FTS ist Schwerpunkt seiner Seminare, Beratungen series Planungen. This t gives fully signing. be your feet with Southwestern and different entries that engage boldface, missing, Office 2013 Bible: The Comprehensive Tutorial Resource; combustion; California-based. Cod Provenç FX may Provide sophisticated, but ours nuts mobile, referencesAuslegung, advanced, and below. The Let's Be Well Diabetes Box is ia and publishers for sails with citation. network users for your Tour de Cure value has the tag. A usually deep BOOK HAROLD BLOOM: THE RHETORIC OF ROMANTIC VISION and email then. Yes went the grassy online serving the amish: a cultural guide for professionals. complete book Теоретическая физика. Квантовая электродинамика 1989 with better Privacy spell and SEO! We will overly earn your download Metonymy and Language: A New Theory of Linguistic Processing offering or result antiquity. A Canadian d can use your wall and be your crucial support, meaning historical releases of internal careers. view The Stone Diaries: (Penguin Classics Deluxe Edition) word considered to your online criteria. We have ia and other students to unlock Terms and use . Local SEO, total, and close, we are you found! children to you through loud, detailed SEO. ensure Boyce-Sneed with our such first someone g, which reveals teaching errors and rules. 
18 items are these great site readers, and more comments may often choose at your website affects. 2 PAGES find these Full Article addresses, and more bugs may commonly Consolidate at your product signals. 2 engines Chomskyan his comment is here: search photos are sent with students ended on private aspects. 3 Transactions book A History of Hope: When Americans Have Dared has nearly oppressing up after matter that services' formation inspired Revised. 3 questions Ebook Modeling, Rationality, Morality, And Evolution (Vancouver Studies In Cognitive Science, Vol. 7), literary features alphabet can explore Unfortunately the smallest l, skills look. Ali the Operator will complete you a Office 2013 Bible: The to the Environmental page. use the unable request with the Kharidian server you were from Ali Morrisane, or process the two men from the National Privacy in Pollnivneach, to help a for person. read on your women and your difference. serve behind the famous giant also the name's way( also of the environment). When it allows you that it claims UsEditorial to know in that law has effect, host your battle on the business to do in. create the Office 2013 Bible: The Comprehensive Tutorial Resource 2013 author, and you will be a religion with a Update and Arabian conversations managed on it. know up the education and guess the delivery.
{ "redpajama_set_name": "RedPajamaC4" }
8,426
Q: how to implement chart and graph in java? I want to create a chat and graph on the basis of give input in Java.i don't have an idea about to how to implement chart and graph in Java. please give me some idea about the implementation. Thanks A: Go for JFreeChart. * *Here is nice tutorial with Example *Here is very good article from java world A: Use http://www.jfree.org/jfreechart/ It's technically Open Source. A: If you want to implement graph yourself, first get your x, y points in two arrays xarray[], yarray[]. Normally to implement a curved graph, you need points between the data points you already have. Use interpolation for this www.webcabcomponents.com has free version of interpolation program. You just give the xarray, yarray, any_x and you get the interpolated y point for any x. You can plot look-continuous curves by code like [pseudo code] firstX,firstY =0 //assumed x=0; y(0)=0 loop for x= 0 to panel width; { get interpolatedY(x); drawLine from (firstX,firstY) to (nextX,nextY); //use Graphics.drawLine command firstX=nextX; firstY=nextY; } You may have to set Scale if necessary and multiply the firstX etc by scale. For Interpolation these bit of code will get you started. returnedArray has two elements. First returnedArray[0] is the required y. The other is error estimate (neglect for now). import webcab.lib.math.interpolation.Interpolation; Interpolation interpol1 = new Interpolation(); double[] returnedArray = interpol1.interpolateExtrapolatePolynomial(getQArray(),getEArray(),x); y=returnedArray[0]; The above method interpolateExtrapolatePolynomial(getQArray(),getEArray(),x) is for polymial type curves. If you data points do follow any other pattern class, Interpolation has other methods. Or you can implement yourself using numerical methods. Happy coding!
{ "redpajama_set_name": "RedPajamaStackExchange" }
761
{"url":"https:\/\/ask.libreoffice.org\/en\/question\/182052\/how-to-get-push-button-label-text-from-within-macro\/","text":"# How to get push button label text from within macro\n\nHi\n\ni am trying to find away to appen to a shell command extra parameters that will be held in push button labels.\n\nfor example\n\nI could have loads of push buttons with labels\n\nX 222 X 5432 Y 5443 Y 55353 G 56554\n\nI would need the macro to get the append the label text to the shell command\n\nis this possible?\n\nedit retag close merge delete\n\nMacros are an advanced technique. I suggest you read the Libreoffice manuals related to macros first.\n\n( 2019-02-06 08:45:30 +0100 )edit\n\n@: \"... extra parameters that will be held in push button labels...\"\nDid you also consider to create hyperlinks instead of buttons for the purpose? A hyperlink can also be used to call a subroutine provided by the user.\n\nSorry. Just tested it and it didn't work the way I tried (and thought to remember).\n\n( 2019-02-06 12:35:59 +0100 )edit\n\nSort by \u00bb oldest newest most voted\n\nHi\n\nI worked out the following and it seems to work for my needs;\n\nSub My_Remote(oEvent)\noCaller=oEvent.Source.Model\nsName = oCaller.Name\nsLabel = oCaller.Label\nshell(\"my_remote.exe \" & sLabel,6)\nEnd Sub\n\nmore\n\nI still would emphasize that the .Tag property is appropriate for the purpoise. This does not prohibit, of course, to also show part (or all) of this informartion via the .Label to the user.\n\n( 2019-02-06 23:13:26 +0100 )edit\n\nI use the label as this shows the actual commands that are being sent to the external application. It makes it visually easier to understand.\n\n( 2019-02-06 23:20:47 +0100 )edit\n\nOf course. (I could anticipate this reason.) 
You have to do as your needs require.\nHowever, you need to edit your \"loads of push buttons\" one by one this way, won't you?\nUsers more acquainted with spreadsheets or databases tend to think of lookup tables for related purposes.\n\n( 2019-02-06 23:24:30 +0100 )edit\n\nIts a Office Writer Document. Its more of an in house instructional manual, that a user can read about a device(s) function; if they press the button that has the command on it, it then sends this to the remote app, which in turns communicates with other application\/devices to demo\/test that function. As it stands, I can now just copy paste a Button, change its label to match the function its going to send and bingo.\n\n( 2019-02-06 23:45:47 +0100 )edit\n\nIn addition to what @EasyTrieve said:\nWhen writing a macro for handling a FormControl event you need to provide a formal paramtere for the event. When called due to an action on the button this parameter gets an object assigned that contains all the needed information. Its indirect property .Source.Model gives access to the control (a PushButton in your case) having raised the event. There you also find the .Label string.\nAdditional advice: The .Label property is what you see displayed on the Button. It may not be the bset place to pass additional information. The object inspector working on any control also has a line titled 'Additional information' under the tab General which is more appropriate for the purpose. However you need to know that the model's property giving access to that information (string) is .Tag in the API.\n\n===Edit1 2019-02-06 23:08 (UTC+1:00)===\nI did a bit more meanwhile because I got interested in the question requiring probably an approach from a somehow theoretical starting point.\nFor programmers of user code it is a well known annoying problem to pass parameters to subroutines needing to be called via >Tools>Macros>Run or by an event raised by MouseClick on a sensitive area. 
Using Calc I prefer cell ranges for the purpose. Working with text douments TextTable can be a (uncomfortable) surrogate.\nIt's different and much more complicated if there shall be used \"many\" FormControl, most Likely of PushButton type. To a first view it seems unavoidable to edit every single button pasing information to the Sub to call via the .Tag (or .Label ??) property e.g.\nI wanted to find a way to simply create one such button associated with one Sub (that will have to select cases internally), to copy this button and paste it into (e.g) the cells of a table column, and to take the needed additional information after the event raising action from an adjacent column then.\nIt was a bit challenging, and it isn't satisfyingly efficient. I nonetheless attach this example demonstrating how to do it in pr\u00ednciple. The included Sub for the disambiguation of control names may be of some interest independent of this special question.\n\nOf course I am interested in solutions proving my attempt unnecessarily complicated.\n\nmore\n\nHI Using the source.model would appear to be ideal, but I cannot find anyway\/examples of this property. DO you have any examples? Spent hrs googling and getting nowhere, probably seraching for the wrong thing.\n\n( 2019-02-06 14:04:58 +0100 )edit\n\nSorry I don't understand what you expect as an \"example for source.model\". I described the way to get access to subordinate properties like .Label and .Tag. 
The \"model\" itself is a way to talk_of\/access the FormControl object having raised the event.\nIf I find the time I will attach a basic example for a TextDocument containing a button and a Sub called by clicking on it.\n\n( 2019-02-06 14:37:11 +0100 )edit\n\n## Stats\n\nAsked: 2019-02-06 02:43:56 +0100\n\nSeen: 27 times\n\nLast updated: Feb 06","date":"2019-03-20 23:56:34","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 1, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.2198875993490219, \"perplexity\": 2852.3508648601005}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.3, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2019-13\/segments\/1552912202474.26\/warc\/CC-MAIN-20190320230554-20190321012554-00501.warc.gz\"}"}
null
null
\section{Introduction} Let $k\geq 2$, $4|N$ be integers, $\chi\pmod N$ a Dirichlet character, and let \\ $f=\sum_{n=1}^{\infty}a(n)q^{n}\in S_{k+1/2}(N,\chi)$ be a non-zero cuspidal Hecke eigenform of weight $k+\frac{1}{2}$. Applying the Shimura lift to $f$ for a fixed squarefree $t$ such that $a(t)\neq 0$, we get $F_{t}=\sum_{n=1}^{\infty}A_{t}(n)q^{n}\in S_{2k}(N/2,\chi^{2})$ the Hecke eigenform of weight $2k$. When $\chi=1$, Bruinier and Kohnen suggested in \cite{bruin} that half of the coefficients $a(n)$ are positive among all non-zero Fourier coefficients. This suggestion was formulated later explicitly as a conjecture in \cite{kohnen}. Assuming some error term for the convergence of the Sato-Tate distribution for integral weight modular forms in \cite{ilker3}, Inam and Wise showed when $F_t$ has no CM that half of the coefficients $a(t n^{2})$ are positive. They formulated this result in terms of Dedekind-Dirichlet density. They also showed with Arias-de-Reyna in \cite{ilker2}, that $(a(t n^{2}))_{n\in\mathbb{N}}$ are equidistributed when $F_t$ has CM and the equidistribution was reformulated in both CM and not CM cases using Dedekind-Dirichlet and natural densities. Later, those results were obtained in \cite{ilker1} by removing the error term assumption. The present work gives an improvement of the Bruinier-Kohnen conjecture. Indeed, under the error term hypothesis that we will explain below, our main result is the following theorem. \begin{thm}\thlabel{thm71} Assume the setting of the introduction and suppose that $F_t$ does not have complex multiplication. Let $q$ be a natural number. 
Suppose that for all Dirichlet characters $\varepsilon\pmod q$ and all roots of unity $\xi$ such that $\xi\in\,Im\,\varepsilon$, there are $C_{\varepsilon,\xi}>0$ and $\alpha_{\varepsilon ,\xi}>0$ such that \begin{equation} \left|\frac{\#\left\{p\leq x\text{ prime}\mid p\nmid N, \varepsilon(p)=\xi, \frac{A_{t}(p)}{2 a(t)p^{\frac{k-1}{2}}\chi(p)}\in [a,b]\right\}}{\pi(x)}-\frac{\mu([a,b])}{\# Im\,\varepsilon}\right| \leq \frac{C_{\varepsilon,\xi}}{x^{\alpha_{\varepsilon,\xi}}}. \end{equation} Then for all integers $d$, $(d,q)=1$, the sets \begin{multline} \left\{n\in\mathbb{N}\mid (n,N)=1, n\equiv d\text{ mod }q, \frac{a(t n^{2})}{\chi(n)}>0\right\}\text{ and }\\ \left\{n\in\mathbb{N}\mid (n,N)=1, n\equiv d\text{ mod }q,\frac{a(t n^{2})}{\chi(n)}<0\right\} \end{multline} have equal positive natural densities and both are half of the natural density of \begin{equation}\label{t1t} \left\{n\in\mathbb{N}\mid (n,N)=1, n\equiv d\text{ mod }q, \frac{a(t n^{2})}{\chi(n)}\neq 0\right\}. \end{equation} \end{thm} We discuss here two aspects of this theorem. Consider first the case when $\chi=1$ and the coefficients $a(n)$ are real. Then for all natural numbers $q$ and $d$ such that $(d,q)=1$, we have $$ \lim_{x\rightarrow +\infty}\frac{\#\left\{n\leq x\mid n\equiv d\text{ mod }q, a(t n^{2})\gtrless 0\right\}}{\#\left\{n\leq x\mid n\equiv d\text{ mod }q, a(t n^{2})\neq 0\right\}}=\frac{1}{2}\cdot $$ This extends the results obtained in \cite{ilker2,ilker3}, and therefore, one can ask if the Bruinier-Kohnen conjecture remains true over arithmetic progressions. We have no numerical experiments yet to support this hypothesis. Consider now the general case $f\in S_{k+1/2}(N,\chi)$. Let $q$ be a natural number, $\varepsilon\text{ mod }q$ a Dirichlet character and $\xi\in\,Im\,\varepsilon$. 
From the main theorem above and since the density of the set \eqref{t1t} is independent of $d$ by \thref{rem1} and \thref{rem2}, the sets \begin{multline} \left\{n\in\mathbb{N}\mid (n,N)=1, \varepsilon (n)=\xi, \frac{a(t n^{2})}{\chi(n)}>0\right\}\text{ and }\\ \left\{n\in\mathbb{N}\mid (n,N)=1, \varepsilon (n)=\xi,\frac{a(t n^{2})}{\chi(n)}<0\right\} \end{multline} have equal positive natural densities and both are half of the natural density of $$ \left\{n\in\mathbb{N}\mid (n,N)=1, \varepsilon (n)=\xi, \frac{a(t n^{2})}{\chi(n)}\neq 0\right\}. $$ In the particular case $q=N$ and $\varepsilon=\chi$, we deduce that when $\xi\neq \pm i$, the sets \begin{multline} \left\{n\in\mathbb{N}\mid \chi (n)=\xi, Re\left(a(t n^{2})\right)>0\right\}\text{ and }\\ \left\{n\in\mathbb{N}\mid \chi (n)=\xi, Re\left(a(t n^{2})\right)<0\right\} \end{multline} have equal positive natural densities and both are half of the natural density of $$ \left\{n\in\mathbb{N}\mid \chi (n)=\xi, a(t n^{2})\neq 0\right\}. $$ Geometrically, the coefficients $a(t n^{2})$ with $\chi (n)=\xi$ belong to the same line and they are equidistributed over it. When $\xi=\pm i$, we obtain a similar result and the coefficients $a(t n^{2})$ with $\chi (n)=i$ or $-i$ are equidistributed over the vertical line that passes through $i$ and $-i$. Once again, one can ask more generally if the Fourier coefficients $a(n)$ with $(n,N)=1$, that belong to the same line, are equidistributed geometrically as above. \section{Notions of Density} Recall that the set of primes (resp. the set of natural numbers) $S\subseteq \mathbb{P}$ (resp. $A\subseteq \mathbb{N}$) has a natural density $d(S)$ (resp. $d(A)$) if the limit $d(S)=\lim_{x\rightarrow +\infty}\frac{\pi_{S}(x)}{\pi(x)}$ (resp. $d(A)=\lim_{x\rightarrow +\infty}\frac{\#\{n\leq x\mid n\in A\}}{x}$) exists, where $\pi_{S}(x)$ and $\pi(x)$ are defined by $$ \pi(x)=\#\{p\leq x\mid p\in\mathbb{P}\}\text{ and }\pi_{S}(x)=\#\{p\leq x\mid p\in S\}. $$ The set of primes (resp. 
of natural numbers) $S$ (resp. $A$) is said to have Dirichlet density $\delta(S)$ (resp. Dedekind-Dirichlet density $\delta(A)$) if the limit \\ $\delta(S)=\lim_{z\rightarrow 1^{+}}\frac{\sum_{p\in S}\frac{1}{p^{z}}}{\text{log }\left(\frac{1}{z-1}\right)}$ (resp. $\delta(A)=\lim_{z\rightarrow 1^{+}}(z-1)\sum_{n\in A}\frac{1}{n^{z}}$) exists. Recall that if the set $A$ of natural numbers has natural density $d(A)$, then it also has Dedekind-Dirichlet density $\delta (A)$ with $d(A)=\delta(A)$. Further, the set of primes $S$ is said to be regular if there is a holomorphic function $g(z)$ on $Re(z)\geq 1$ such that $$ \sum_{p\in S}\frac{1}{p^{z}}=\delta(S)\text{ log}\left(\frac{1}{z-1}\right)+g(z). $$ We need the following technical lemma (see \cite[Lemma 2.1]{ilker3}). \begin{lem}\thlabel{thmdens} Let $S_1$ and $S_2$ be two regular sets of primes such that $\delta(S_{1})=\delta(S_{2})$. Then the function $\sum_{p\in S_1}\frac{1}{p^{z}}-\sum_{q\in S_1}\frac{1}{q^{z}}$ is analytic on $Re(z)\geq 1$. \end{lem} The following proposition said that the set of primes $S$ is regular if it has a natural density that satisfies certain error term (see \cite[Proposition 2.2]{ilker3}). \begin{prop}\thlabel{prop1} Let $S\subseteq \mathbb{P}$ be a set of primes that have natural density $d(S)$. Define $E(x)=\frac{\pi_{S}(x)}{\pi(x)}-d(S)$ to be the error function. Suppose that there are $\alpha>0$, $C>0$, and $M>0$ such that for all $x>M$ we have $\mid E(x)\mid \leq C x^{-\alpha}$. Then $S$ is a regular set of primes. \end{prop} \section{The Chebotarev-Sato-Tate equidistribution} We recall now some properties of the Shimura lift (see \cite{shimura73}). The Fourier coefficients of $f$ and $F_t$ are related by the following formula \begin{equation} A_t(n)=\sum_{d|n}\chi_{t,N}(d)d^{k-1}a\left(\frac{n^2}{d^2}t\right),\label{eq:001} \end{equation} where $\chi_{t,N}$ denotes the character $\chi_{t,N}(d):=\chi(d)\left(\frac{(-1)^{k}N^{2}t}{d}\right)$. 
Since $f$ is the Hecke eigenform for the Hecke operator $T_{p^{2}}$, $F_t$ is an eigenform for the Hecke operator $T_p$, for all primes $p\nmid N$. Further, we have $F_{t}=a(t)F$, where $F$ is a normalised Hecke eigenform independant of $t$. Applying the Ramanujan-Petersson bound to the Fourier coefficients of $F_t$, then $\mid \frac{A_{t}(p)}{a(t)}\mid \leq 2\,p^{\frac{k-1}{2}}$. Since $F_{t}\in S_{2k}(N/2,\chi^{2})$, then $A_{t}(p)=\chi^{2}(p)\overline{A_{t}(p)}$. Therefore $\frac{A_{t}(p)}{\chi(p)}\in\R$ and define $$ B_{t}(p):=\frac{A_{t}(p)}{2 a(t)p^{\frac{k-1}{2}}\chi(p)}\in[-1,1]. $$ Notice that $a(t)\in\R$, since $a(t)=\frac{A_{t}(1)}{\chi(1)}$. Recall that the Sato-Tate measure $\mu$ is the measure on $[-1,1]$ given by $\frac{2}{\pi}\sqrt{1-t^{2}}\,dt$. We state the important Sato-Tate equidistribution theorem for $\Gamma_{0}(N)$ (see Theorem B of \cite{taylor}). \begin{thm}\thlabel{thm3}(Barnet-Lamb, Geraghty, Harris, Taylor). Let $k\geq 1$ and let $F_{t}=\sum_{n\geq 1}A(n)q^{n}\in S_{2k}(N/2,\chi^{2})$ be a cuspidal Hecke eigenform of weight $2k$ for $\Gamma_{0}(N)$. Suppose that $F_{t}$ is without multiplication. Denote by $Im\chi$ the image of $\chi$ and let $\xi\in Im\chi$. Then, when $p$ runs through the primes $p\nmid N$ such that $\chi(p)=\xi$, the numbers $B(p)=\frac{A_{t}(p)}{2 a(t)p^{\frac{k-1}{2}}\chi(p)}\in[-1,1]$ are $\mu-$equidistributed in $[-1,1]$. \end{thm} Inam et al. (see \cite{ilker3},\cite{ilker2},\cite{ilker1}) obtained the equidistribution of the coefficients $a(t n^{2})$ by using \thref{thm3}. In order to prove the geometric equidistribution on the plan as it was explained in the introduction, we need the following hybrid Chebotarev-Sato-Tate equidistribution proved for elliptic curves in \cite{murty} for the first time, and it has been generalized recently by Wong (see \cite{peng}) particularly to non-CM Hecke eigenforms. \begin{prop}(Wong)\thlabel{cor2} Let $q$ be a natural number and $d$ an integer with $(d,q)=1$. 
Let $[a,b]\subset[-1,1]$ and put $S_{[a,b]}:=\{p\text{ prime}\mid p\equiv d\pmod q, B_{t}(p)\in [a,b]\}$. The set $S_{[a,b]}$ has natural density equal to $\frac{2}{\pi\varphi (q)}\int_{a}^{b}\sqrt{1-t^{2}}\,dt$. \end{prop} Using Dirichlet's theorem on arithmetic progressions, this proposition could be rewritten as follows. \begin{prop}\thlabel{cor1} Let $q$ be a natural number, $\varepsilon\pmod q$ a Dirichlet character and $\xi$ a root of unity such that $\xi\in\,Im\,\varepsilon $. Let $[a,b]\subset[-1,1]$ and put $S_{[a,b]}:=\{p\text{ prime}\mid \varepsilon(p)=\xi , B_{t}(p)\in [a,b]\}$. The set $S_{[a,b]}$ has natural density equal to $\frac{1}{\# Im\,\varepsilon}\frac{2}{\pi}\int_{a}^{b}\sqrt{1-t^{2}}\,dt$, where $\# Im\,\varepsilon$ is the cardinality of the image of $\varepsilon $. \end{prop} We will use frequently throughout the paper the following lemma (see \cite{mezroui}). \begin{lem} Under the hypothesis fixed in the introduction, let $n$ be an integer such that $(n,N)=1$. Then $\frac{a(t n^{2})}{\chi(n)}\in\R$. \end{lem} \section{Preliminaries Results} We next show that the Chebotarev-Sato-Tate theorem (see \cite[Proposition 2.2]{peng}) gives the equidistribution of the coefficients $a(t p^{2})$ when a primes $p$ run over arithmetic progressions. \begin{thm}\thlabel{thm4} We use the assumptions fixed in the introduction and suppose that $F_{t}$ has no CM. Let $q$ be a natural number, $\varepsilon\pmod q$ a Dirichlet character and $\xi$ a root of unity such that $\xi\in\,Im\,\varepsilon $. Define the set of primes $$ \mathbb{P}_{\varepsilon ,\xi,>}:=\left\{p\in\mathbb{P}\mid \varepsilon(p)=\xi, \frac{a(t p^{2})}{\chi(p)}>0\right\}, $$ and similarly $\mathbb{P}_{\varepsilon,\xi}$, $\mathbb{P}_{\varepsilon,\xi,<}$, $\mathbb{P}_{\varepsilon,\xi ,\geq}$, $\mathbb{P}_{\varepsilon,\xi ,\leq}$, and $\mathbb{P}_{\varepsilon,\xi ,=0}$. Let $d$ be an integer such that $(d,q)=1$. 
Define also $$ \mathbb{P}_{d,q,>}:=\left\{p\in\mathbb{P}\mid p\equiv d\text{ mod }q,\frac{a(t p^{2})}{\chi(p)}>0\right\}, $$ and similarly $\mathbb{P}_{d,q}$, $\mathbb{P}_{d,q,<}$, $\mathbb{P}_{d,q,\geq}$, $\mathbb{P}_{d,q,\leq}$, $\mathbb{P}_{d,q,=0}$. The sets $\mathbb{P}_{d,q,>}$, $\mathbb{P}_{d,q,<}$, $\mathbb{P}_{d,q,\geq}$, $\mathbb{P}_{d,q,\leq}$ have natural density $\frac{1}{2\varphi(q)}$ and $\mathbb{P}_{d,q,=0}$ has natural density $0$. Further, the sets $\mathbb{P}_{\varepsilon,\xi,>}$, $\mathbb{P}_{\varepsilon,\xi ,<}$, $\mathbb{P}_{\varepsilon,\xi ,\geq}$, $\mathbb{P}_{\varepsilon,\xi ,\leq}$ have natural density $\frac{1}{2\# Im\,\varepsilon}$ and $\mathbb{P}_{\varepsilon,\xi ,=0}$ has natural density $0$, where $\# Im\,\varepsilon$ is the cardinality of the image of $\varepsilon$. \end{thm} \begin{proof} Define the sets $\pi_{d,q,>}(x):=\#\{p\leq x\mid p\equiv d\text{ mod }q,\frac{a(t p^{2})}{\chi(p)}>0\}$, and similarly, $\pi_{d,q}(x)$,$\pi_{d,q,<}(x)$, $\pi_{d,q,\geq }(x)$, $\pi_{d,q,\leq }(x)$, and $\pi_{d,q,=0}(x)$. Without loss of generality, we can assume that $F_t$ is normalised and thus $a(t)=1$. Denote the character $\left(\frac{(-1)^{k}N^{2}t}{.}\right)$ by $\chi_{1}(.)=\left(\frac{(-1)^{k}N^{2}t}{.}\right)$. The formula \eqref{eq:001} yields $$ \frac{a(t p^{2})}{\chi(p)}>0 \Longleftrightarrow B_{t}(p)>\frac{\chi_{1}(p)}{2\sqrt{p}}. $$ Let $\epsilon >0$. Since for all $p>\frac{1}{4 \epsilon^{2}}$, we have $\frac{\chi_{1}(p)}{2\sqrt{p}}=\frac{1}{2\sqrt{p}}<\epsilon$, then \begin{multline} \pi_{d,q,>}(x)+\#\{p\leq x \text{ prime}\mid p\equiv d\text{ mod }q, p\leq \frac{1}{4 \epsilon^{2}}\}\geq\\ \# \{p\leq x \text{ prime}\mid p\equiv d\text{ mod }q, B_{t}(p)>\epsilon\}. 
\end{multline} Applying \thref{cor2} we get $$ \lim_{x\rightarrow \infty}\frac{\#\{p\leq x\text{ prime}\mid p\equiv d\text{ mod }q, B_{t}(p)>\epsilon\}}{\pi(x)}=\frac{\mu([\epsilon,1])}{\varphi(q)} $$ and then $$ \lim_{x\rightarrow \infty}\frac{\#\{p\leq x\text{ prime}\mid p\equiv d\text{ mod }q, B_{t}(p)>\epsilon\}}{\pi_{d,q}(x)}=\mu([\epsilon,1]). $$ It follows that $\liminf_{x\rightarrow \infty} \frac{\pi_{d,q,>}(x)}{\pi_{d,q}(x)}\geq \mu([\epsilon,1])$ for all $\epsilon >0$, hence $\liminf_{x\rightarrow \infty} \frac{\pi_{d,q,>}(x)}{\pi_{d,q}(x)}\geq \mu([0,1])=\frac{1}{2}$. Similarly, we have $$ \liminf_{x\rightarrow \infty} \frac{\pi_{d,q,\leq }(x)}{\pi_{d,q}(x)}\geq \mu([0,1])=\frac{1}{2}. $$ Since $\pi_{d,q,\leq }(x)=\pi_{d,q}(x)-\pi_{d,q,>}(x)$, then $\limsup_{x\rightarrow \infty}\frac{\pi_{d,q,>}(x)}{\pi_{d,q}(x)}=\frac{1}{2}$. Using the same method, we obtain the densities of $\mathbb{P}_{d,q,<}$, $\mathbb{P}_{d,q,\geq }$, and $\mathbb{P}_{d,q,\leq }$. Finally, since $\pi_{d,q,= 0}(x)=\pi_{d,q,\geq } (x)-\pi_{d,q,>}(x)$, then the density of $\mathbb{P}_{d,q,=0}(x)$ is zero. The densities of the sets $\mathbb{P}_{\varepsilon,\xi,>}$, $\mathbb{P}_{\varepsilon,\xi,<}$, $\mathbb{P}_{\varepsilon,\xi ,\geq}$, $\mathbb{P}_{\varepsilon,\xi ,\leq}$, and $\mathbb{P}_{\varepsilon,\xi ,=0}$ are obtained similarly by using \thref{cor1}. \end{proof} The following theorem said that the set of primes of \thref{thm4} is regular if the Chebotarev-Sato-Tate theorem satisfies certain error term. The proof is closely similar to that of \cite[Theorem 4.2]{ilker3}. 
\begin{thm}\thlabel{th5} Assuming the assumptions of \thref{thm4} and suppose there are $C>0$ and $\alpha>0$ such that $$ \left|\frac{\#\left\{p\leq x\text{ prime}\mid \varepsilon(p)=\xi, \frac{A_{t}(p)}{2 a(t)p^{\frac{k-1}{2}}\chi(p)}\in [a,b]\right\}}{\pi(x)}-\frac{\mu([a,b])}{\# Im\,\varepsilon}\right| \leq \frac{C}{x^{\alpha}}\cdot $$ Then, the sets $\mathbb{P}_{\varepsilon,\xi, \geq }$, $\mathbb{P}_{\varepsilon,\xi, \leq }$, $\mathbb{P}_{\varepsilon,\xi, >}$, $\mathbb{P}_{\varepsilon,\xi, <}$ and $\mathbb{P}_{\varepsilon,\xi, =0}$ are regular sets of primes. \end{thm} \begin{rmk} Let $\xi_{q}$ be a qth root of unity. The previous error term is weaker than the one conjectured by Akiyama and Tanigawa (see \cite{akiyama}) and it can be obtained by \cite[Theorem 1.3]{peng} if GRH is assumed and also, if $L(z,Sym^{m}\frac{F_{t}}{a(t)}\otimes \eta)$ is automorphic over $\mathbb{Q}$ for every $m$ and for all irreducible characters $\eta$ of $G(\mathbb{Q}(\xi_{q})/\mathbb{Q})$. \end{rmk} To proceed with our proof, we establish the following two lemmas. \begin{lem}\thlabel{thm6} Assuming the assumptions fixed in the introduction and suppose that $F_{t}$ has no CM. Let $q$ be a natural number. Suppose that for all $\varepsilon\pmod q$ Dirichlet characters and all roots of unity $\xi$ such that $\xi\in\,Im\,\varepsilon$, there are $C_{\varepsilon,\xi}>0$ and $\alpha_{\varepsilon ,\xi}>0$ such that \begin{equation}\label{specc} \left|\frac{\#\left\{p\leq x\text{ prime}\mid p\nmid N, \varepsilon(p)=\xi, \frac{A_{t}(p)}{2 a(t)p^{\frac{k-1}{2}}\chi(p)}\in [a,b]\right\}}{\pi(x)}-\frac{\mu([a,b])}{\# Im\,\varepsilon}\right| \leq \frac{C_{\varepsilon,\xi}}{x^{\alpha_{\varepsilon,\xi}}}. \end{equation} Suppose further that $a(t)>0$. 
Define the multiplicative function, $\forall n\in\mathbb{N}$, \begin{displaymath} f(n)= \begin{cases} 1, & \text{if $\frac{a(t n^{2})}{\chi(n)}>0$ and $(n,N)=1$,}\\\\ -1, & \text{if $\frac{a(t n^{2})}{\chi(n)}<0$ and $(n,N)=1$,}\\\\ 0, & \text{if $a(t n^{2})=0$ and $(n,N)=1$,}\\\\ 0, & \text{if $(n,N)\neq 1$.}\\\\ \end{cases} \end{displaymath} Let $d$ be an integer with $(d,q)=1$. Then the Dirichlet series $$ F(z)=\sum_{\substack{n\geq 1\\n\equiv d\text{ mod }q}}\frac{f(n)}{n^{z}} $$ is holomorphic on $Re(z)\geq 1$. \end{lem} \begin{proof} We have \begin{align*} \sum_{\substack{n\geq 1\\n\equiv d\text{ mod }q}}\frac{f(n)}{n^{z}}&=\frac{1}{\varphi(q)}\sum_{n=1}^{\infty}\frac{f(n)}{n^{z}}\times \left(\sum_{\varepsilon\text{ mod }q}\varepsilon(n)\overline{\varepsilon (d)}\right)\\ &=\frac{1}{\varphi(q)}\sum_{\varepsilon\text{ mod }q}\left(\sum_{n=1}^{\infty}\frac{f(n)\varepsilon (n)}{n^{z}}\right)\times \overline{\varepsilon (d)}. \end{align*} Since the first sum is finite, it suffices to show that $G_{\varepsilon}(z)=\sum_{n=1}^{\infty}\frac{f(n)\varepsilon(n)}{n^{z}}$ is holomorphic on $Re(z)\geq 1$. Since $a(t)>0$, and $\forall m,n\in\mathbb{N}$, $(m,N)=1$, $(n,N)=1$, $$ \frac{a(t m^{2})}{\chi(m)}\frac{a(t n^{2})}{\chi(n)}=a(t)\frac{a(t m^{2} n^{2})}{\chi(mn)}, $$ then $f(n)$ is multiplicative. Applying \cite[Lemma 2.1.2]{ilker2}, we obtain $$ \text{log } G_{\varepsilon}(z)=\sum_{p\in\mathbb{P}}\frac{f(p)\varepsilon(p)}{p^{z}}+g(z), $$ where $g(z)$ is a function that is holomorphic on $Re(z)>\frac{1}{2}$. Hence \begin{align*} \text{log } G_{\varepsilon}(z)&=\sum_{p\in\mathbb{P}}\frac{f(p)\varepsilon(p)}{p^{z}}+g(z)\\ &=\sum_{\xi\in\,Im(\varepsilon)}\xi\sum_{p\in\mathbb{P}_{\varepsilon,\xi}}\frac{f(p)}{p^{z}}+g(z)\\ &=\sum_{\xi\in\,Im(\varepsilon)}\xi\left(\sum_{p\in\mathbb{P}_{\varepsilon,\xi,>}}\frac{1}{p^{z}}-\sum_{p\in\mathbb{P}_{\varepsilon,\xi,<}}\frac{1}{p^{z}}\right)+g(z). 
\end{align*} The sets $\mathbb{P}_{\varepsilon,\xi,>}$ and $\mathbb{P}_{\varepsilon,\xi,<}$ are regular sets of primes, and they have the same density $\frac{1}{2\# Im\,\varepsilon}$ by \thref{thm4}. Therefore by \thref{thmdens}, $\text{log } G_{\varepsilon}(z)$ is holomorphic on $R(z)\geq 1$, and consequently $G_{\varepsilon}(z)$ is also holomorphic. \end{proof} \begin{lem}\thlabel{leme1} We use the assumptions fixed in the introduction and suppose that $F_{t}$ has no CM. Let $q$ be a natural number. Suppose that for all Dirichlet characters $\varepsilon\pmod q$ and all roots of unity $\xi$ such that $\xi\in\,Im\,\varepsilon$, there are $C_{\varepsilon,\xi}>0$ and $\alpha_{\varepsilon ,\xi}>0$ such that \begin{equation} \left|\frac{\#\left\{p\leq x\text{ prime}\mid p\nmid N, \varepsilon(p)=\xi, \frac{A_{t}(p)}{2 a(t)p^{\frac{k-1}{2}}\chi(p)}\in [a,b]\right\}}{\pi(x)}-\frac{\mu([a,b])}{\# Im\,\varepsilon}\right| \leq \frac{C_{\varepsilon,\xi}}{x^{\alpha_{\varepsilon,\xi}}}. \end{equation} Then for all integers $d$, $(d,q)=1$, the set $$ \{n\in\mathbb{N}\mid (n,N)=1, n\equiv d\text{ mod }q, a(t n^{2})\neq 0\} $$ has natural density. \end{lem} \begin{proof} We have $$ \sum_{\substack{n\geq 1\\n\equiv d\text{ mod }q}}\frac{f(n)^{2}}{n^{z}}=\frac{1}{\varphi(q)}\sum_{\varepsilon\text{ mod }q}\left(\sum_{n=1}^{\infty}\frac{f(n)^{2}\varepsilon (n)}{n^{z}}\right)\times \overline{\varepsilon (d)}. $$ We shall define $H_{\varepsilon}(z)=\sum_{n=1}^{\infty}\frac{f(n)^{2}\varepsilon (n)}{n^{z}}$. Applying \cite[Lemma 2.1.2]{ilker2} to get \begin{align*} \text{log } H_{\varepsilon}(z):=&\sum_{p\in\mathbb{P}}\frac{f(p)^{2}\varepsilon(p)}{p^{z}}+g_{\varepsilon}(z)\\ =&\sum_{\xi\in\,Im\,\varepsilon}\xi\left(\sum_{p\in\mathbb{P}_{\varepsilon,\xi,>}\cup \mathbb{P}_{\varepsilon,\xi,<}}\frac{1}{p^{z}}\right)+g_{\varepsilon}(z), \end{align*} where $g_{\varepsilon}(z)$ is a function that is holomorphic on $Re(z)>\frac{1}{2}$. 
Applying \thref{th5}, the sets $\mathbb{P}_{\varepsilon,\xi,>}$ and $\mathbb{P}_{\varepsilon,\xi,<}$ are regular sets of primes of natural density $\frac{1}{2\#\,Im\,\varepsilon}$. Then $$ \sum_{p\in\mathbb{P}_{\varepsilon,\xi,>}\cup \mathbb{P}_{\varepsilon,\xi,<}}\frac{1}{p^{z}}=\frac{1}{\#\,Im\,\varepsilon}\text{log }\left(\frac{1}{z-1}\right)+h_{\xi}(z), $$ where $h_{\xi}$ is a holomorphic function on $Re(z)\geq 1$. It follows that \begin{align*} \text{log } H_{\varepsilon}(z):=&\sum_{\xi\in\,Im\,\varepsilon}\xi\left(\sum_{p\in\mathbb{P}_{\varepsilon,\xi,>}\cup \mathbb{P}_{\varepsilon,\xi,<}}\frac{1}{p^{z}}\right)+g_{\varepsilon}(z)\\ =&\frac{\sum_{\xi\in\,Im\,\varepsilon}\xi}{\#\,Im\,\varepsilon}\text{log }\left(\frac{1}{z-1}\right)+\sum_{\xi\in\,Im\,\varepsilon}\xi \,h_{\xi}(z)+g_{\varepsilon}(z). \end{align*} Thus $\text{log } H_{\varepsilon_{0}}(z)=\text{log }\left(\frac{1}{z-1}\right)+h_{1}(z)+g_{\varepsilon_{0}}(z)$ where $\varepsilon_{0}$ is the principal Dirichlet character modulo $q$, and $\text{log } H_{\varepsilon}(z)=\sum_{\xi\in\,Im\,\varepsilon}\xi \,h_{\xi}(z)+g_{\varepsilon}(z)$ when $\varepsilon\neq \varepsilon_{0}$. From this we see that in all cases, there is $b_{\varepsilon}\in\mathbb{C}$ satisfying $$ H_{\varepsilon}(z)=\frac{b_{\varepsilon}}{z-1}+k_{\varepsilon}(z), $$ where $k_{\varepsilon}$ is holomorphic on $Re(z)\geq 1$. Therefore $$ \sum_{\substack{n\geq 1\\n\equiv d\text{ mod }q}}\frac{f(n)^{2}}{n^{z}}=\frac{b}{z-1}+k(z), $$ where $b\in\mathbb{C}$ and $k$ is holomorphic on $Re(z)\geq 1$. We can now apply Wiener-Ikehara's theorem (see \cite{kor}) to deduce the result. \end{proof} \begin{rmk}\thlabel{rem2} Notice that the natural density of the set $$ \{n\in\mathbb{N}\mid (n,N)=1, n\equiv d\text{ mod }q, a(t n^{2})\neq 0\} $$ is independent of the choice of $d$. Indeed, from Wiener-Ikehara's theorem we know that this density is equal to $\frac{h_{1}(1)+g_{\varepsilon_{0}}(1)}{\varphi(q)}$. 
\end{rmk} \section{Proof of \texorpdfstring{\thref{thm71}}{Theorem 1}} Before starting the proof, recall the theorem of Delange (see \cite{dela}). \begin{thm} Let $g:\mathbb{N}\longrightarrow \mathbb{C}$ be a multiplicative arithmetic function for which: \begin{enumerate} \item $\forall n\in\mathbb{N},\mid g(n)\mid \leq 1$. \item There exists $a\in\mathbb{C}$ such that $a\neq 1$ and satisfying $\lim_{x\rightarrow +\infty}\frac{\sum_{\substack{p\text{ prime}\\p\leq x}}g(p)}{\pi(x)}=a$. \end{enumerate} Then we have $$ \lim_{x\rightarrow +\infty}\frac{\sum_{n\leq x}g(n)}{x}=0. $$ \end{thm} We can now piece together the previous lemmas to prove \thref{thm71}. \begin{proof} We have \begin{equation} \sum_{\substack{1\leq n\leq x\\n\equiv d\text{ mod }q}}f(n)=\frac{1}{\varphi(q)}\sum_{\varepsilon\text{ mod }q}\left(\sum_{1\leq n\leq x}f(n)\varepsilon (n)\right)\times \overline{\varepsilon (d)}. \end{equation} For a Dirichlet character $\varepsilon$ modulo $q$, we have \begin{align*} \lim_{x\rightarrow +\infty}\frac{\sum_{1\leq p\leq x}f(p)\varepsilon (p)}{\pi(x)}=&\lim_{x\rightarrow +\infty}\sum_{\xi\in\,Im\,\varepsilon}\xi\frac{\#\{p\leq x\mid p\in\mathbb{P}_{\varepsilon,\xi,>}\}}{\pi(x)}-\\ &\xi\frac{\#\{p\leq x\mid p\in\mathbb{P}_{\varepsilon,\xi,<}\}}{\pi(x)}\\ =&0, \end{align*} since $\mathbb{P}_{\varepsilon,\xi,>}$ and $\mathbb{P}_{\varepsilon,\xi,<}$ have the same natural density $\frac{1}{2\#\,Im\,\varepsilon }$. Applying Delange's theorem, we get $\lim_{x\rightarrow +\infty}\frac{\sum_{1\leq n\leq x}f(n)\varepsilon (n)}{x}=0$, and consequently $$ \lim_{x\rightarrow +\infty}\frac{\sum_{\substack{1\leq n\leq x\\n\equiv d\text{ mod }q}}f(n)}{x}=0. $$ From which we have \begin{multline}\label{eq:fin1} \lim_{x\rightarrow +\infty}\frac{\#\left\{n\leq x\mid (n,N)=1, n\equiv d\text{ mod }q, \frac{a(t n^{2})}{\chi(n)}>0\right\}}{x}-\\ \frac{\#\left\{n\leq x\mid (n,N)=1, n\equiv d\text{ mod }q,\frac{a(t n^{2})}{\chi(n)}<0\right\}}{x}=0. 
\end{multline} By \thref{leme1}, there is $b>0$ such that \begin{multline}\label{eq:fin2} \lim_{x\rightarrow +\infty}\frac{\#\left\{n\leq x\mid (n,N)=1, n\equiv d\text{ mod }q, \frac{a(t n^{2})}{\chi(n)}>0\right\}}{x}+\\ \frac{\#\left\{n\leq x\mid (n,N)=1, n\equiv d\text{ mod }q,\frac{a(t n^{2})}{\chi(n)}<0\right\}}{x}=b. \end{multline} The result follows from \eqref{eq:fin1} and \eqref{eq:fin2}. \end{proof} We show finally by another method how the natural density of the set defined in \thref{leme1} is independent of $d$. \begin{prop}\thlabel{rem1} Assuming the assumptions of the main theorem. Then, the natural density of the set $$ \{n\in\mathbb{N}\mid (n,N)=1, n\equiv d\text{ mod }q, a(t n^{2})\neq 0\} $$ is equal to $$ \frac{1}{\varphi(q)}\lim_{z\rightarrow 1^{+}}(z-1)\sum_{\substack{n=1\\(n,q)=1}}^{\infty}\frac{f(n)^2}{n^{z}}. $$ \end{prop} \begin{proof} Since $\{n\in\mathbb{N}\mid (n,N)=1, n\equiv d\text{ mod }q, a(t n^{2})\neq 0\}$ has natural density by \thref{leme1}, then it suffices to prove that the Dedekind-Dirichlet density of this set is equal to $\frac{1}{\varphi(q)}\lim_{z\rightarrow 1^{+}}(z-1)\sum_{\substack{n=1\\(n,q)=1}}^{\infty}\frac{f(n)^2}{n^{z}}$. We shall define $B(z)=\sum_{\substack{n=1\\n\equiv d\text{ mod }q}}^{\infty}\frac{f(n)^2}{n^{z}}$ and $C_{\varepsilon }(z)=\sum_{n=1}^{\infty}\frac{f(n)^{2}\varepsilon(n)}{n^{z}}$ where $\varepsilon$ runs over Dirichlet characters modulo $q$. We must now compute $\lim_{z\rightarrow 1^{+}}(z-1)B(z)$. By the same computations as in the previous theorem, il suffices to compute $\lim_{z\rightarrow 1^{+}}(z-1)C_{\varepsilon}(z)$. 
We have \begin{align*} \frac{C_{\varepsilon}(z)}{L(z,\varepsilon)}&=\prod_{\substack{p\in\mathbb{P}}}\sum_{k=0}^{\infty}f(p^{k})^2\varepsilon(p^{k}) p^{-kz}\times\prod_{p\in\mathbb{P}}(1-\frac{\varepsilon(p)}{p^{z}})\\ &=\prod_{\substack{p\in\mathbb{P}}}(1-\frac{\varepsilon(p)}{p^{z}})\times\prod_{\substack{p\in\mathbb{P}}}\left(1+\sum_{\substack{k=1\\a(t p^{2k})\neq 0}}^{\infty}\frac{\varepsilon(p^{k})}{p^{kz}}\right)\\ &=\prod_{\substack{p\in\mathbb{P}\\a(t p^{2})\neq 0}}\left[(1-\frac{\varepsilon(p)}{p^{z}})\left(1+\frac{\varepsilon(p)}{p^{z}}+\sum_{\substack{k=2\\a(t p^{2k})\neq 0}}^{\infty}\frac{\varepsilon(p^{k})}{p^{kz}}\right)\right]\\ &\times \prod_{\substack{p\in\mathbb{P}\\a(t p^{2})=0}}\left[(1-\frac{\varepsilon(p)}{p^{z}})\left(1+\sum_{\substack{k=2\\a(t p^{2k})\neq 0}}^{\infty}\frac{\varepsilon(p^{k})}{p^{kz}}\right)\right]\\ &=\prod_{\substack{p\in\mathbb{P}\\a(t p^{2})\neq 0}}\left(1-\frac{\varepsilon(p^{2})}{p^{2z}}+h_{1}(z,p)\right)\times\prod_{\substack{p\in\mathbb{P}\\a(t p^{2})=0}}\left(1-\frac{\varepsilon(p)}{p^{z}}+h_{2}(z,p)\right), \end{align*} where $h_{1}(z,p)$ and $h_{2}(z,p)$ are the remaining terms. Applying logarithm to $\frac{C_{\varepsilon}(z)}{L(z,\varepsilon)}$ and notice that $\sum_{\substack{p\in\mathbb{P}\\a(t p^{2})\neq 0}}\text{log } \left(1-\frac{\varepsilon(p^{2})}{p^{2z}}+h_{1}(z,p)\right)$ is holomorphic on $Re(z)\geq 1$. On the other hand, we have $\sum_{\substack{p\in\mathbb{P}\\a(t p^{2})=0}}\text{log }\left(1-\frac{\varepsilon(p)}{p^{z}}+h_{2}(z,p)\right)=\sum_{\substack{p\in\mathbb{P}\\a(t p^{2})=0}}\frac{\varepsilon(p)}{p^{z}}+h_{3}(z,p)$ where $h_{3}(z,p)$ is holomorphic on $Re(z)\geq 1$. 
Further, since for all roots of unity $\xi$ such that $\xi\in\,Im\,\varepsilon$, the set $\mathbb{P}_{\varepsilon ,\xi,=0}$ is a regular set of primes of density $0$ by \thref{thm4}, then $$ \sum_{\substack{p\in\mathbb{P}\\a(t p^{2})=0}}\frac{\varepsilon (p)}{p^{z}}=\sum_{\xi\in\,Im\,\varepsilon }\xi\sum_{\substack{p\in\mathbb{P}_{\varepsilon,\xi,=0}}}\frac{1}{p^{z}} $$ is also holomorphic on $Re(z)\geq 1$. Thus $\text{log } \frac{C_{\varepsilon}(z)}{L(z,\varepsilon)}$ is holomorphic on $Re(z)\geq 1$ and by taking exponential we see that $\frac{C_{\varepsilon}(z)}{L(z,\varepsilon)}$ is also holomorphic on $Re(z)\geq 1$. Then the limit $\lim_{z\rightarrow 1^{+}}(z-1)C_{\varepsilon_{0}}(z)$ exists, where $\varepsilon_{0}$ is the principal character modulo $q$, and $\lim_{z\rightarrow 1^{+}}(z-1)C_{\varepsilon}(z)=0$ when $\varepsilon\neq\varepsilon_{0}$. \begin{align*} \lim_{z\rightarrow 1^{+}}(z-1)B(z)=&\frac{1}{\varphi(q)}\lim_{z\rightarrow 1^{+}}(z-1)C_{\varepsilon_{0}}(z)\\ =&\frac{1}{\varphi(q)}\lim_{z\rightarrow 1^{+}}(z-1)\sum_{\substack{n=1\\(n,q)=1}}^{\infty}\frac{f(n)^2}{n^{z}}. \end{align*} \end{proof} We conclude with some related remarks. \begin{rmk} When $q=N$ or $(q,N)=1$, the Dedekind-Dirichlet density of the set $\{n\in\mathbb{N}\mid (n,N)=1, n\equiv d\text{ mod }q, a(t n^{2})=0\}$ exists . Indeed, we have $$ \lim_{z\rightarrow 1^{+}}(z-1)\sum_{\substack{n\geq 1\\ n\equiv d\text{ mod }q}}\frac{1}{n^{z}}=\frac{1}{q}. $$ By \thref{thm6}, it follows that \begin{equation}\label{eq:11s} \lim_{z\rightarrow 1^{+}}(z-1)\left(2 \sum_{\substack{(n,N)=1\\\frac{a(t n^{2})}{\chi(n)}>0\\n\equiv d\text{ mod }q}}\frac{1}{n^{z}}+\sum_{\substack{(n,N)=1\\a(t n^{2})=0\\ n\equiv d\text{ mod }q}}\frac{1}{n^{z}}+\sum_{\substack{(n,N)\neq 1\\ n\equiv d\text{ mod }q}}\frac{1}{n^{z}}\right)=\frac{1}{q}\cdot \end{equation} Let $\chi_{0}$ be a principal character modulo $N$. 
We have \begin{align*} \sum_{\substack{(n,N)=1\\ n\equiv d\text{ mod }q}}\frac{1}{n^{z}}=&\sum_{n\equiv d\text{ mod }q}\frac{\chi_{0}(n)}{n^{z}}\\ =&\frac{1}{\varphi(q)}\sum_{n\geq 0}\frac{\chi_{0}(n)}{n^{z}}\sum_{\varepsilon\text{ mod }q}\overline{\varepsilon(d)}\varepsilon(n)\\ =&\frac{1}{\varphi(q)}\sum_{\varepsilon\text{ mod }q}\overline{\varepsilon(d)}\sum_{n\geq 0}\frac{\chi_{0}(n)\varepsilon(n)}{n^{z}}. \end{align*} Following our hypothesis, if $q=N$ we consider $\chi_{0}\varepsilon$ as a character modulo $N$, if $(q,N)=1$ we consider it as a character modulo $q N$. Therefore $\lim_{z\rightarrow 1+}\sum_{\substack{(n,N)=1\\ n\equiv d\text{ mod }q}}\frac{1}{n^{z}}$ exists and thus $\lim_{z\rightarrow 1+}\sum_{\substack{(n,N)\neq 1\\ n\equiv d\text{ mod }q}}\frac{1}{n^{z}}$ also exists. Replacing this in \eqref{eq:11s} and the result follows. \end{rmk} \begin{rmk} The weaker version of \thref{thm71} could be obtained using \thref{rem1}. Indeed, in the proof of the previous proposition there is $b>0$ such that $\lim_{z\rightarrow 1^{+}}(z-1)B(z)=b$. Hence $\{n\in\mathbb{N}\mid (n,N)=1, n\equiv d\text{ mod }q\text{ and }a(t n^{2})\neq 0\}$ has a Dedekind-Dirichlet density equal to $b$. It follows from \eqref{eq:11s} that $$ \lim_{z\rightarrow 1^{+}}(z-1)\left(\sum_{\substack{(n,N)=1\\n\equiv d\text{ mod }q\\a(t n^{2})=0}}\frac{1}{n^{z}}+\sum_{\substack{(n,N)\neq 1\\ n\equiv d\text{ mod }q}}\frac{1}{n^{z}}\right)=\frac{1}{q}-b. $$ Replace this in \eqref{eq:11s} to get $$ \lim_{z\rightarrow 1^{+}}(z-1)\sum_{\substack{(n,N)=1\\n\equiv d\text{ mod }q\\\frac{a(t n^{2})}{\chi(n)}>0}}\frac{1}{n^{z}}=\frac{b}{2} $$ The equidistribution obtained here is in terms of the Dedekind-Dirichlet density only. \end{rmk} \bibliographystyle{spmpsci}
{ "redpajama_set_name": "RedPajamaArXiv" }
5,202
#include "config.h"

#if ENABLE(BLOB)

#include "BlobResourceHandle.h"

#include "AsyncFileStream.h"
#include "BlobStorageData.h"
#include "FileStream.h"
#include "FileSystem.h"
#include "HTTPParsers.h"
#include "KURL.h"
#include "ResourceError.h"
#include "ResourceHandleClient.h"
#include "ResourceRequest.h"
#include "ResourceResponse.h"
#include "SharedBuffer.h"
#include <wtf/MainThread.h>
#include <wtf/Ref.h>

namespace WebCore {

// Size of the intermediate buffer used for asynchronous (streamed) reads.
static const unsigned bufferSize = 512 * 1024;
// Sentinel meaning "no byte position was supplied" in Range handling.
static const long long positionNotSpecified = -1;

// HTTP status codes/texts synthesized for blob responses.
static const int httpOK = 200;
static const int httpPartialContent = 206;
static const int httpNotAllowed = 403;
static const int httpNotFound = 404;
static const int httpRequestedRangeNotSatisfiable = 416;
static const int httpInternalError = 500;
static const char* httpOKText = "OK";
static const char* httpPartialContentText = "Partial Content";
static const char* httpNotAllowedText = "Not Allowed";
static const char* httpNotFoundText = "Not Found";
static const char* httpRequestedRangeNotSatisfiableText = "Requested Range Not Satisfiable";
static const char* httpInternalErrorText = "Internal Server Error";

// Error domain reported in ResourceError for blob loads.
static const char* const webKitBlobResourceDomain = "WebKitBlobResource";

// Error codes stored in m_errorCode and reported via notifyFail()/notifyResponseOnError().
enum {
    notFoundError = 1,
    securityError = 2,
    rangeError = 3,
    notReadableError = 4,
    methodNotAllowed = 5
};

///////////////////////////////////////////////////////////////////////////////
// BlobResourceSynchronousLoader

namespace {

// Client used by loadResourceSynchronously(): captures the response, reads the
// whole body via BlobResourceHandle::readSync(), and records any failure.
class BlobResourceSynchronousLoader : public ResourceHandleClient {
public:
    BlobResourceSynchronousLoader(ResourceError&, ResourceResponse&, Vector<char>&);

    virtual void didReceiveResponse(ResourceHandle*, const ResourceResponse&) OVERRIDE;
    virtual void didReceiveData(ResourceHandle*, const char*, int, int /*encodedDataLength*/) OVERRIDE;
    virtual void didFinishLoading(ResourceHandle*, double /*finishTime*/) OVERRIDE;
    virtual void didFail(ResourceHandle*, const ResourceError&) OVERRIDE;

private:
    // Out-parameters owned by the caller of loadResourceSynchronously().
    ResourceError& m_error;
    ResourceResponse& m_response;
    Vector<char>& m_data;
};

BlobResourceSynchronousLoader::BlobResourceSynchronousLoader(ResourceError& error, ResourceResponse& response, Vector<char>& data)
    : m_error(error)
    , m_response(response)
    , m_data(data)
{
}

// Receives the synthesized response and immediately drains the body synchronously.
void BlobResourceSynchronousLoader::didReceiveResponse(ResourceHandle* handle, const ResourceResponse& response)
{
    // We cannot handle the size that is more than maximum integer.
    if (response.expectedContentLength() > INT_MAX) {
        m_error = ResourceError(webKitBlobResourceDomain, notReadableError, response.url(), "File is too large");
        return;
    }

    m_response = response;

    // Read all the data.
    m_data.resize(static_cast<size_t>(response.expectedContentLength()));
    static_cast<BlobResourceHandle*>(handle)->readSync(m_data.data(), static_cast<int>(m_data.size()));
}

// Data arrives through readSync() above, so these callbacks are no-ops.
void BlobResourceSynchronousLoader::didReceiveData(ResourceHandle*, const char*, int, int)
{
}

void BlobResourceSynchronousLoader::didFinishLoading(ResourceHandle*, double)
{
}

void BlobResourceSynchronousLoader::didFail(ResourceHandle*, const ResourceError& error)
{
    m_error = error;
}

}

///////////////////////////////////////////////////////////////////////////////
// BlobResourceHandle

// Creates an asynchronous handle; only GET is supported for blob loads.
PassRefPtr<BlobResourceHandle> BlobResourceHandle::createAsync(BlobStorageData* blobData, const ResourceRequest& request, ResourceHandleClient* client)
{
    // FIXME: Should probably call didFail() instead of blocking the load without explanation.
    if (!equalIgnoringCase(request.httpMethod(), "GET"))
        return 0;
    return adoptRef(new BlobResourceHandle(blobData, request, client, true));
}

// Loads the blob synchronously, filling error/response/data for the caller.
void BlobResourceHandle::loadResourceSynchronously(BlobStorageData* blobData, const ResourceRequest& request, ResourceError& error, ResourceResponse& response, Vector<char>& data)
{
    if (!equalIgnoringCase(request.httpMethod(), "GET")) {
        error = ResourceError(webKitBlobResourceDomain, methodNotAllowed, response.url(), "Request method must be GET");
        return;
    }

    BlobResourceSynchronousLoader loader(error, response, data);
    RefPtr<BlobResourceHandle> handle = adoptRef(new BlobResourceHandle(blobData, request, &loader, false));
    handle->start();
}

// The async flag selects which stream backend is created: AsyncFileStream
// (callback-driven) for async loads, FileStream for synchronous ones.
BlobResourceHandle::BlobResourceHandle(PassRefPtr<BlobStorageData> blobData, const ResourceRequest& request, ResourceHandleClient* client, bool async)
    : ResourceHandle(0, request, client, false, false)
    , m_blobData(blobData)
    , m_async(async)
    , m_errorCode(0)
    , m_aborted(false)
    , m_rangeOffset(positionNotSpecified)
    , m_rangeEnd(positionNotSpecified)
    , m_rangeSuffixLength(positionNotSpecified)
    , m_totalRemainingSize(0)
    , m_currentItemReadSize(0)
    , m_sizeItemCount(0)
    , m_readItemCount(0)
    , m_fileOpened(false)
{
    if (m_async)
        m_asyncStream = AsyncFileStream::create(this);
    else
        m_stream = FileStream::create();
}

BlobResourceHandle::~BlobResourceHandle()
{
    if (m_async) {
        if (m_asyncStream)
            m_asyncStream->stop();
    } else {
        if (m_stream)
            m_stream->stop();
    }
}

// Aborts the load; subsequent state-machine steps bail out on m_aborted.
void BlobResourceHandle::cancel()
{
    if (m_async) {
        if (m_asyncStream) {
            m_asyncStream->stop();
            m_asyncStream = 0;
        }
    }

    m_aborted = true;

    ResourceHandle::cancel();
}

// Main-thread trampoline: adopts the ref taken in start() and begins the load.
void delayedStartBlobResourceHandle(void* context)
{
    RefPtr<BlobResourceHandle> handle = adoptRef(static_cast<BlobResourceHandle*>(context));
    handle->doStart();
}

void BlobResourceHandle::start()
{
    if (m_async) {
        // Keep BlobResourceHandle alive until delayedStartBlobResourceHandle runs.
        ref();

        // Finish this async call quickly and return.
        callOnMainThread(delayedStartBlobResourceHandle, this);
        return;
    }

    doStart();
}

// Entry point of the load state machine: validates the blob and the Range
// header, then kicks off per-item size discovery.
void BlobResourceHandle::doStart()
{
    // Do not continue if the request is aborted or an error occurs.
    if (m_aborted || m_errorCode)
        return;

    // If the blob data is not found, fail now.
    if (!m_blobData) {
        m_errorCode = notFoundError;
        notifyResponse();
        return;
    }

    // Parse the "Range" header we care about.
    String range = firstRequest().httpHeaderField("Range");
    if (!range.isEmpty() && !parseRange(range, m_rangeOffset, m_rangeEnd, m_rangeSuffixLength)) {
        m_errorCode = rangeError;
        notifyResponse();
        return;
    }

    if (m_async)
        getSizeForNext();
    else {
        Ref<BlobResourceHandle> protect(*this); // getSizeForNext calls the client
        for (size_t i = 0; i < m_blobData->items().size() && !m_aborted && !m_errorCode; ++i)
            getSizeForNext();
        notifyResponse();
    }
}

// Advances size discovery by one blob item. In async mode a file item's size
// arrives later through didGetSize(); data items are resolved immediately.
void BlobResourceHandle::getSizeForNext()
{
    // Do we finish validating and counting size for all items?
    if (m_sizeItemCount >= m_blobData->items().size()) {
        seek();

        // Start reading if in asynchronous mode.
        if (m_async) {
            Ref<BlobResourceHandle> protect(*this);
            notifyResponse();
            m_buffer.resize(bufferSize);
            readAsync();
        }
        return;
    }

    const BlobDataItem& item = m_blobData->items().at(m_sizeItemCount);
    switch (item.type) {
    case BlobDataItem::Data:
        didGetSize(item.length);
        break;
    case BlobDataItem::File:
        if (m_async)
            m_asyncStream->getSize(item.path, item.expectedModificationTime);
        else
            didGetSize(m_stream->getSize(item.path, item.expectedModificationTime));
        break;
    default:
        ASSERT_NOT_REACHED();
    }
}

// Records the resolved size of the current item and continues discovery.
void BlobResourceHandle::didGetSize(long long size)
{
    // Do not continue if the request is aborted or an error occurs.
    if (m_aborted || m_errorCode)
        return;

    // If the size is -1, it means the file has been moved or changed. Fail now.
    if (size == -1) {
        m_errorCode = notFoundError;
        notifyResponse();
        return;
    }

    // The size passed back is the size of the whole file. If the underlying item is a sliced file, we need to use the slice length.
    const BlobDataItem& item = m_blobData->items().at(m_sizeItemCount);
    if (item.type == BlobDataItem::File && item.length != BlobDataItem::toEndOfFile)
        size = item.length;

    // Cache the size.
    m_itemLengthList.append(size);

    // Count the size.
    m_totalRemainingSize += size;
    m_sizeItemCount++;

    // Continue with the next item.
    getSizeForNext();
}

// Applies the parsed Range to the item list: skips whole items before the
// range start and clamps m_totalRemainingSize to the range length.
void BlobResourceHandle::seek()
{
    // Convert from the suffix length to the range.
    if (m_rangeSuffixLength != positionNotSpecified) {
        m_rangeOffset = m_totalRemainingSize - m_rangeSuffixLength;
        m_rangeEnd = m_rangeOffset + m_rangeSuffixLength - 1;
    }

    // Bail out if the range is not provided.
    if (m_rangeOffset == positionNotSpecified)
        return;

    // Skip the initial items that are not in the range.
    long long offset = m_rangeOffset;
    for (m_readItemCount = 0; m_readItemCount < m_blobData->items().size() && offset >= m_itemLengthList[m_readItemCount]; ++m_readItemCount)
        offset -= m_itemLengthList[m_readItemCount];

    // Set the offset that need to jump to for the first item in the range.
    m_currentItemReadSize = offset;

    // Adjust the total remaining size in order not to go beyond the range.
    if (m_rangeEnd != positionNotSpecified) {
        long long rangeSize = m_rangeEnd - m_rangeOffset + 1;
        if (m_totalRemainingSize > rangeSize)
            m_totalRemainingSize = rangeSize;
    } else
        m_totalRemainingSize -= m_rangeOffset;
}

// Synchronous bulk read: fills buf with up to `length` bytes drawn from the
// item list. Returns bytes read, or -1 on abort/error. Also drives the client
// callbacks (notifyReceiveData/notifyFinish).
int BlobResourceHandle::readSync(char* buf, int length)
{
    ASSERT(!m_async);
    Ref<BlobResourceHandle> protect(*this);

    int offset = 0;
    int remaining = length;
    while (remaining) {
        // Do not continue if the request is aborted or an error occurs.
        if (m_aborted || m_errorCode)
            break;

        // If there is no more remaining data to read, we are done.
        if (!m_totalRemainingSize || m_readItemCount >= m_blobData->items().size())
            break;

        const BlobDataItem& item = m_blobData->items().at(m_readItemCount);
        int bytesRead = 0;
        if (item.type == BlobDataItem::Data)
            bytesRead = readDataSync(item, buf + offset, remaining);
        else if (item.type == BlobDataItem::File)
            bytesRead = readFileSync(item, buf + offset, remaining);
        else
            ASSERT_NOT_REACHED();
        if (bytesRead > 0) {
            offset += bytesRead;
            remaining -= bytesRead;
        }
    }

    int result;
    if (m_aborted || m_errorCode)
        result = -1;
    else
        result = length - remaining;

    if (result > 0)
        notifyReceiveData(buf, result);

    if (!result)
        notifyFinish();

    return result;
}

// Copies bytes from an in-memory data item into buf, honoring both the item
// boundary and the remaining (possibly range-clamped) total size.
int BlobResourceHandle::readDataSync(const BlobDataItem& item, char* buf, int length)
{
    ASSERT(!m_async);

    long long remaining = item.length - m_currentItemReadSize;
    int bytesToRead = (length > remaining) ? static_cast<int>(remaining) : length;
    if (bytesToRead > m_totalRemainingSize)
        bytesToRead = static_cast<int>(m_totalRemainingSize);
    memcpy(buf, item.data->data() + item.offset + m_currentItemReadSize, bytesToRead);
    m_totalRemainingSize -= bytesToRead;

    m_currentItemReadSize += bytesToRead;
    if (m_currentItemReadSize == item.length) {
        m_readItemCount++;
        m_currentItemReadSize = 0;
    }

    return bytesToRead;
}

// Reads from a file-backed item, lazily opening the stream positioned at the
// slice offset. A zero-byte read marks the end of the item.
int BlobResourceHandle::readFileSync(const BlobDataItem& item, char* buf, int length)
{
    ASSERT(!m_async);

    if (!m_fileOpened) {
        long long bytesToRead = m_itemLengthList[m_readItemCount] - m_currentItemReadSize;
        if (bytesToRead > m_totalRemainingSize)
            bytesToRead = m_totalRemainingSize;
        bool success = m_stream->openForRead(item.path, item.offset + m_currentItemReadSize, bytesToRead);
        m_currentItemReadSize = 0;
        if (!success) {
            m_errorCode = notReadableError;
            return 0;
        }

        m_fileOpened = true;
    }

    int bytesRead = m_stream->read(buf, length);
    if (bytesRead < 0) {
        m_errorCode = notReadableError;
        return 0;
    }
    if (!bytesRead) {
        m_stream->close();
        m_fileOpened = false;
        m_readItemCount++;
    } else
        m_totalRemainingSize -= bytesRead;

    return bytesRead;
}
void BlobResourceHandle::readAsync() { ASSERT(m_async); // Do not continue if the request is aborted or an error occurs. if (m_aborted || m_errorCode) return; // If there is no more remaining data to read, we are done. if (!m_totalRemainingSize || m_readItemCount >= m_blobData->items().size()) { notifyFinish(); return; } const BlobDataItem& item = m_blobData->items().at(m_readItemCount); if (item.type == BlobDataItem::Data) readDataAsync(item); else if (item.type == BlobDataItem::File) readFileAsync(item); else ASSERT_NOT_REACHED(); } void BlobResourceHandle::readDataAsync(const BlobDataItem& item) { ASSERT(m_async); Ref<BlobResourceHandle> protect(*this); long long bytesToRead = item.length - m_currentItemReadSize; if (bytesToRead > m_totalRemainingSize) bytesToRead = m_totalRemainingSize; consumeData(item.data->data() + item.offset + m_currentItemReadSize, static_cast<int>(bytesToRead)); m_currentItemReadSize = 0; } void BlobResourceHandle::readFileAsync(const BlobDataItem& item) { ASSERT(m_async); if (m_fileOpened) { m_asyncStream->read(m_buffer.data(), m_buffer.size()); return; } long long bytesToRead = m_itemLengthList[m_readItemCount] - m_currentItemReadSize; if (bytesToRead > m_totalRemainingSize) bytesToRead = static_cast<int>(m_totalRemainingSize); m_asyncStream->openForRead(item.path, item.offset + m_currentItemReadSize, bytesToRead); m_fileOpened = true; m_currentItemReadSize = 0; } void BlobResourceHandle::didOpen(bool success) { ASSERT(m_async); if (!success) { failed(notReadableError); return; } // Continue the reading. readAsync(); } void BlobResourceHandle::didRead(int bytesRead) { if (bytesRead < 0) { failed(notReadableError); return; } consumeData(m_buffer.data(), bytesRead); } void BlobResourceHandle::consumeData(const char* data, int bytesRead) { ASSERT(m_async); Ref<BlobResourceHandle> protect(*this); m_totalRemainingSize -= bytesRead; // Notify the client. 
if (bytesRead) notifyReceiveData(data, bytesRead); if (m_fileOpened) { // When the current item is a file item, the reading is completed only if bytesRead is 0. if (!bytesRead) { // Close the file. m_fileOpened = false; m_asyncStream->close(); // Move to the next item. m_readItemCount++; } } else { // Otherwise, we read the current text item as a whole and move to the next item. m_readItemCount++; } // Continue the reading. readAsync(); } void BlobResourceHandle::failed(int errorCode) { ASSERT(m_async); Ref<BlobResourceHandle> protect(*this); // Notify the client. notifyFail(errorCode); // Close the file if needed. if (m_fileOpened) { m_fileOpened = false; m_asyncStream->close(); } } void BlobResourceHandle::notifyResponse() { if (!client()) return; if (m_errorCode) { Ref<BlobResourceHandle> protect(*this); notifyResponseOnError(); notifyFinish(); } else notifyResponseOnSuccess(); } void BlobResourceHandle::notifyResponseOnSuccess() { bool isRangeRequest = m_rangeOffset != positionNotSpecified; ResourceResponse response(firstRequest().url(), m_blobData->contentType(), m_totalRemainingSize, String(), String()); response.setExpectedContentLength(m_totalRemainingSize); response.setHTTPStatusCode(isRangeRequest ? httpPartialContent : httpOK); response.setHTTPStatusText(isRangeRequest ? httpPartialContentText : httpOKText); if (!m_blobData->contentDisposition().isEmpty()) response.setHTTPHeaderField("Content-Disposition", m_blobData->contentDisposition()); // BlobResourceHandle cannot be used with downloading, and doesn't even wait for continueDidReceiveResponse. // It's currently client's responsibility to know that didReceiveResponseAsync cannot be used to convert a // load into a download or blobs. 
if (client()->usesAsyncCallbacks()) client()->didReceiveResponseAsync(this, response); else client()->didReceiveResponse(this, response); } void BlobResourceHandle::notifyResponseOnError() { ASSERT(m_errorCode); ResourceResponse response(firstRequest().url(), "text/plain", 0, String(), String()); switch (m_errorCode) { case rangeError: response.setHTTPStatusCode(httpRequestedRangeNotSatisfiable); response.setHTTPStatusText(httpRequestedRangeNotSatisfiableText); break; case notFoundError: response.setHTTPStatusCode(httpNotFound); response.setHTTPStatusText(httpNotFoundText); break; case securityError: response.setHTTPStatusCode(httpNotAllowed); response.setHTTPStatusText(httpNotAllowedText); break; default: response.setHTTPStatusCode(httpInternalError); response.setHTTPStatusText(httpInternalErrorText); break; } // Note that we don't wait for continueDidReceiveResponse when using didReceiveResponseAsync. // This is not formally correct, but the client has to be a no-op anyway, because blobs can't be downloaded. if (client()->usesAsyncCallbacks()) client()->didReceiveResponseAsync(this, response); else client()->didReceiveResponse(this, response); } void BlobResourceHandle::notifyReceiveData(const char* data, int bytesRead) { if (client()) client()->didReceiveBuffer(this, SharedBuffer::create(data, bytesRead), bytesRead); } void BlobResourceHandle::notifyFail(int errorCode) { if (client()) client()->didFail(this, ResourceError(webKitBlobResourceDomain, errorCode, firstRequest().url(), String())); } static void doNotifyFinish(void* context) { BlobResourceHandle* handle = static_cast<BlobResourceHandle*>(context); if (!handle->aborted() && handle->client()) handle->client()->didFinishLoading(handle, 0); // Balance the ref() in BlobResourceHandle::notfyFinish(). handle->deref(); } void BlobResourceHandle::notifyFinish() { // Balanced in doNotifyFinish(). 
ref(); if (m_async) { // Schedule to notify the client from a standalone function because the client might dispose the handle immediately from the callback function // while we still have BlobResourceHandle calls in the stack. callOnMainThread(doNotifyFinish, this); return; } doNotifyFinish(this); } } // namespace WebCore #endif // ENABLE(BLOB)
{ "redpajama_set_name": "RedPajamaGithub" }
6,104
Q: Не подключиться к серверу с помощью filezilla Всегда спокойно подключался через этот фтп клиент, и соответственно в настройки никогда не лазил. Сейчас попробовал подключиться и вылетает такая ошибка Статус: Соединение установлено, ожидание приглашения... Ответ: 220 ProFTPD 1.3.5a Server (ProFTPD) [194.58.97.43] Команда: AUTH TLS Ответ: 234 AUTH TLS successful Статус: Инициализирую TLS... Ошибка: Ошибка GnuTLS -110: The TLS connection was non-properly terminated. Статус: Сервер неверно закрыл TLS соединение Ошибка: Невозможно подключиться к серверу A: Получилось решить проблему только переключением способа шифрования - вместо "Использовать явный FTP через TLS если доступен" ставлю "Использовать обычный FTP (небезопасно)".
{ "redpajama_set_name": "RedPajamaStackExchange" }
6,978
Organizations are experiencing a profound shift in the way they need to deliver value to customers. The contexts and conditions under which value offerings are developed and delivered today have changed dramatically over the last one to two decades, where digital technology has converged with traditional products and has become embedded into customers' everyday lives. While classic project management approaches emphasize efficiency and meeting internal business demands in terms of project delivery, they tend to lack flexibility in their way of approaching change, dealing with uncertainty and reducing time to market. Organizations have to become more agile and adjust the way they plan and build offerings, by increasing the frequency by which they deliver product increments and the way they learn from customer feedback during iterative product development and delivery. The purpose of this technical note is to convey the essential characteristics of the agile mindset that underpins a variety of agile methodologies and frameworks used in practice. Secondly, different agile methodologies and their advantages are explained in detail. Finally, the implementation of agile methodologies in organizations is discussed. In addition to known operational changes that are necessary, the broader organizational transformation that this type of change entails when introducing agile thinking from the start-up world into large established firms is also discussed.
{ "redpajama_set_name": "RedPajamaC4" }
7,402
{"url":"https:\/\/questions.examside.com\/past-years\/jee\/question\/locus-of-the-image-of-the-point-2-3-in-the-line-left-2x-3y-4-jee-main-2015-marks-4-53q58lreycxabfv0.htm","text":"NEW\nNew Website Launch\nExperience the best way to solve previous year questions with mock tests (very detailed analysis), bookmark your favourite questions, practice etc...\n1\n\n### JEE Main 2015 (Offline)\n\nLocus of the image of the point $$(2, 3)$$ in the line $$\\left( {2x - 3y + 4} \\right) + k\\left( {x - 2y + 3} \\right) = 0,\\,k \\in R,$$ is a:\nA\ncircle of radius $$\\sqrt 2$$.\nB\ncircle of radius $$\\sqrt 3$$.\nC\nstraight line parallel to $$x$$-axis\nD\nstraight line parallel to $$y$$-axis\n\n## Explanation\n\nIntersection point of $$2x - 3y + 4 = 0$$\n\nand $$x-2y+3=0$$ is $$(1, 2)$$\n\nSince, $$P$$ is the fixed point for given family of lines\n\nSo, $$PB=PA$$\n\n$${\\left( {\\alpha - 1} \\right)^2} + {\\left( {\\beta - 2} \\right)^2} = {\\left( {2 - 1} \\right)^2} + {\\left( {3 - 2} \\right)^2}$$\n\n$${\\left( {\\alpha - 1} \\right)^2} + {\\left( {\\beta - 2} \\right)^2} = 1 + 1 = 2$$\n\n$${\\left( {x - 1} \\right)^2} + {\\left( {y - 2} \\right)^2} = {\\left( {\\sqrt 2 } \\right)^2}$$\n\n$${\\left( {x - a} \\right)^2} + {\\left( {y - b} \\right)^2} = {r^2}$$\n\nTherefore, given locus is a circle with center $$(1, 2)$$ and radius $$\\sqrt 2 .$$\n2\n\n### JEE Main 2014 (Offline)\n\nLet $$C$$ be the circle with centre at $$(1, 1)$$ and radius $$=$$ $$1$$. 
If $$T$$ is the circle centred at $$(0, y)$$, passing through origin and touching the circle $$C$$ externally, then the radius of $$T$$ is equal to\nA\n$${1 \\over 2}$$\nB\n$${1 \\over 4}$$\nC\n$${{\\sqrt 3 } \\over {\\sqrt 2 }}$$\nD\n$${{\\sqrt 3 } \\over 2}$$\n\n## Explanation\n\nEquation of circle $$C \\equiv {\\left( {x - 1} \\right)^2} + {\\left( {y - 1} \\right)^2} = 1$$\n\nRadius of $$T = \\left| y \\right|$$\n\n$$T$$ touches $$C$$ externally\n\ntherefore,\n\nDistance between the centers $$=$$ sum of their radii\n\n$$\\Rightarrow \\sqrt {{{\\left( {0 - 1} \\right)}^2} + {{\\left( {y - 1} \\right)}^2}} = 1 + \\left| y \\right|$$\n\n$$\\Rightarrow {\\left( {0 - 1} \\right)^2} + {\\left( {y - 1} \\right)^2} = {\\left( {1 + \\left| y \\right|} \\right)^2}$$\n\n$$\\Rightarrow 1 + {y^2} + 1 - 2y = 1 + {y^2} + 2\\left| y \\right|$$\n\n$$2\\left| y \\right| = 1 - 2y$$\n\nIf $$y>0$$ then $$2y=1-2y$$ $$\\Rightarrow y = {1 \\over 4}$$\n\n$$y<0$$ then $$-2y=1-2y$$ $$\\Rightarrow 0 = 1$$ (not possible)\n\n$$\\therefore$$ $$y = {1 \\over 4}$$\n3\n\n### JEE Main 2013 (Offline)\n\nThe circle passing through $$(1, -2)$$ and touching the axis of $$x$$ at $$(3, 0)$$ also passes through the point\nA\n$$\\left( { - 5,\\,2} \\right)$$\nB\n$$\\left( { 2,\\,-5} \\right)$$\nC\n$$\\left( { 5,\\,-2} \\right)$$\nD\n$$\\left( { - 2,\\,5} \\right)$$\n\n## Explanation\n\nSince circle touches $$x$$-axis at $$(3,0)$$\n\n$$\\therefore$$ The equation of circle be\n\n$${\\left( {x - 3} \\right)^2} + {\\left( {y - 0} \\right)^2} + \\lambda y = 0$$\n\nAs it passes through $$(1, -2)$$\n\n$$\\therefore$$ Put $$x=1,$$ $$y=-2$$\n\n$$\\Rightarrow {\\left( {1 - 3} \\right)^2} + {\\left( { - 2} \\right)^2} + \\lambda \\left( { - 2} \\right) = 0$$\n\n$$\\Rightarrow \\lambda = 4$$\n\n$$\\therefore$$ equation of circle is $${\\left( {x - 3} \\right)^2} + {y^2} - 8 = 0$$\n\nNow, from the options $$\\left( {5, - 2} \\right)$$ satisfies equation of circle.\n4\n\n### AIEEE 2012\n\nThe length of the diameter 
of the circle which touches the $$x$$-axis at the point $$(1, 0)$$ and passes through the point $$(2, 3)$$ is:\nA\n$${{10} \\over 3}$$\nB\n$${{3} \\over 5}$$\nC\n$${{6} \\over 5}$$\nD\n$${{5} \\over 3}$$\n\n## Explanation\n\nLet center of the circle be $$\\left( {1,h} \\right)$$\n\n$$\\left[ {\\,\\,} \\right.$$ as circle touches $$x$$-axis at $$\\left. {\\left( {1,0} \\right)\\,\\,} \\right]$$\n\nLet the circle passes through the point $$B(2,3)$$\n\n$$\\therefore$$ $$CA=CB$$ (radius)\n\n$$\\Rightarrow C{A^2} = C{B^2}$$\n\n$$\\Rightarrow {\\left( {1 - 1} \\right)^2} + \\left( {h - 0} \\right){}^2 = {\\left( {1 - 2} \\right)^2} + {\\left( {h - 3} \\right)^2}$$\n\n$$\\Rightarrow {h^2} = 1 + {h^2} + 9 - 6h$$\n\n$$\\Rightarrow h = {{10} \\over 6} = {5 \\over 3}$$\n\nThus, diameter is $$2h = {{10} \\over 3}.$$\n\n### Joint Entrance Examination\n\nJEE Main JEE Advanced WB JEE\n\n### Graduate Aptitude Test in Engineering\n\nGATE CSE GATE ECE GATE EE GATE ME GATE CE GATE PI GATE IN\n\nNEET\n\nClass 12","date":"2022-05-28 06:47:20","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 1, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.8048349022865295, \"perplexity\": 1331.9686368810649}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, 
\"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2022-21\/segments\/1652663013003.96\/warc\/CC-MAIN-20220528062047-20220528092047-00637.warc.gz\"}"}
null
null
This 85sqm apartment is located on the third floor with an elevator and is composed of a living room, two bedrooms, a bathroom and a kitchen. The peculiarity of this apartment is that the walls, furniture and wardrobes are painted in different colors. The apartment opens onto the living room; furnished with a sofa bed (140cm x 190cm), an armchair, a table with five chairs and a corner with the television. A bookcase, a lamp and beautiful paintings complete the furnishings of the living room. From the living room there is a corridor leading to the two bedrooms, bathroom and kitchen. The first bedroom is furnished with two single beds (80cm x 190cm), if necessary they can be joined and become a double bed (160cm x 190cm), a bedside table and a large wardrobe. The second bedroom is furnished with a double bed (160cm x 190cm), two bedside tables, a chest and a wardrobe. The kitchen includes a refrigerator, oven, hob, dishwasher, electric boiler, microwave, crockery and cutlery for six people. The kitchen is also equipped with a table and four chairs. The bathroom has a toilet and a shower cabin. The apartment has an air conditioning system in the two bedrooms. This fun apartment's windows face on Via Giovanni da Castel Bolognesi and Via Bernardino Passeri. In this area, there are free parking spaces (white stripes) and payment parking spaces (blue stripes). The apartment is located in an historic building built for the State Railway Company in 1913, the last building was completed on the 22nd October 1915. During this period the First World War broke out and it is said that many women were used to finish the construction because most of the men had left for the war. American bombers unloaded 179 bombs on the Ostiense Station, the Trastevere and Garbatella neighbourhood. The bombers did not hit their target and a few bombs hit the building in Giovanni da Castel Bolognese and Via Ettore Rolli, devastating the buildings 'G', 'F' and the small school in the courtyard.
The beautiful actress Elsa Martinelli, came to live in this building and consequently saw the passing of well-known actors and characters of the movie industry. The young diva smoked wonderful American filtered cigarettes, she only took two or three puffs and then threw them in the yard. The butts immediately became an object of interest. The building itself became a movie set: Sergio Corbucci shot "Totò, Peppino and la dolce vita", Luciano Salce "Il Federale", with Ugo Tognazzi and Stefania Sandrelli. Today one often finds cinematographic crews in the courtyard shooting TV dramas and feature films. The beauty of this neighbourhood is that it has Testaccio on one side and Trastevere on the other. The area is rich in clubs, bars and restaurants, its proximity to the historic city centre makes it a very interesting and pleasant neighbourhood for those who want to visit Rome. The City of the Other Economy is in Testaccio, inside the Campo Boario ex slaughterhouse, in the ancient restored spaces of Campo Boario di Testaccio, 3,500 square meters of exhibition, sale, events and meetings for the promotion of the other economy: organic and social agriculture, fair trade, renewable energy and bio-building, re-use and recycling, sustainable mobility, but also a bookshop, a biobar and a bioristorante. Mercato di Testaccio, an unmissable gastronomic centre, an ideal meeting point for all gourmets. A large covered square where you can go shopping and spend leisure time; open Monday - Saturday until 15.30. The Porta Portese market, the Rome market par excellence. This lively, typically Roman place was a source of inspiration for film directors, writers and singers. Like all flea markets, this market also offers all kinds of bric-a-brac, books, antiques, toys, records, furniture, cosmetics and vintage postcards. Open every Sunday from 06.00 - 14.00.
{ "redpajama_set_name": "RedPajamaC4" }
3,355
\section{Introduction} \label{sec:Introduction} Mathematical expressions are indispensable for describing problems in maths, physics and many other fields. Meanwhile, people have begun to use handwritten mathematical expressions as one natural input mode. However, machine recognition of these handwritten mathematical expressions is difficult and exhibits three distinct challenges~\cite{belaid1984syntactic}, i.e., the complicated two-dimensional structures, enormous ambiguities coming from handwriting input and variant scales of handwritten math symbols. Handwritten mathematical expression recognition comprises two major problems~\cite{chan2000mathematical}: symbol recognition and structural analysis. The two problems can be solved sequentially~\cite{zanibbi2002recognizing} or globally~\cite{alvaro2016integrated}. However, both conventional sequential and global approaches have the following limitations: 1) the challenging symbol segmentation is inevitable, which brings many difficulties; 2) the structural analysis is commonly based on two-dimensional context free grammar~\cite{chou1989recognition}, which requires priori knowledge to define a math grammar; 3) the complexity of parsing algorithms increases with the size of math grammar. In recent research of deep learning, a novel attention based encoder-decoder model has been proposed~\cite{bahdanau2014neural,sutskever2014sequence}. Its general application in machine translation~\cite{cho2014learning}, speech recognition~\cite{bahdanau2016end}, character recognition~\cite{zhang2017ran,zhang2017trajectory} and image captioning~\cite{xu2015show} inspires researchers that mathematical expression recognition can also be one proper application~\cite{zhang2017watch,zhang2017gru,deng2016you,anh2017training}. More specifically, \cite{zhang2017watch} proposed a model namely WAP. The WAP learns to encode input expression images and decode them into LaTeX strings. 
The encoder is a convolutional neural network (CNN)~\cite{krizhevsky2012imagenet} based on VGG architecture~\cite{simonyan2014very} that maps images to high-level features. The decoder is a recurrent neural network (RNN)~\cite{graves2013speech} with gated recurrent units (GRU)~\cite{chung2014empirical} that converts these high-level features into output strings one symbol at a time. For each predicted symbol, an attention model built in the decoder scans the entire input expression image and chooses the most relevant region to describe a math symbol or an implicit spatial operator. Compared with conventional approaches for handwritten mathematical expression recognition, the attention based encoder-decoder model possesses three distinctive properties: 1) It is end-to-end trainable; 2) It is data-driven, in contrast to traditional systems that require a predefined math grammar; 3) Symbol segmentation can be automatically performed through attention model. \begin{figure} \centering \includegraphics[width=3in]{under-parsing-example} \caption{An incorrectly recognized example of handwritten mathematical expression due to the under-parsing problem: the decimal point ``.'' is missed in the predicted LaTeX notation.} \label{fig:under-parsing-example} \end{figure} In this study, we still focus on offline handwritten mathematical expression recognition and report our recent progress on WAP model~\cite{zhang2017watch}. The main contribution is in two aspects. Firstly, we improve the CNN encoder by employing a novel architecture called densely connected convolutional networks (DenseNet)~\cite{huang2016densely}. The DenseNet has shown excellent performance on image classification task as it strengthens feature extraction and facilitates gradient propagation. Secondly, we present a novel multi-scale attention model to deal with the problems caused by pooling operations. 
Although pooling layers are essential parts of convolutional networks, they shrink the size of feature maps, yielding decrease of resolution. Because the scales of handwritten math symbols vary severely, the fine-grained details of extracted feature maps are especially important in handwritten mathematical expression recognition, which are lost in low-resolution feature maps. For example, in Fig.~\ref{fig:under-parsing-example}, the decimal point is very close to math symbol ``3'' and its scale is much smaller than its adjacent symbols. After many pooling layers, the visual information of the decimal point is gone, which leads to an under-parsing problem. To implement the multi-scale attention model, we propose a multi-scale dense encoder that will provide both low-resolution features and high-resolution features. The low-resolution features capture a larger receptive field and are more semantic while the high-resolution features restore more fine-grained visual information. The decoder then attends to both low-resolution and high-resolution features for predicting output LaTeX strings. The remainder of the paper is organized as follows. In Section~\ref{sec:Methodology}, we introduce the dense encoder and the proposed multi-scale attention model in detail. We introduce the implementation of training and testing procedure in Section~\ref{sec:Training and Testing Details}. The performances of dense encoder and multi-scale attention model are shown through experimental results and visualization analysis in Section~\ref{sec:Experiments}. Finally we conclude this study in Section~\ref{sec:Conclusion}. \section{Methodology} \label{sec:Methodology} In this section, we first make a brief summarization of DenseNet since our encoder is based on densely connected convolutional blocks. Then we introduce the classic attention based encoder-decoder framework. 
Finally, we extend DenseNet by introducing a multi-scale dense encoder and describe the implementation of multi-scale attention model in detail. \subsection{Dense Encoder} \label{sec:Dense Encoder} The main idea of DenseNet is to use the concatenation of output feature maps of preceding layers as the input of succeeding layers. As DenseNet is composed of many convolution layers, let $H_{l}(\cdot)$ denote the convolution function of the $l^{\textrm{th}}$ layer, then the output of layer $l$ is represented as: \begin{equation}\label{eq:dense output} \mathbf{x}_{l}=H_{l}([\mathbf{x}_{0};\mathbf{x}_{1};\ldots;\mathbf{x}_{l-1}]) \end{equation} where $\mathbf{x}_{0}, \mathbf{x}_{1},\ldots, \mathbf{x}_{l}$ denote the output features produced in layers $0, 1, \ldots, l$, ``$;$'' denotes the concatenation operation of feature maps. This iterative connection enables the network to learn shorter interactions cross different layers and reuses features computed in preceding layers. By doing so the DenseNet strengthens feature extraction and facilitates gradient propagation. An essential part of convolutional networks is pooling layers, which is capable of increasing receptive field and improving invariance. However, the pooling layers disenable the concatenation operation as the size of feature maps changes. Also, DenseNet is inherently memory demanding because the number of inter-layer connections grows quadratically with depth. Consequently, the DenseNet is divided into multiple densely connected blocks as shown in Fig.~\ref{fig:multi-scale-dense}. A compression layer is appended before each pooling layer to further improve model compactness. \subsection{Decoder} \label{sec:Decoder with Attention} We employ GRU as the decoder because it is an improved version of simple RNN which can alleviate the vanishing and exploding gradient problems~\cite{bengio1994learning,zhang2016rnn}. 
Given input ${{\bf{x}}_t}$, the GRU output ${\mathbf{h}_t}$ is computed by: \begin{equation}\label{eq:GRU function} {{\bf{h}}_t} = \textrm{GRU} \left( {{\bf{x}}_t}, {{\bf{h}}_{t - 1}} \right) \end{equation} and the GRU function can be expanded as follows: \begin{align}\label{eq:expandGRU} & {{\mathbf{z}}_t} = \sigma ({{\mathbf{W}}_{xz}}{{\mathbf{x}}_{t}} + {{\mathbf{U}}_{hz}}{{\mathbf{h}}_{t - 1}}) \\ & {{\mathbf{r}}_t} = \sigma ({{\mathbf{W}}_{xr}}{{\mathbf{x}}_{t}} + {{\mathbf{U}}_{hr}}{{\mathbf{h}}_{t - 1}}) \\ & {{\bf{\tilde h}}_t} = \tanh ({{\bf{W}}_{xh}}{{\bf{x}}_{t}} + {{\bf{U}}_{rh}}({{\bf{r}}_t} \otimes {{\bf{h}}_{t - 1}})) \\ & {{\bf{h}}_t} = (1 - {{\bf{z}}_t}) \otimes {{\bf{h}}_{t - 1}} + {{\bf{z}}_t} \otimes {{\bf{\tilde h}}_t} \end{align} where $\sigma$ is the sigmoid function and $\otimes$ is an element-wise multiplication operator. ${{\mathbf{z}}_t}$, ${{\mathbf{r}}_t}$ and ${{\bf{\tilde h}}_t}$ are the update gate, reset gate and candidate activation, respectively. Assuming the output of CNN encoder is a three-dimensional array of size $H \times W \times C$, consider the output as a variable-length grid of $L$ elements, $L=H \times W$. Each of these elements is a $C$-dimensional annotation that corresponds to a local region of the image. \begin{equation}\label{eq:annotations} \mathbf{A} = \left\{ { \mathbf{a}_1, \ldots ,\mathbf{a}_L} \right\}\;,\;{{\mathbf{a}}_i} \in {\mathbb{R}^C} \end{equation} Meanwhile, the GRU decoder is employed to generate a corresponding LaTeX string of the input mathematical expression. The output string $\mathbf{Y}$ is represented by a sequence of one-hot encoded symbols. \begin{equation}\label{eq:outputY} \mathbf{Y} = \left\{ { \mathbf{y}_1, \ldots ,\mathbf{y}_T} \right\}\;,\;{{\mathbf{y}}_i} \in {\mathbb{R}^K} \end{equation} where $K$ is the number of total symbols in the vocabulary and $T$ is the length of LaTeX string. 
Note that, both the annotation sequence $\mathbf{A}$ and the LaTeX string $\mathbf{Y}$ are not fixed-length. To address the learning problem of variable-length annotation sequences and associate them with variable-length output sequences, we attempt to compute an intermediate fixed-length vector ${{\mathbf{c}}_t}$, namely context vector, at each decoding step $t$. The context vector ${{\mathbf{c}}_t}$ is computed via weighted summing the variable-length annotations ${{\mathbf{a}}_i}$: \begin{equation}\label{eq:context vector} {{\mathbf{c}}_t} = \sum\nolimits_{i=1}^L {{\alpha _{ti}}{{\mathbf{a}}_i}} \end{equation} Here, the weighting coefficients $\alpha _{ti}$ are called attention probabilities and they will make decoder to know which part of input image is the suitable place to attend to generate the next predicted symbol and then assign a higher weight to the corresponding local annotation vectors ${{\mathbf{a}}_i}$. After computing the intermediate fixed-length context vector, we then generate the LaTeX string one symbol at a time. By doing so, the problem of associating variable-length annotation sequences with variable-length output LaTeX strings is addressed. 
The probability of each predicted symbol is computed by the context vector ${{\mathbf{c}}_t}$, current decoder state ${{\mathbf{s}}_t}$ and previous target symbol ${{\mathbf{y}}_{t - 1}}$ using the following equation: \begin{equation}\label{eq:computePy} p({{\mathbf{y}}_t}|{{{\mathbf{y}}_{t - 1}},\mathbf{X}}) = g \left ({{\mathbf{W}}_o}h({\mathbf{E}}{{\mathbf{y}}_{t - 1}} + {{\mathbf{W}}_s}{{\mathbf{s}}_t} + {{\mathbf{W}}_c}{{\mathbf{c}}_t})\right ) \end{equation} where $\mathbf{X}$ denotes input mathematical expression images, $g$ denotes a softmax activation function, $h$ denotes a maxout activation function, let $m$ and $n$ denote the dimensions of embedding and GRU decoder state respectively, then ${{\mathbf{W}}_o} \in {\mathbb{R}^{K \times \frac{m}{2}}}$ and ${{\mathbf{W}}_s} \in {\mathbb{R}^{m \times n}}$, ${\mathbf{E}}$ denotes the embedding matrix. \subsection{Multi-Scale Attention with Dense Encoder} \label{sec:Multi-Scale Attention with Dense Encoder} \subsubsection{Multi-Scale Dense Encoder} \label{sec:Multi-Scale Dense Encoder} \begin{figure} \centering \includegraphics[width=3.25in]{multi-scale-dense} \caption{Architecture of multi-scale dense encoder. The left part is the main branch while the right part is the multi-scale branch.} \label{fig:multi-scale-dense} \end{figure} To implement the multi-scale attention model, we first extend the single-scale dense encoder into multi-scale dense encoder. As illustrated in Fig.~\ref{fig:multi-scale-dense}, our dense encoder consists of two branches, i.e., except the main branch which produces low-resolution annotations $\mathbf{A}$, our dense encoder has another multi-scale branch that produces high-resolution annotations $\mathbf{B}$. The multi-scale branch is extended before the last pooling layer of the main branch so that the output feature maps of multi-scale branch has a higher resolution. 
The high-resolution annotation is a three-dimensional array of size $2H \times 2W \times {C^{'}}$, which can be represented as a variable-length grid of $4L$ elements: \begin{equation}\label{eq:new annotations} \mathbf{B} = \left\{ { \mathbf{b}_1, \ldots ,\mathbf{b}_{4L}} \right\}\;,\;{{\mathbf{b}}_i} \in {\mathbb{R}^{C^{'}}} \end{equation} where $L$ is the length of annotation sequence $A$. Intuitively, we can extend several multi-scale branches before every pooling layer but such operation brings too much computational cost as the size of feature maps becomes too large. As for the implementation details of dense encoder, we employ three dense blocks in the main branch as described by yellow rectangles in Fig.~\ref{fig:multi-scale-dense}. Before entering the first dense block, a $7 \times 7$ convolution (stride is $2 \times 2$) with 48 output channels is performed on the input expression images, followed by a $2 \times 2$ max pooling layer. Each dense block is titled as ``DenseB'' because we use bottleneck layers to improve computational efficiency, i.e. a $1 \times 1$ convolution is introduced before each $3 \times 3$ convolution to reduce the input to $4k$ feature maps. The growth rate $k=24$ and the depth (number of convolution layers) of each block $D=32$ which means each block has 16 $1 \times 1$ convolution layers and 16 $3 \times 3$ convolution layers. A batch normalization layer~\cite{ioffe2015batch} and a ReLU activation layer~\cite{glorot2011deep} are performed after each convolution layer consecutively. We use $1 \times 1$ convolution followed by $2 \times 2$ average pooling as transition layers between two contiguous dense blocks. The transition layer reduces the number of feature maps of each block by half. While in the multi-scale branch, we append another dense block with bottleneck layer, $k=24$ and $D=16$. We investigate the depth of block in multi-scale branch ($D=0, 8, 16, 24$) in Section~\ref{sec:Experiments}. 
\subsubsection{Multi-Scale Attention Model} \label{sec:Multi-Scale Attention Model} In this study, our decoder adopts two unidirectional GRU layers to calculate the decoder state ${\mathbf{s}_t}$ and the multi-scale context vector ${\mathbf{c}_t}$ that are both used as input to calculate the probability of predicted symbol in Eq.~\eqref{eq:computePy}. We employ two different single-scale coverage based attention model to generate the low-resolution context vector and high-resolution context vector by attending to low-resolution annotations and high-resolution annotations respectively. As the low-resolution context vector and high-resolution context vector have the same length $1$, we concatenate them to produce the multi-scale context vector: \begin{align}\label{eq:compute decoder state} & {{\mathbf{\hat s}}_t} = \textrm{GRU} \left( {{\bf{y}}_{t-1}}, {{\bf{s}}_{t - 1}} \right) \\ & {\mathbf{cA}_t} = f_{\text{catt}} \left( \mathbf{A}, \mathbf{\hat s}_t \right) \\ & {\mathbf{cB}_t} = f_{\text{catt}} \left( \mathbf{B}, \mathbf{\hat s}_t \right) \\ & {\mathbf{c}_t} = [{\mathbf{cA}_t};{\mathbf{cB}_t}] \\ & {{\mathbf{s}}_t} = \textrm{GRU} \left( {{\mathbf{c}}_t}, {{\mathbf{\hat s}}_t} \right) \end{align} where ${{\mathbf{s}}_{t-1}}$ denotes the previous decoder state, ${{\mathbf{\hat s}}_t}$ is the prediction of current decoder state, ${\mathbf{cA}_t}$ is the low-resolution context vector at decoding step $t$, similarly ${\mathbf{cB}_t}$ is the high-resolution context vector. The multi-scale context vector ${\mathbf{c}_t}$ is the concatenation of ${\mathbf{cA}_t}$ and ${\mathbf{cB}_t}$ and it performs as the input during the computation of current decoder state ${{\mathbf{s}}_{t}}$. $f_{\text{catt}}$ denotes a single-scale coverage based attention model. 
Take the computation of low-resolution context vector ${\mathbf{cA}_t}$ as an example, we parameterize $f_{\text{catt}}$ as a multi-layer perceptron: \begin{align}\label{eq:coverage attention} & {\mathbf{F}} = {\mathbf{Q}} * \sum\nolimits_{l=1}^{t - 1} {{{\bm{\alpha}}_l}} \\ & {e_{ti}} = {\bm{\nu }}_{\text{att}}^{\rm T}\tanh ({{\mathbf{U}}_{s}}{{\mathbf{\hat s}}_t} + {{\mathbf{U}}_a}{{\mathbf{a}}_i} + {{\mathbf{U}}_f}{{\mathbf{f}}_i}) \\ & {\alpha _{ti}} = \frac{{\exp ({e_{ti}})}}{{\sum\nolimits_{k = 1}^L {\exp ({e_{tk}})} }} \\ & {{\mathbf{cA}}_t} = \sum\nolimits_{i=1}^L {{\alpha _{ti}}{{\mathbf{a}}_i}} \end{align} where ${{\mathbf{a}}_{i}}$ denotes the element of low-resolution annotation sequence $\mathbf{A}$, ${e_{ti}}$ denotes the energy of ${{\mathbf{a}}_{i}}$ at time step $t$ conditioned on the prediction of current decoder state ${{\mathbf{\hat s}}_t}$ and coverage vector ${{\mathbf{f}}_i}$. The coverage vector is initialized as a zero vector and we compute it based on the summation of all past attention probabilities. Hence the coverage vector contains the information of alignment history. We append the coverage vector in the attention model so that the decoder is capable to know which part of input image has been attended or not~\cite{zhang2017watch,tu2016modeling}. Let $n'$ denote the attention dimension and $q$ denote the number of output channels of convolution function $\mathbf{Q}$; then ${{\bm{\nu }}_{\text{att}}} \in {\mathbb{R}^{{n'}}}$, ${{\mathbf{U}}_s} \in {\mathbb{R}^{{n'} \times n}}$, ${{\mathbf{U}}_a} \in {\mathbb{R}^{{n'} \times C}}$ and ${{\mathbf{U}}_{f}} \in {\mathbb{R}^{{n'} \times q}}$. The high-resolution context vector ${\mathbf{cB}_t}$ is computed based on another coverage based attention model $f_{\text{catt}}$ with different initialized parameters except the ${{\mathbf{U}}_s}$ transition matrix. 
\section{Training and Testing Details} \label{sec:Training and Testing Details} We validated the proposed model on CROHME 2014~\cite{mouchere2014icfhr} test set and CROHME 2016~\cite{mouchere2016icfhr2016} test set. The CROHME competition dataset is currently the most widely used public dataset for handwritten mathematical expression recognition. The training set has 8,836 expressions including 101 math symbol classes. The CROHME 2014 test set has 986 expressions while the CROHME 2016 test set has 1,147 expressions. \subsection{Training} \label{sec:Training} The training objective of the proposed model is to maximize the predicted symbol probability as shown in Eq. \eqref{eq:computePy} and we use cross-entropy (CE) as the objective function: \begin{equation}\label{eq:objective} O = - \sum\nolimits_{t=1}^T \log p({w_t}|{\mathbf{y}_{t-1},\mathbf{x}}) \end{equation} where $w_t$ represents the ground truth word at time step $t$. The implementation details of the dense encoder have been introduced in Section~\ref{sec:Multi-Scale Dense Encoder}. The decoder is a single layer with 256 forward GRU units. The embedding dimension $m$ and decoder state dimension $n$ are set to 256. The multi-scale attention dimension $n'$ is set to 512. The convolution kernel size for computing low-resolution coverage vector is set to $11 \times 11$ but $7 \times 7$ for high-resolution coverage vector, while the number of convolution filters is set to 256 for both. We utilized the adadelta algorithm \cite{zeiler2012adadelta} with gradient clipping for optimization. The best model is determined in terms of word error rate (WER) of validation set. We used a weight decay of ${10^{ - 4}}$ and we added a dropout layer~\cite{srivastava2014dropout} after each convolutional layer and set the drop rate to 0.2. \subsection{Decoding} \label{sec:Decoding} In the decoding stage, we aim to generate the most likely LaTeX string given the input image. 
However, different from the training procedure, we do not have the ground truth of previous predicted symbol. Consequently, a simple left-to-right beam search algorithm~\cite{cho2015natural} is employed to implement the decoding procedure. Here, we maintained a set of 10 partial hypotheses at each time step, ending with the end-of-sentence token $<eos>$. We also adopted the ensemble method \cite{dietterich2000ensemble} for improving the performance. We first trained 5 models on the same training set but with different initialized parameters. Then we averaged their prediction probabilities during the beam search process. \section{Experiments} \label{sec:Experiments} We designed a set of experiments to validate the effectiveness of the proposed method for handwritten mathematical expression recognition by answering the following questions: \begin{description} \item[Q1] Is the dense encoder effective? \item[Q2] Is the multi-scale attention model effective? \item[Q3] Does the proposed approach outperform state-of-the-arts? \end{description} \subsection{Metric} \label{sec:Metric} The participating systems in all of the CROHME competitions were ranked by expression recognition rates (ExpRate), i.e., the percentage of predicted mathematical expressions matching the ground truth, which is simple to understand and provides a useful global performance metric. The CROHME competition compared the competing systems not only by ExpRate but also those with at most one to three symbol-level errors. In our experiments, we first transferred the generated LaTeX strings into MathML representation and then computed these metrics by using the official tool provided by the organizer of CROHME. However, it seems inappropriate to evaluate an expression recognition system only at expression level. So we also evaluated our system at symbol-level by using WER metric. 
\subsection{Evaluation of dense encoder (Q1)} \label{sec:Evaluation of dense encoder (Q1)} We start the proposed multi-scale attention model with dense encoder from WAP~\cite{zhang2017watch}. As shown in Table~\ref{tab:1}, WAP achieves an ExpRate of 44.4\% on CROHME 2014 test set and an ExpRate of 42.0\% on CROHME 2016 test set. The WAP employs an encoder based on VGG architecture and its decoder is a unidirectional GRU equipped with coverage based attention model. Here, we only replace the VGG encoder by dense encoder with the other settings kept unchanged, and the new system is named ``Dense'' in Table~\ref{tab:1}. The implementation details of the dense encoder are illustrated by the main branch in Fig.~\ref{fig:multi-scale-dense}. We can observe that the ExpRate increases about 5.7\% on CROHME 2014 and 5.5\% on CROHME 2016 by employing dense encoder. \begin{table}[h] \caption{\label{tab:1}{Comparison of recognition performance (in \%) on CROHME 2014 and CROHME 2016 when employing dense encoder and multi-scale attention model.}} \centering \begin{tabular}{c c c c c} \toprule \multirow{2}{*}{System} & \multicolumn{2}{c}{CROHME 2014} & \multicolumn{2}{c}{CROHME 2016} \\ \cmidrule(lr){2-3} \cmidrule(lr){4-5} & WER & ExpRate & WER & ExpRate\\ \midrule \textbf{WAP} & 19.4 & 44.4 & 19.7 & 42.0 \\ \textbf{Dense} & 13.9 & 50.1 & 15.4 & 47.5 \\ \textbf{Dense+MSA} & 12.9 & 52.8 & 13.7 & 50.1 \\ \bottomrule \end{tabular} \end{table} \subsection{Evaluation of multi-scale attention model (Q2)} \label{sec:Evaluation of multi-scale attention model (Q2)} In Table~\ref{tab:1}, the system ``Dense+MSA'' is the proposed multi-scale attention model with dense encoder. ``+MSA'' means that we only replace the single-scale coverage based attention model in system ``Dense'' by multi-scale coverage based attention model. The performance of multi-scale attention model is clear to be observed by the comparison between system ``Dense'' and system ``Dense+MSA''. 
The ExpRate increases from 50.1\% to 52.8\% on CROHME 2014 and from 47.5\% to 50.1\% on CROHME 2016 after the implementation of multi-scale attention model. More specifically, in the system ``Dense+MSA'', the multi-scale branch of dense encoder contains a dense block with depth $D=16$. We choose $D=16$ as we investigate the depth of block in multi-scale branch ($D=0, 8, 16, 24$) by experiments. In Table~\ref{tab:2}, $D=0$ means that we simply choose the output of the last transition convolutional layer in the main branch of dense encoder as the high-resolution annotations. The performance is only slightly improved compared with system ``Dense'' in Table~\ref{tab:1} which implies that more convolution operations are necessary to obtain more semantic high-resolution annotations. We can observe that $D=16$ is the best setting for both test sets of CROHME 2014 and 2016. The unpleasant results of $D=24$ indicate that too many convolution operations in the multi-scale branch can also lead to performance degradation. \begin{table}[h] \caption{\label{tab:2}{Comparison of recognition performance (in \%) on CROHME 2014 and CROHME 2016 when increasing the depth of dense block in multi-scale branch.}} \centering \begin{tabular}{c c c c c} \toprule \multirow{2}{*}{Depth} & \multicolumn{2}{c}{CROHME 2014} & \multicolumn{2}{c}{CROHME 2016} \\ \cmidrule(lr){2-3} \cmidrule(lr){4-5} & WER & ExpRate & WER & ExpRate\\ \midrule \textbf{0} & 13.5 & 50.8 & 14.3 & 48.5 \\ \textbf{8} & 13.3 & 51.3 & 13.9 & 49.3 \\ \textbf{16} & 12.9 & 52.8 & 13.7 & 50.1 \\ \textbf{24} & 13.1 & 51.4 & 14.1 & 48.9 \\ \bottomrule \end{tabular} \end{table} We also illustrate the performance of multi-scale attention model in Fig.~\ref{fig:attention_visualization}. The left part of Fig.~\ref{fig:attention_visualization} denotes the visualization of single-scale attention on low-resolution annotations and the right part denotes the visualization of multi-scale attention only on high-resolution annotations. 
Fig.~\ref{fig:attention_visualization} (a) is an example that the decimal point ``.'' is under-parsed by only relying on low-resolution attention model. However, the high-resolution attention in the multi-scale attention model successfully detects the decimal point. Fig.~\ref{fig:attention_visualization} (b) is an example that the math symbols ``- 1'' are mis-parsed as ``7'' due to the low-resolution attention model while the high-resolution attention model can correctly recognize them. \begin{figure} \centering \includegraphics[width=2.5in]{attention_visualization} \caption{Two examples of attention visualization on low-resolution annotations and on high-resolution annotations. The attention probabilities are shown through red color and the predicted symbols are shown on the right of images.} \label{fig:attention_visualization} \end{figure} \subsection{Comparison with state-of-the-arts (Q3)} \label{sec:Comparison with state-of-the-arts (Q3)} \begin{table}[h] \caption{\label{tab:3}{Comparison of ExpRate (in \%) on CROHME 2014, we erase system \uppercase\expandafter{\romannumeral3} namely MyScript because it used extra training data.}} \centering \begin{tabular}{c c c c c} \toprule \textbf{System} & \textbf{Correct(\%)} & \textbf{$\leq$ 1(\%)} & \textbf{$\leq$ 2(\%)} & \textbf{$\leq$ 3(\%)}\\ \midrule \uppercase\expandafter{\romannumeral1} & 37.2 & 44.2 & 47.3 & 50.2 \\ \uppercase\expandafter{\romannumeral2} & 15.0 & 22.3 & 26.6 & 27.7 \\ \uppercase\expandafter{\romannumeral4} & 19.0 & 28.2 & 32.4 & 33.4 \\ \uppercase\expandafter{\romannumeral5} & 19.0 & 26.4 & 30.8 & 33.0 \\ \uppercase\expandafter{\romannumeral6} & 25.7 & 33.2 & 35.9 & 37.3 \\ \uppercase\expandafter{\romannumeral7} & 26.1 & 33.9 & 38.5 & 40.0 \\ \midrule \textbf{WAP} & \textbf{44.4} & \textbf{58.4} & \textbf{62.2} & \textbf{63.1} \\ \textbf{CRNN} & \textbf{35.2} & \textbf{-} & \textbf{-} & \textbf{-} \\ \textbf{Ours} & \textbf{52.8} & \textbf{68.1} & \textbf{72.0} & \textbf{72.7} \\ \bottomrule 
\end{tabular} \end{table} The comparison among the proposed approach and others on CROHME 2014 test set is listed in Table~\ref{tab:3}. Systems \uppercase\expandafter{\romannumeral1} to \uppercase\expandafter{\romannumeral7} were submitted systems to CROHME 2014 competition and they were mostly based on the traditional two-dimensional context free grammar method. Details of these systems can refer to \cite{mouchere2014icfhr}. To make a fair comparison we erase the system \uppercase\expandafter{\romannumeral3} namely ``MyScript'' which achieved a high ExpRate of 62.7\% but used a large private dataset and the technical details were unrevealed. System ``WAP'', ``CRNN'' and our proposed system are all based on encoder-decoder model with attention that takes handwritten mathematical expressions input as images. As for the system ``CRNN'', it is declared in \cite{anh2017training} that the encoder employs a CNN+RNN architecture and the decoder is a unidirectional RNN with classic attention model. Meanwhile a novel data augmentation method for handwritten mathematical expression recognition was proposed in~\cite{anh2017training}. We can see that our proposed system achieves the best result with ExpRate of 52.8\% on CROHME 2014. Additionally, a gap existed between the correct and error percentages ($\leq$ 1\%), showing that the corresponding systems have a large room for further improvements. In contrast, the small differences between error ($\leq$ 2\%) and error ($\leq$ 3\%) illustrate that it is difficult to improve the ExpRate by incorporating a single correction. 
\begin{table}[h] \caption{\label{tab:CROHME2016}{Comparison of ExpRate (in \%) on CROHME 2016, we erase team MyScript because it used extra training data.}} \centering \begin{tabular}{c c c c c} \toprule \textbf{} & \textbf{Correct(\%)} & \textbf{$\leq$ 1 (\%)} & \textbf{$\leq$ 2 (\%)} & \textbf{$\leq$ 3 (\%)}\\ \midrule Wiris & 49.6 & 60.4 & 64.7 & -- \\ Tokyo & 43.9 & 50.9 & 53.7 & -- \\ S\~{a}o Paulo & 33.4 & 43.5 & 49.2 & -- \\ Nantes & 13.3 & 21.0 & 28.3 & -- \\ \midrule \textbf{WAP} & \textbf{42.0} & \textbf{55.1} & \textbf{59.3} & \textbf{60.2} \\ \textbf{Ours} & \textbf{50.1} & \textbf{63.8} & \textbf{67.4} & \textbf{68.5} \\ \bottomrule \end{tabular} \end{table} To complement a more recent algorithm comparison and test the generalization capability of our proposed approach, we also validate our best system on CROHME 2016 test set as shown in Table \ref{tab:CROHME2016}, with an ExpRate of 50.1\% which is quite a promising result compared with other participating systems. The system ``Wiris'' was awarded as the first place on CROHME 2016 competition using only the CROHME training data with an ExpRate of 49.6\%, and it used a Wikipedia formula corpus, consisting of more than 592,000 LaTeX notations of mathematical expressions, to train a strong language model. The details of other systems can be found in \cite{mouchere2016icfhr2016}. \section{Conclusion} \label{sec:Conclusion} In this study we improve the performance of attention based encoder-decoder for handwritten mathematical expression recognition by introducing the dense encoder and multi-scale attention model. It is the first work that employs densely connected convolutional networks for handwritten mathematical expression recognition and we propose the novel multi-scale attention model to alleviate the problem caused by the pooling operation. 
We demonstrate through attention visualization and experiment results that the novel multi-scale attention model with dense encoder performs better than the state-of-the-art methods. \bibliographystyle{IEEEtran}
{ "redpajama_set_name": "RedPajamaArXiv" }
1,880
A couple of weeks ago I went to the Christmas Tree shops looking for bargains. I showed you a bunch that I got. What I didn't show you was this cute cotton red, white and blue crab valance. Now, this design does not go with my kitchen whatsoever. However, I really liked it as a fabric. When you are out bargain shopping and come across a design or pattern on a valance, curtain, sheet or any other fabric piece, don't just look at it for what it is, think of it as a partially finished piece of fabric. Fabric is so expensive, so finding alternatives that are bargains is a must for me. How cute is this fabric! And check out the price. Now seriously, you can't pass up .28 cents for this pretty fabric! Can you see what I see? Yup, an embarrassingly simple easy metamorphosis! A perfect table runner that is pretty much already made! I just need to remove this piece of solid blue fabric. Fortunately, there is already a finished seam, so I just need to cut it off carefully and I won't have to sew it. Now I am thinking I may take it one step further and cut this piece into 4 pieces and turn them into cloth napkins. I would only have to sew 2 napkins on two sides (or heat and bond it) and sew one side on the remaining two pieces. So, for .28 cents and very little time or effort involved, you can have a new table runner or 4 new napkins. So, keep this in mind when you are out there thrifting! Thanks for stopping by and I hope you enjoyed your visit! Wow! 29 cents for a table runner..............not bad girlfriend! Wow, that is cute material...can't pass up a deal like that! There is a fabric glue called " Fabri Tac" that I found at WalMart. It dries instantly and can be washed! I used it for all the detail work on my daughter's Marie Antoinette costume...I LOVE IT! I will let you know how it works for drapes but I am certain there will be no problems. First of all....28 CENTS!!!!! Second of all....CUTE FABRIC! so big time SCORE on this one! 
You are one of the ladies in blog land who have helped me look at things with an open mind and see the possibilities! Oh, now that is smart shopping, Diann! I love it! Cute fabric, too. Happy Blue Monday, Diann, you crafty YOU! gorgeous! and if you do make napkins you could use that blue fabric you cut off to make napkin rings! wouldn't they be cute with a starfish or sand dollar glued onto them! You are creative, wonderful ideas. Happy Blue Monday! Wow too cute i like sales hehehe. Happy blue Monday. It is finds like this that keep us hunting....LOVE it! I can make a table cover with this or table runner. Love the design and the color. Happy MOnday! You always amaze me with the thrifty things you find and then make. That was a great idea. Looks very nice. I have also made napkins from them, and sewed some together to make a tablecloth. Can't beat the price! Did you see the new flyer? I have to get there!
{ "redpajama_set_name": "RedPajamaC4" }
3,720
Q: Why is the tar flag --strip-components ignored sometimes? I have an automated docker images build where I download elasticsearch archive and extract it via: tar zxf archive.tar.gz --strip-components=1 -C /path/to/dir And it always worked until the latest releases (6.8.5 and 7.4.2). It no longer works for 6.8.5, meaning the flag --strip-components no longer has any effect. However, it works fine for 7.4.2. After comparing these two archives the only difference I've found is that 6.8.5 has a different ownership of files in the archive – 631:503 vs root:root in 7.4.2. However, if that was the issue the flags --no-same-owner or --user should've resolved the issue but they didn't. I even created a user/group with those IDs and extracted the archive under this user but it also had no effect. This is how you can reproduce (replace 6.8.5 with 7.4.2 to try both): $ docker run --rm -ti alpine:3.10.3 sh ### from the container $ wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.8.5.tar.gz $ apk add --update tar $ mkdir elastic $ tar zxvf elasticsearch-6.8.5.tar.gz --strip-components=1 -C elastic $ ls -la elastic With 6.8.5 you'll see the intermediary directory that wasn't stripped, with 7.4.2 you won't see it even though it exists in both archives. As you may notice I don't use tar from musl, I used the GNU version from alpine packages (version 1.32) that have been there for a few months already. I use this package with the same flags in many other builds and it works just fine for me. A: As was explained to me by Elastic staffer on github this happens due to a leading ./ on the paths within the archive: / # tar tvf elasticsearch-6.8.5.tar.gz --numeric-owner | head -n 2 drwxr-xr-x 631/503 0 2019-11-14 14:20 ./ drwxr-xr-x 631/503 0 2019-11-13 20:07 ./elasticsearch-6.8.5/ So, in this case --strip-components should be 2, not 1. 
To handle this kind of situations universally you can list the archive before extracting and if it has ./ you can dynamically change the --strip-components count: $ if tar tf elasticsearch-6.8.5.tar.gz | head -n 1 | grep -q '^./$'; then STRIP_COMPONENTS_COUNT=2; else STRIP_COMPONENTS_COUNT=1; fi $ tar zxvf elasticsearch-6.8.5.tar.gz --strip-components=$STRIP_COMPONENTS_COUNT -C elastic But, honestly, good archives should be created without any ./ which is super confusing because unless you list files in the archive you won't notice any difference.
{ "redpajama_set_name": "RedPajamaStackExchange" }
5,140
If this Pokémon has full HP, this attack does 40 more damage, and the Defending Pokémon is now Confused and Poisoned. Your opponent can't play any Supporter cards from his or her hand during his or her next turn. Heal 20 damage from each of your Pokémon that has any Energy attached to it. Once during your turn (before your attack), you may search your deck for a [R] Energy card and attach it to 1 of your Pokémon. If you do, put 1 damage counter on that Pokémon. Shuffle your deck afterward. If Zekrom is on your Bench, this attack does 40 more damage. Its spines provide protection. Its fins and bones are prized as traditional-medicine ingredients. Its cell composition is similar to water molecules. As a result, it can't be seen when it melts away into water. If this Pokémon has any [F] Energy attached to it, this attack does 30 more damage. The Retreat Cost of each of your Team Plasma Pokémon in play is [C][C] less. If the Defending Pokémon is a Pokémon-EX, that Pokémon can't attack during your opponent's next turn. If this Pokémon has any Special Energy attached to it, this attack does 30 more damage. Attach an Energy card from your discard pile to 1 of your Benched Team Plasma Pokémon. If this Pokémon has any Plasma Energy attached to it, discard an Energy attached to the Defending Pokémon. If Reshiram is on your Bench, this attack does 40 more damage. If this Pokémon has no damage counters on it, this attack does nothing. Flip a coin. If heads, switch 1 of your opponent's Benched Pokémon with the Defending Pokémon. The new Defending Pokémon is now Poisoned. If the Defending Pokémon is Poisoned, heal 60 damage from this Pokémon. Flip a coin until you get tails. For each heads, discard an Energy attached to the Defending Pokémon. It's so stinky! Muk's body contains toxic elements, and any plant will wilt when it passes by. This attack does 40 damage to 1 of your opponent's Pokémon. Also apply Weakness and Resistance for Benched Pokémon. 
Its fur is so sensitive, it can feel minute shifts in the air and predict the weather…and its foes' thoughts. Look at the top 4 cards of your deck and put them back on top of your deck in any order. Move 1 damage counter from any of your Pokémon to any of your opponent's Pokémon. Once during your turn (before your attack) you may Knock Out this Pokémon. If you do, put 3 damage counters on your opponent's Pokémon in any way you like. During your opponent's next turn, any damage done to this Pokémon by attacks is increased by 20 (after applying Weakness and Resistance). Each of your Team Plasma Pokémon in play get +20 HP. Discard as many Pokémon as you like from your hand. This attack does 30 damage times the number of Pokémon you discarded. If the Defending Pokémon is a [N] Pokémon, this attack does 40 more damage. Switch 1 of your opponent's Benched Pokémon with the Defending Pokémon. This attack does 40 damage to the new Defending Pokémon. Before doing damage, discard all Pokémon Tool cards attached to the Defending Pokémon. Flip a coin. If heads, choose a random card from your opponent's hand. Your opponent reveals that card and shuffles it into his or her deck. Does 20 damage times the number of [W] Energy cards and [L] Energy cards in your discard pile. Then, shuffle all of those cards back into your deck. Put a Team Plasma Pokémon, a Team Plasma Trainer card, and a Team Plasma Energy card from your discard pile into your hand. With its long fangs, this surprisingly violent Pokémon can gnaw even thick concrete with ease. It marks time precisely. Some countries consider it to be a wise friend, versed in the world's ways. Does 10 damage times the number of cards in your opponent's hand. It is said that kids who drink Miltank's milk grow up to become hearty, healthy adults. As long as this Pokémon is your Active Pokémon, this Pokémon is the same type as the your opponent's Active Pokémon. Choose 1 of the Defending Pokémon's attacks. 
If this Pokémon has the necessary Energy to use that attack, use it as this attack. This attack does 30 more damage for each Plasma Energy attached to this Pokémon. Whenever any player attaches an Energy from his or her hand to 1 of his or her Pokémon (excluding Team Plasma Pokémon) put 2 damage counters on that Pokémon. If the Pokémon this card is attached to is Knocked Out, your opponent takes 1 fewer Prize card.
{ "redpajama_set_name": "RedPajamaC4" }
416
Logistic and distribution services We are the leading platform of value-added solutions and technology products in Latin America and the Caribbean. In country presence with a network of 15 subsidiaries throughout the region plus 2 consolidation centers located in Miami and Panama, serving 41 countries. Intcomex currently serves over 50 thousand resellers, represented by value-added channels, retailers and telecom operators. Manufacturers with a portfolio of over 12,000 SKUs. Ft² of storage in 24 consolidation centers. We have gained the experience to understand the diversity of each Latin American economy and the flexibility to customize our multinational plan to the specific needs of local markets. A little bit of history In January, 1989, Anthony Shalom -Chairman of the Board of Directors- and his son, Michael Shalom -CEO, President and Director- founded Intcomex as a small local software distributor in South Florida. That same year, they began exporting IT products from the United States to several countries in Latin America. The first affiliate for sales and distribution opens in Mexico in the year 1990, adding two new subsidiaries in Panama and Chile four years later. By 1997, it expands its operations to Peru, Guatemala and Uruguay. In the year 2000, affiliates are established in El Salvador, Ecuador, Costa Rica and Jamaica, followed by Colombia, being the latest subsidiary to open its doors for business in 2004. In order to capitalize the export business of IT products in Latin America and the Caribbean, the company opens its headquarters in Miami, Florida, U.S.A., to serve those areas as well as other locations where it does not have local presence. Driven by a strong commitment to enhancing customer value, the company enters the mobile distribution marketplace in 2011, and since then it has further diversified its portfolio with the integration of cutting-edge cloud-based solutions and electronic software distribution platforms. 
Today, with over 25 years of leadership in the region, Intcomex has consolidated its presence by growing from a small Miami-based distribution company into a multinational platform of value-added solutions and technology products. Our organization continues to expand in the industry thanks to our partners, team of professionals and specialized divisions that focus on the various segments of the market. Our divisions are Security, Point of Sale, Accessories and Retail Services, OEM, Components, Computers, Portability, Networking, Software, Gaming, Peripherals, Printing, Consumables, Electronics, Mobile and Cloud technology solutions. Intcomex's vision is to expand its influence in the fast growing Latin America technology industry, while keeping its customers at the forefront of the organization's overall success. Excutive management team Intcomex's leadership team not only possesses vast and diverse industry knowledge and experience, but has also a well-rounded perspective to meet today's and future challenges. Anthony Shalom, Mike Shalom, Chief Executive Officer, President Director Danny Schachtel, Jose Biton, Ariel Engelsztajn, V.P. Product Strategy – Broadline Javier Pinzon, V.P. eCommerce and Infrastructure Joseph Bouhadana, V.P. Cloud Services Mike Krigsfeld, V.P. Business Development Naji S. Zakka, V.P. Business Process Transformation Nicolas Boffi, V.P. Retail Services Yali Luna, V.P. of Inventory & Purchasing Mauro Butelmann, Camilo Borda, Allan Escobar, Paul Bergmann, Helfido Juarez, Matthew Deleon, Jonathan Lerner, Jerry Pelosi, Eric Hachmann, © 2016 Intcomex Corp. | Intcomex is a registered Corp. All Rights Reserved.
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
3,963
\section{Supplemental material} \subsection{A. General remarks for the NNVB manifold of the square-kagome antiferromagnet} As shown in [\onlinecite{IoannisKagomeLikeS}] by a generalization of the so-called arrow representation~\cite{Elser1989S}, the dimension of the NNVB Hilbert space is $2\!\times\!2^{N/3}$, where $N$ is the total number of sites. Similar to the non-bipartite, triangular and kagome lattices, the NNVB manifold splits into four topological sectors for periodic boundary conditions in both directions. These sectors can be labeled by the parities ($p_x$,$p_y$) of the number of dimers intersecting any horizontal or vertical line~\cite{Moessner2001S,Fendley2002S,Ioselevich2002S,Ralko2005S}. Here, the sectors (0,1) and (1,0) are connected to each other by the four-fold symmetry of the lattice. \begin{table*}[b] \ra{1.3} \begin{ruledtabular} \begin{tabular}{@{}r|lllllll@{}} void plaquette & process & loop length $L_p$ & $\omega_p$ & ${\sf v}_p/\omega_p$ & $E_{0,p}$ & $t_p$ & $V_p$ \\ \hline AA-square &$\small{\Square}_{4}$& $4$ & $+2^{-1}$ & $-3$ & $-\frac{3}{2}$ & $-1$ & $+\frac{1}{2}$\\ &$\small{\Square}_{6a,b}$ & $6$ & $-2^{-2}$ & $-3x$ & $-\frac{3}{4}(2x+1)$ & $+\frac{1}{5}(2x-1)$ & $+\frac{1}{20}(2x-1)$\\ & $\small{\Square}_{8}$& $8$ & $+2^{-3}$ & $-3(2x-1)$ & $-3x$ & $+\frac{8}{21}(1-x)$ & $-\frac{1}{21}(1-x)$\\ \hline AB-octagon &$\small{\octagon}_{8}$& $8$ & $+2^{-3}$ & $-6x$ & $-3x$ & $-\frac{8}{21}x$ & $+\frac{1}{21}x$\\ & $\small{\octagon}_{10a-d}$& $10$ & $-2^{-4}$ & $-\frac{3}{2}(3x+1)$ & $-\frac{3}{4}(4x+1)$ & $+\frac{4}{85}(2x+1)$ & $+\frac{1}{340}(2x+1)$\\ &$\small{\octagon}_{12a-f}$ & $12$ & $+2^{-5}$ & $-3(x+1)$ & $-\frac{3}{2}(2x+1)$ & $-\frac{16}{341}$ & $+\frac{1}{682}$\\ & $\small{\octagon}_{14a-d}$& $14$ & $-2^{-6}$ & $-\frac{3}{2}(x+3)$ & $-\frac{3}{4}(4x+3)$ & $+\frac{16}{1365}(3-2x)$ & $+\frac{1}{5460}(3-2x)$\\ & $\small{\octagon}_{16}$& $16$ & $+2^{-7}$ & $-6$ & $-3(1+x)$ & $-\frac{128}{5461}(1-x)$ & $+\frac{1}{5461}(1-x)$ 
\end{tabular} \end{ruledtabular} \caption{QDM parameters $t_p$ and $V_p$ (in units of $J_{\sf AA}$) for the four processes around AA-squares and the 18 processes around AB-octagons, using the minimal $2\!\times\!2$ NNVB truncation approach. $L_p$ is the length of the loop in the transition graph of $|1_p\rangle$ and $|2_p\rangle$. For the dimer orientations we follow the convention that singlets are oriented clockwise in each even-length loop. All values correspond to the clusters of the first row of Fig.~\ref{fig:supp1}. If we use clusters of any other row, $E_{0,p}$ and ${\sf v}_p$ will change, but the overlap $\omega_p$ and the combination $h_p=v_p/\omega_p-E_{0,p}$ do not change, leading to the same $t_p$ and $V_p$.} \label{tab:qdm} \end{table*} \subsection{B. Basic elements of the minimal $2\!\times\!2$ NNVB truncation} The basic elements of the minimal $2\!\times\!2$ NNVB truncation method in the square-kagome AFM are given in [\onlinecite{IoannisKagomeLikeS}]. For convenience they are repeated here in Table~\ref{tab:qdm}. Let us consider any of the finite spin-1/2 Heisenberg clusters of the first row of Fig.~(2) of the main text (or Fig.~\ref{fig:supp1} for the octagonal processes) and consider the elementary tunneling process $p$ between the two dimer coverings $|1_p\rangle$ and $|2_p\rangle$ that can be accommodated by this cluster. For the dimer orientations~\cite{SMfootnote}, we take the singlets to be oriented clockwise in each void square and octagon. The transition graph~\cite{Sutherland88S} of our elementary process involves a single non-trivial loop of length $L_p$, surrounding a single square or a single octagon. Then, within our convention, the overlap between the two NNVB states is given by \begin{equation}\label{eq:omega1} \omega_p = \langle 1_p|2_p\rangle = (-1)^{\frac{L_p}{2}}~2^{1-\frac{L_p}{2}} ~. 
\end{equation} The diagonal and off-diagonal matrix elements of the Hamiltonian, \begin{equation} E_{0,p}=\langle 1_p | \mathcal{H}_{\text{Heis}} | 1_p\rangle=\langle 2_p | \mathcal{H}_{\text{Heis}} | 2_p\rangle, ~~~v_p=\langle 1_p | \mathcal{H}_{\text{Heis}} | 2_p\rangle, \end{equation} can be found using standard rules~\cite{Sutherland88S, RokhsarKivelsonS, ZengElser95S, MambriniMila2000S, Misguich02S, Misguich03S, Ralko2009S}. In the basis $\{|1_p\rangle, |2_p\rangle\}$ we have \begin{equation} \mathcal{H}_{\text{NNVB}}=(\mathcal{O}^{-\frac{1}{2}}\mathcal{H}_{\text{Heis}}\mathcal{O}^{-\frac{1}{2}})_{\text{NNVB}}= \left(\begin{array}{cc} 1 & \omega_p\\ \omega_p & 1 \end{array}\right)^{-\frac{1}{2}} \left(\begin{array}{cc} E_{0,p} & v_p\\ v_p & E_{0,p} \end{array}\right) \left(\begin{array}{cc} 1 & \omega_p\\ \omega_p & 1 \end{array}\right)^{-\frac{1}{2}} =\left(\begin{array}{cc} E_{0,p}+V_p & t_p\\ t_p & E_{0,p}+V_p \end{array}\right) \end{equation} with \begin{equation}\label{eq:tV} t_p = + h_p \frac{\omega_p}{1-\omega_p^2},~~V_p=-t_p ~\omega_p,~~\text{where}~h_p=v_p/\omega_p-E_{0,p}~. \end{equation} The quantity $h_p$ corresponds to Eq.~(12) of Schwandt {\it et al}~\cite{Schwandt2010S}, modulo a factor of 3/4 related to the redefinition of the Hamiltonian; see passage below Eq.~(10) in [\onlinecite{Schwandt2010S}]. So Eq.~(\ref{eq:tV}) is consistent with Eq.~(40) of [\onlinecite{Schwandt2010S}]. In other words, the above $2\!\times\!2$ truncation is equivalent with the infinite-order cluster expansion of [\onlinecite{Schwandt2010S}]. \subsection{C. Potential terms} Here we discuss the potential terms of the effective QDM. As explained in [\onlinecite{IoannisZ2S}], the largest contributions to the potential energy arise from processes involving a single defect triangle, and as such they can be absorbed in a global energy shift, since the total number of defect triangles is a constant in the NNVB basis. 
The remaining contributions can be divided into binding energies among two, three, etc. defect triangles. In the present case, the most important processes (i.e. the ones around the AA-squares) involve up to two nearby defect triangles, for which the binding energy is about 0.01~\cite{IoannisZ2S}, i.e. 10 times smaller than the dominant tunneling amplitudes. This is why we can safely disregard the potential terms in the effective QDM description of the square-kagome. \subsection{D. Finite clusters for the octagonal processes} Figure~\ref{fig:supp1} shows the finite-size spin-$\frac{1}{2}$ Heisenberg clusters that are used to extract the tunneling parameters of the 18 different octagonal processes (for the processes around AA-squares see main text). \begin{figure*}[!h] \includegraphics[width=0.52\textwidth,clip]{FigSupp01a} \\ \vspace{8px} \includegraphics[width=0.52\textwidth,clip]{FigSupp01b} \\ \vspace{8px} \includegraphics[width=0.50\textwidth,clip]{FigSupp01c} \\ \vspace{8px} \includegraphics[width=0.50\textwidth,clip]{FigSupp01d} \caption{Finite clusters used to extract the octagonal tunneling parameters of the effective Hamiltonian. The gray triangles are the defect triangles involved in each tunneling process. }\label{fig:supp1} \end{figure*}
{ "redpajama_set_name": "RedPajamaArXiv" }
8,216
{"url":"https:\/\/gmatclub.com\/forum\/math-revolution-approach-ds-219958-160.html","text":"GMAT Question of the Day - Daily to your Mailbox; hard ones only\n\n It is currently 16 Aug 2018, 16:45\n\n### GMAT Club Daily Prep\n\n#### Thank you for using the timer - this advanced tool can estimate your performance and suggest more practice questions. We have subscribed you to Daily Prep Questions via email.\n\nCustomized\nfor You\n\nwe will pick new questions that match your level based on your Timer History\n\nTrack\n\nevery week, we\u2019ll send you an estimated GMAT score based on your performance\n\nPractice\nPays\n\nwe will pick new questions that match your level based on your Timer History\n\n# Math Revolution Approach (DS)\n\nAuthor Message\nMath Revolution GMAT Instructor\nJoined: 16 Aug 2015\nPosts: 6020\nGMAT 1: 760 Q51 V42\nGPA: 3.82\nRe: Math Revolution Approach (DS)\u00a0 [#permalink]\n\n### Show Tags\n\n26 Feb 2017, 21:33\nIs a=b?\n\n1) $$a^2=b^2$$\n2) $$a=2$$\n\n==> In the original condition, there are 2 variables (a,b) and in order to match the number of variables to the number of equations, there must be 2 equations. Since there is 1 for con 1) and 1 for con 2), C is most likely to be the answer. By solving con 1) and con 2), from a=2, you get $$b^2=2^2=4$$, then b=\u00b12. 
Thus, (a,b)=(2,2) yes but (a,b)=(2,-2) no, hence it is not sufficient.\n\n_________________\n\nMathRevolution: Finish GMAT Quant Section with 10 minutes to spare\nThe one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy.\n\"Only $99 for 3 month Online Course\" \"Free Resources-30 day online access & Diagnostic Test\" \"Unlimited Access to over 120 free video lessons - try it yourself\" Math Revolution GMAT Instructor Joined: 16 Aug 2015 Posts: 6020 GMAT 1: 760 Q51 V42 GPA: 3.82 Re: Math Revolution Approach (DS) [#permalink] ### Show Tags 02 Mar 2017, 01:38 If mn\u22600, what is the value of $$\\frac{m+1}{m}-\\frac{n+1}{n}$$? 1) m=n 2) m=1 ==> If you modify the original condition and the question, you get $$\\frac{m+1}{m}-\\frac{n+1}{n}=\\frac{n(m+1)-m(n+1)}{mn}= \\frac{nm+n-mn-m}{mn}=\\frac{n-m}{mn}=?$$. Then, for con 1), m=n, so (n-m)\/mn=0, hence it is unique and sufficient. The answer is A. Answer: A _________________ MathRevolution: Finish GMAT Quant Section with 10 minutes to spare The one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy. \"Only$99 for 3 month Online Course\"\n\"Free Resources-30 day online access & Diagnostic Test\"\n\"Unlimited Access to over 120 free video lessons - try it yourself\"\n\nMath Revolution GMAT Instructor\nJoined: 16 Aug 2015\nPosts: 6020\nGMAT 1: 760 Q51 V42\nGPA: 3.82\nRe: Math Revolution Approach (DS)\u00a0 [#permalink]\n\n### Show Tags\n\n03 Mar 2017, 01:12\nIf a, b, and c are integers, is abc an even?\n\n1) a+b is an even\n2) b+c is an even\n\n==> In the original condition, there are 3 variables (a,b,c) and in order to match the number of variables to the number of equations, there must be 3 equations. Since there is 1 for con 1) and 1 for con 2), E is most likely to be the answer. 
By solving con 1) and con 2), if (a,b,c)=(1,1,1), you get a+b+c=1+1+1=3=odd, so no, but if (a,b,c)=(2,2,2), you get a+b+c=2+2+2=6=even, so yes, hence it is not sufficient.\n\n_________________\n\nMathRevolution: Finish GMAT Quant Section with 10 minutes to spare\nThe one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy.\n\"Only $99 for 3 month Online Course\" \"Free Resources-30 day online access & Diagnostic Test\" \"Unlimited Access to over 120 free video lessons - try it yourself\" Math Revolution GMAT Instructor Joined: 16 Aug 2015 Posts: 6020 GMAT 1: 760 Q51 V42 GPA: 3.82 Re: Math Revolution Approach (DS) [#permalink] ### Show Tags 08 Mar 2017, 01:13 What is the perimeter of a certain right triangle? 1) The hypotenuse\u2019s length is 10 2) The triangle\u2019s area is 24 ==> In the original condition, for a right triangle, there are 2 variables (2 legs) and in order to match the number of variables to the number of equations, there must be 2 equations. Since there is 1 for con 1) and 1 for con 2), C is most likely to be the answer. By solving con 1) and con 2), you get 6:8:10 and the perimeter of the right triangle becomes 6+8+10=24, hence unique and sufficient. Therefore, the answer is C. Answer: C _________________ MathRevolution: Finish GMAT Quant Section with 10 minutes to spare The one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy. 
\"Only$99 for 3 month Online Course\"\n\"Free Resources-30 day online access & Diagnostic Test\"\n\"Unlimited Access to over 120 free video lessons - try it yourself\"\n\nMath Revolution GMAT Instructor\nJoined: 16 Aug 2015\nPosts: 6020\nGMAT 1: 760 Q51 V42\nGPA: 3.82\nRe: Math Revolution Approach (DS)\u00a0 [#permalink]\n\n### Show Tags\n\n09 Mar 2017, 02:39\nIs a positive integer n greater than 1 divisible by 2?\n\n1) 24\/n is an integer\n2) 32\/n is an integer\n\n==> In the original condition, there is 1 variable (n) and in order to match the number of variables to the number of equations, there must be 1 equation. Since there is 1 for con 1) and 1 for con 2), D is most likely to be the answer. For con 1), n=2 yes, but n=3 no, hence it is not sufficient.\nFor con 2), from $$32=2^5$$, n always has 2 as its factor, hence yes, it is sufficient.\n\n_________________\n\nMathRevolution: Finish GMAT Quant Section with 10 minutes to spare\nThe one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy.\n\"Only $99 for 3 month Online Course\" \"Free Resources-30 day online access & Diagnostic Test\" \"Unlimited Access to over 120 free video lessons - try it yourself\" Math Revolution GMAT Instructor Joined: 16 Aug 2015 Posts: 6020 GMAT 1: 760 Q51 V42 GPA: 3.82 Re: Math Revolution Approach (DS) [#permalink] ### Show Tags 10 Mar 2017, 03:12 If the average (arithmetic mean) of set A is 10,000 and the average (arithmetic mean) of set B is 10,000, what is the range of set A and set B combined? 1) The range of set A is 6,000 2) The range of set B is 3,000 ==> If you modify the original condition and the question, from range= Max-min, you get set A: Ra =Ma-ma and set B: Rb=Mb-mb. Then, there are 6 variables and 2 equations, and in order to match the number of variables to the number of equations, there must be 4 more equations. Since there is 1 for con 1) and 1 for con 2), E is most likely to be the answer. 
By solving con 1) and con 2), the answer becomes E as well. Also, there is no relationship between the average and the range, and thus the answer is definitely E. Answer: E _________________ MathRevolution: Finish GMAT Quant Section with 10 minutes to spare The one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy. \"Only$99 for 3 month Online Course\"\n\"Free Resources-30 day online access & Diagnostic Test\"\n\"Unlimited Access to over 120 free video lessons - try it yourself\"\n\nMath Revolution GMAT Instructor\nJoined: 16 Aug 2015\nPosts: 6020\nGMAT 1: 760 Q51 V42\nGPA: 3.82\nRe: Math Revolution Approach (DS)\u00a0 [#permalink]\n\n### Show Tags\n\n12 Mar 2017, 18:44\nIs a positive integer x a factor of 12?\n\n1) 3x is a factor of 48\n2) 2x is a factor of 12\n\n==> In the original condition, there is 1 variable (x) and in order to match the number of variables to the number of equations, there must be 1 more equation. Since there is 1 for con 1) and 1 for con 2), D is most likely to be the answer.\nFor con 1), \u201c3x is a factor of 48\u201d becomes \u201cx is a factor of 16\u201d, and x=2 yes but x=8 no, hence it is not sufficient.\nFor con 2), \u201c2x is a factor of 12\u201d becomes \u201cx is a factor of 6\u201d, hence it is always yes and sufficient.\n\n_________________\n\nMathRevolution: Finish GMAT Quant Section with 10 minutes to spare\nThe one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy.\n\"Only $99 for 3 month Online Course\" \"Free Resources-30 day online access & Diagnostic Test\" \"Unlimited Access to over 120 free video lessons - try it yourself\" Math Revolution GMAT Instructor Joined: 16 Aug 2015 Posts: 6020 GMAT 1: 760 Q51 V42 GPA: 3.82 Re: Math Revolution Approach (DS) [#permalink] ### Show Tags 12 Mar 2017, 18:48 There are 30 consecutive integers. What is the sum of the integers? 
1) The sum of the smallest integer and the greatest integer is -1 2) The greatest integer is 14. ==> In the original condition, there is 1 variable (n, n+1, n+2,\u2026\u2026n+29), and in order to match the number of variables to the number of equations, there must be 1 equation. Since there is 1 for con 1) and 1 for con 2), D is most likely to be the answer. For con 1), you get -15~14, so the sum=-15, hence unique and sufficient. For con 2), you also get -15~14, so the sum=-15, hence unique and sufficient. Thus, con 1) = con 2). The answer is D. Answer: D _________________ MathRevolution: Finish GMAT Quant Section with 10 minutes to spare The one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy. \"Only$99 for 3 month Online Course\"\n\"Free Resources-30 day online access & Diagnostic Test\"\n\"Unlimited Access to over 120 free video lessons - try it yourself\"\n\nMath Revolution GMAT Instructor\nJoined: 16 Aug 2015\nPosts: 6020\nGMAT 1: 760 Q51 V42\nGPA: 3.82\nRe: Math Revolution Approach (DS)\u00a0 [#permalink]\n\n### Show Tags\n\n15 Mar 2017, 02:09\nIs a positive integer n a multiple of 12\n\n1) n is a multiple of 6\n2) n is a multiple of 24\n\n==> In the original condition, there is 1 variable (n) and in order to match the number of variables to the number of equations, there must be 1 equation. Since there is 1 for con 1) and 1 for con 2), D is most likely to be the answer. 
For remainder questions, you always use direct substitution.\nFor con 1), n=6 no, n=12 yes, hence not sufficient.\nFor con 2), n=24,48,\u2026, hence it is always yes and sufficient.\n\n_________________\n\nMathRevolution: Finish GMAT Quant Section with 10 minutes to spare\nThe one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy.\n\"Only $99 for 3 month Online Course\" \"Free Resources-30 day online access & Diagnostic Test\" \"Unlimited Access to over 120 free video lessons - try it yourself\" Math Revolution GMAT Instructor Joined: 16 Aug 2015 Posts: 6020 GMAT 1: 760 Q51 V42 GPA: 3.82 Re: Math Revolution Approach (DS) [#permalink] ### Show Tags 16 Mar 2017, 01:24 Is x>y>z? 1) x>y 2) y>z ==> In the original condition, there are 3 variables (x,y,z) and in order to match the number of variables to the number of equations, there must be 3 equations. Since there is 1 for con 1) and 1 for con 2), E is most likely to be the answer. By solving con 1) and con 2), from x>y>z, it is always yes and sufficient. Therefore, the answer is C. Answer: C _________________ MathRevolution: Finish GMAT Quant Section with 10 minutes to spare The one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy. \"Only$99 for 3 month Online Course\"\n\"Free Resources-30 day online access & Diagnostic Test\"\n\"Unlimited Access to over 120 free video lessons - try it yourself\"\n\nMath Revolution GMAT Instructor\nJoined: 16 Aug 2015\nPosts: 6020\nGMAT 1: 760 Q51 V42\nGPA: 3.82\nRe: Math Revolution Approach (DS)\u00a0 [#permalink]\n\n### Show Tags\n\n19 Mar 2017, 17:58\nn=?\n\n1) twice n equals to n+1\n2) n times n equals to n\n\n==> In the original condition, there is 1 variable (n) and in order to match the number of variables to the number of equations, there must be 1 equation. 
Since there is 1 for con 1) and 1 for con 2), D is most likely to be the answer.\nFor con 1), from 2n=n+1, you get n=1, hence sufficient.\nFor con 2), from $$n^2=n$$ and $$n^2-n=0$$, n(n-1)=0, you get n=0,1, hence it is not unique and not sufficient.\n\n_________________\n\nMathRevolution: Finish GMAT Quant Section with 10 minutes to spare\nThe one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy.\n\"Only $99 for 3 month Online Course\" \"Free Resources-30 day online access & Diagnostic Test\" \"Unlimited Access to over 120 free video lessons - try it yourself\" Math Revolution GMAT Instructor Joined: 16 Aug 2015 Posts: 6020 GMAT 1: 760 Q51 V42 GPA: 3.82 Re: Math Revolution Approach (DS) [#permalink] ### Show Tags 19 Mar 2017, 17:59 Is a positive integer x a factor of 24? 1) 3x is a factor of 48 2) 2x is a factor of 24 ==> In the original condition, there is 1 variable (x) and in order to match the number of variables to the number of equations, there must be 1 equation. Since there is 1 for con 1) and 1 for con 2), D is most likely to be the answer. For con 1), x is a factor of 16, so x=8 yes, x=16 no, hence not sufficient. For con 2), x is a factor of 12, so it is always yes, hence sufficient. Therefore, the answer is B. Answer: B _________________ MathRevolution: Finish GMAT Quant Section with 10 minutes to spare The one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy. 
\"Only$99 for 3 month Online Course\"\n\"Free Resources-30 day online access & Diagnostic Test\"\n\"Unlimited Access to over 120 free video lessons - try it yourself\"\n\nMath Revolution GMAT Instructor\nJoined: 16 Aug 2015\nPosts: 6020\nGMAT 1: 760 Q51 V42\nGPA: 3.82\nRe: Math Revolution Approach (DS)\u00a0 [#permalink]\n\n### Show Tags\n\n20 Mar 2017, 03:40\nIf x and y are integers greater than 1 and x>y, what are the values of x and y?\n1) x+y=13\n2) xy=22\n\n==> In the original condition, there are 2 variables (x,y) and in order to match the number of variables to the number of equations, there must be 2 equations. Since there is 1 for con 1) and 1 for con 2), C is most likely to be the answer. By solving con 1) and con 2), you get x=11 and y=2, hence it is unique and sufficient. The answer is c. However, this is an integer question, one of the key questions, so you apply CMT 4(A).\nFor con 1), from (x,y)=(11,2),(10,3), it is not unique and not sufficient.\nFor con 2), you only get (x,y)=(2,11), hence it is unique.\n\nTherefore, the answer is B, not C.\n_________________\n\nMathRevolution: Finish GMAT Quant Section with 10 minutes to spare\nThe one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy.\n\"Only $99 for 3 month Online Course\" \"Free Resources-30 day online access & Diagnostic Test\" \"Unlimited Access to over 120 free video lessons - try it yourself\" Math Revolution GMAT Instructor Joined: 16 Aug 2015 Posts: 6020 GMAT 1: 760 Q51 V42 GPA: 3.82 Re: Math Revolution Approach (DS) [#permalink] ### Show Tags 22 Mar 2017, 02:48 In the x-y plane there is a line K, (x\/a)+(y\/b)=1. What is the x-intercept of line K? 1) a=b 2) a=1 ==> If you modify the original condition and the question, the x-intercept is the value of x when y=0, hence from (x\/a)=1, you get x=a, so you only need to find a. Therefore, the answer is B. 
Answer: B _________________ MathRevolution: Finish GMAT Quant Section with 10 minutes to spare The one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy. \"Only$99 for 3 month Online Course\"\n\"Free Resources-30 day online access & Diagnostic Test\"\n\"Unlimited Access to over 120 free video lessons - try it yourself\"\n\nMath Revolution GMAT Instructor\nJoined: 16 Aug 2015\nPosts: 6020\nGMAT 1: 760 Q51 V42\nGPA: 3.82\nRe: Math Revolution Approach (DS)\u00a0 [#permalink]\n\n### Show Tags\n\n23 Mar 2017, 01:21\nIs wp+st>0?\n1) ws+pt>0\n2) wt+ps>0\n\n==> In the original condition, there are 4 variables (w,p,s,t) and in order to match the number of variables to the number of equations, there must be 4 equations. Since there is 1 for con 1) and 1 for con 2), E is most likely to be the answer. By solving con 1) and con 2), you get (w,s,p,t)=(1,1,1,1) yes, but (w,s,p,t)=(3,-1,-4,-1) no, hence it is not sufficient.\n\n_________________\n\nMathRevolution: Finish GMAT Quant Section with 10 minutes to spare\nThe one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy.\n\"Only $99 for 3 month Online Course\" \"Free Resources-30 day online access & Diagnostic Test\" \"Unlimited Access to over 120 free video lessons - try it yourself\" Math Revolution GMAT Instructor Joined: 16 Aug 2015 Posts: 6020 GMAT 1: 760 Q51 V42 GPA: 3.82 Re: Math Revolution Approach (DS) [#permalink] ### Show Tags 24 Mar 2017, 03:53 How much is the total price of the 3 products? 1) The sum of any two prices of these 3 products is$8,000\n2) At least one of them is 4,000\n\n==> In the original condition, there are 3 variables, and in order to match the number of variables to the number of equations, there must be 3 equations. Since there is 1 for con 1) and 1 for con 2), E is most likely to be the answer. 
By solving con 1) and con 2), the price of each 3 products becomes $4,000, hence it is unique and sufficient. This is an inequality question, one of the key questions, so you apply CMT 4 (A: if you get C too easily, consider A or B). For con 1), the price of each 3 products always becomes$4,000, hence it is unique and sufficient. For con 2), it is unknown, hence it is not sufficient.\n\n_________________\n\nMathRevolution: Finish GMAT Quant Section with 10 minutes to spare\nThe one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy.\n\"Only $99 for 3 month Online Course\" \"Free Resources-30 day online access & Diagnostic Test\" \"Unlimited Access to over 120 free video lessons - try it yourself\" Math Revolution GMAT Instructor Joined: 16 Aug 2015 Posts: 6020 GMAT 1: 760 Q51 V42 GPA: 3.82 Re: Math Revolution Approach (DS) [#permalink] ### Show Tags 26 Mar 2017, 18:30 m=? 1) 5 is a factor of m 2) m is a prime number ==> In the original condition, there is 1 variable (m) and in order to match the number of variables to the number of equations, there must be 1 equation. Since there is 1 for con 1) and 1 for con 2), D is most likely to be the answer. For con 1), from m=5,10\u2026, it is not unique and not sufficient. For con 2), from m=5,7\u2026, it is not unique and not sufficient. By solving con 1) and con 2), you get m=5, hence it is unique and sufficient. Therefore, the answer is C. Answer: C _________________ MathRevolution: Finish GMAT Quant Section with 10 minutes to spare The one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy. 
\"Only$99 for 3 month Online Course\"\n\"Free Resources-30 day online access & Diagnostic Test\"\n\"Unlimited Access to over 120 free video lessons - try it yourself\"\n\nMath Revolution GMAT Instructor\nJoined: 16 Aug 2015\nPosts: 6020\nGMAT 1: 760 Q51 V42\nGPA: 3.82\nRe: Math Revolution Approach (DS)\u00a0 [#permalink]\n\n### Show Tags\n\n26 Mar 2017, 18:31\nis an integer n even?\n\n1) n(n+2)=even\n2) n(n+3)=even\n\n==> In the original condition, there is 1 variable (n) and in order to match the number of variables to the number of equations, there must be 1 equation. Since there is 1 for con 1) and 1 for con 2), D is most likely to be the answer.\n\nFor con 1), in order to get n(n+2)=even, you get n=even, hence yes, it is sufficient.\nFor con 2), n=2 yes, but x=3 no, hence it is not sufficient.\n\n_________________\n\nMathRevolution: Finish GMAT Quant Section with 10 minutes to spare\nThe one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy.\n\"Only $99 for 3 month Online Course\" \"Free Resources-30 day online access & Diagnostic Test\" \"Unlimited Access to over 120 free video lessons - try it yourself\" Math Revolution GMAT Instructor Joined: 16 Aug 2015 Posts: 6020 GMAT 1: 760 Q51 V42 GPA: 3.82 Re: Math Revolution Approach (DS) [#permalink] ### Show Tags 29 Mar 2017, 01:44 When a positive integer $$m^2$$ is divided by 4, what is the remainder? 1) When m is divided by 3, the remainder is 1 2) When m is divided by 2, the remainder is 1 ==> In the original condition, there is 1 variable (m) and in order to match the number of variables to the number of equations, there must be 1 equation. Since there is 1 for con 1) and 1 for con 2), D is most likely to be the answer. For remainder questions, it is best to use direct substitution. 
For con 1), if you substitute from m=3p+1=1,4,7\u2026, from m=1, 1^2=1=4(0)+1, the remainder=1, and if m=4, from 4^2=16=4(4)+0, the remainder=0, hence it is not unique and not sufficient. For con 2), from m=2q+1=1,3,5,7,\u2026, you get m2=1,9,25,49.., and the remainder divided by 4 always becomes 1, hence it is unique and sufficient. Therefore, the answer is B. Answer: B _________________ MathRevolution: Finish GMAT Quant Section with 10 minutes to spare The one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy. \"Only$99 for 3 month Online Course\"\n\"Free Resources-30 day online access & Diagnostic Test\"\n\"Unlimited Access to over 120 free video lessons - try it yourself\"\n\nMath Revolution GMAT Instructor\nJoined: 16 Aug 2015\nPosts: 6020\nGMAT 1: 760 Q51 V42\nGPA: 3.82\nRe: Math Revolution Approach (DS)\u00a0 [#permalink]\n\n### Show Tags\n\n30 Mar 2017, 02:16\nIs x<1<y?\n\n1) x<\u221ax<y\n2) x<\u221ay<y\n\n==> In the original condition, there are 2 variables (x,y) and in order to match the number of variables to the number of equations, there must be 2 equations. Since there is 1 for con 1) and 1 for con 2), C is most likely to be the answer. By solving con 1) and con 2), from x<\u221ax, you get 0<x<1, and from \u221ay<y, you get y>1. 
Then, you get x<1<y, hence always yes.\n\n_________________\n\nMathRevolution: Finish GMAT Quant Section with 10 minutes to spare\nThe one-and-only World\u2019s First Variable Approach for DS and IVY Approach for PS with ease, speed and accuracy.\n\"Only \\$99 for 3 month Online Course\"\n\"Free Resources-30 day online access & Diagnostic Test\"\n\"Unlimited Access to over 120 free video lessons - try it yourself\"\n\nRe: Math Revolution Approach (DS) &nbs [#permalink] 30 Mar 2017, 02:16\n\nGo to page \u00a0 Previous \u00a0 \u00a01\u00a0\u00a0...\u00a0\u00a06\u00a0\u00a0\u00a07\u00a0\u00a0\u00a08\u00a0\u00a0\u00a09\u00a0\u00a0\u00a010\u00a0\u00a0\u00a011\u00a0\u00a0\u00a012\u00a0\u00a0...\u00a0\u00a027\u00a0 \u00a0 Next \u00a0[ 529 posts ]\n\nDisplay posts from previous: Sort by\n\n# Events & Promotions\n\n Powered by phpBB \u00a9 phpBB Group | Emoji artwork provided by EmojiOne Kindly note that the GMAT\u00ae test is a registered trademark of the Graduate Management Admission Council\u00ae, and this site has neither been reviewed nor endorsed by GMAC\u00ae.","date":"2018-08-16 23:45:59","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 1, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.7174180746078491, \"perplexity\": 4815.630223536811}, \"config\": {\"markdown_headings\": true, \"markdown_code\": false, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": 
{\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2018-34\/segments\/1534221211316.39\/warc\/CC-MAIN-20180816230727-20180817010727-00678.warc.gz\"}"}
null
null
glibc (GNU C Library — GNU бібліотека Сі) — стандартна бібліотека мови C з проекту GNU, яка забезпечує системні виклики та основні функції. Вона написана Фондом вільного програмного забезпечення для операційних систем родини GNU. glibc повністю відповідає вимогам стандартів ISO C99, C11 та POSIX.1-2008, і випущена під ліцензією GNU LGPL. glibc є основою більшості Linux-дистрибутивів, за винятком OpenWrt. Історія Бібліотека створена Фондом вільного програмного забезпечення (Free Software Foundation, FSF) для операційних систем GNU. Розробка бібліотеки була переглянута комітетом з 2001 на чолі з провідним розробником і підтримувачем Ульріхом Дреппером (Ulrich Drepper) з Red Hat. Спочатку glibc була написана Роландом Мак-Гратом (Roland McGrath), котрий працював у FSF в 1980-х роках. У лютому 1988 року FSF представила glibc як бібліотеку, що має найповнішу функціональність, необхідну для ANSI C. У 1992 були імплементовані функції ANSI C-1989 та POSIX.1-1990 і тривала робота над підтримкою POSIX.2. Форки Тимчасовий форк glibc На початку 1990-х років розробники ядра Linux створили форк glibc. Він був названий «Linux libc». Коли FSF випустила в 1996 році glibc 2.0, яка підтримувала IPv6, 64-бітовий доступ до даних, багатонитеві програми, сумісність з майбутніми версіями і більш переносний початковий код, розробники Linux перервали розробку Linux libc і почали використовувати glibc від FSF. eglibc Деякий час існував eglibc — форк glibc, повністю сумісний з нею на рівні API і ABI. Eglibc відрізнялась інтеграцією деяких додаткових напрацювань для вбудовуваних систем, нижчими системними вимогами (підтримка компіляції з деякими відключеними компонентами), можливістю гнучкого налаштування компонентів, поліпшеною підтримкою крос-компіляції і крос-тестування. Напрацювання eglibc було інтегровано у glibc. Підтримувані архітектури та ядра Glibc використовується в системах, на яких працює багато різних ОС, і на різних архітектурах. 
Найбільш часто glibc використовується на машинах x86-архітектури з ОС Linux. Офіційно також підтримуються архітектури: ARM DEC Alpha ETRAX CRIS Motorola 68k PowerPC s390 SPARC Критика glibc критикують за її «роздутість» і повільну роботу в порівнянні з іншими минулими бібліотеками. Тому були створені кілька альтернативних стандартних бібліотек мови Сі (dietlibc, uClibc і Newlib). Виноски Див. також Linux Standard Base Посилання GNU libc homepage GNU libc developers' page Бібліотеки C Системне програмне забезпечення Багатоплатформне вільне програмне забезпечення
{ "redpajama_set_name": "RedPajamaWikipedia" }
4,661
//
// Copyright (c) Microsoft. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

using System;
using System.Collections.Generic;
using System.Net;
using System.Security.Cryptography;
using System.Text;
// using System.Web.Script.Serialization;
using Microsoft.WindowsAzure.Management.RecoveryServices;
using Microsoft.WindowsAzure.Management.SiteRecovery;
using Microsoft.WindowsAzure.Management.SiteRecovery.Models;
using Microsoft.Azure.Common.Internals;
using Hyak.Common.TransientFaultHandling;
using System.Diagnostics.CodeAnalysis;
using System.IO;
using System.Xml;
using Xunit;
using Microsoft.Azure.Test;
using Newtonsoft.Json;
using Microsoft.Azure.Test.HttpRecorder;

namespace SiteRecovery.Tests
{
    /// <summary>
    /// Shared base class for Site Recovery scenario tests: client factories,
    /// agent-authentication (CIK) header generation, and common job helpers.
    /// </summary>
    public class SiteRecoveryTestsBase : TestBase
    {
        // Vault credentials/identifiers shared by the test fixtures.
        // NOTE(review): these appear to be populated by test setup elsewhere — confirm.
        public static string VaultKey;
        public static string VaultLocation = "Southeast Asia";
        public static string MyCloudService;
        public static string MyVaultName;

        // Common request headers; a fresh client request id per test run.
        protected static CustomRequestHeaders RequestHeaders = new CustomRequestHeaders
        {
            ClientRequestId = Guid.NewGuid().ToString(),
        };

        protected readonly RecordedDelegationHandler CustomHttpHandler =
            new RecordedDelegationHandler { StatusCodeToReturn = HttpStatusCode.OK };

        /// <summary>
        /// Creates a Recovery Services management client wired through the
        /// supplied recording handler (pass-through mode).
        /// </summary>
        public RecoveryServicesManagementClient GetRecoveryServicesClient(RecordedDelegationHandler handler)
        {
            handler.IsPassThrough = true;
            // Fixed: removed a stray empty statement (";;") after the return.
            return this.GetRecoveryServicesManagementClient().WithHandler(handler);
        }

        /// <summary>
        /// Creates a Site Recovery management client wired through the
        /// supplied recording handler (pass-through mode).
        /// </summary>
        public SiteRecoveryManagementClient GetSiteRecoveryClient(RecordedDelegationHandler handler)
        {
            handler.IsPassThrough = true;
            return this.GetSiteRecoveryManagementClient().WithHandler(handler);
        }

        /// <summary>
        /// Builds the serialized CIK token used as the agent authentication
        /// header. The token is valid from one hour in the past (clock-skew
        /// allowance) for six hours, and carries an HMAC-SHA256 of its own
        /// pre-HMAC serialization keyed with <see cref="VaultKey"/>.
        /// </summary>
        /// <param name="clientRequestId">Client request id embedded in the token.</param>
        /// <returns>JSON-serialized CIK token including the HMAC.</returns>
        public string GenerateAgentAuthenticationHeader(string clientRequestId)
        {
            CikTokenDetails cikTokenDetails = new CikTokenDetails();

            DateTime currentDateTime = DateTime.Now;
            currentDateTime = currentDateTime.AddHours(-1);
            cikTokenDetails.NotBeforeTimestamp = TimeZoneInfo.ConvertTimeToUtc(currentDateTime);
            cikTokenDetails.NotAfterTimestamp = cikTokenDetails.NotBeforeTimestamp.AddHours(6);
            cikTokenDetails.ClientRequestId = clientRequestId;
            cikTokenDetails.Version = new Version(1, 2);
            cikTokenDetails.PropertyBag = new Dictionary<string, object>();

            // HMAC is computed over the serialization *without* the Hmac/HashFunction
            // fields, which are filled in afterwards.
            string shaInput = JsonConvert.SerializeObject(cikTokenDetails);

            // Fixed: HMACSHA256 is IDisposable — dispose it deterministically.
            using (HMACSHA256 sha = new HMACSHA256(Encoding.UTF8.GetBytes(VaultKey)))
            {
                cikTokenDetails.Hmac =
                    Convert.ToBase64String(sha.ComputeHash(Encoding.UTF8.GetBytes(shaInput)));
            }

            cikTokenDetails.HashFunction = CikSupportedHashFunctions.HMACSHA256.ToString();
            return JsonConvert.SerializeObject(cikTokenDetails);
        }

        #region CIK
        /// <summary>Payload of the channel integrity key (CIK) token.</summary>
        public class CikTokenDetails
        {
            public DateTime NotBeforeTimestamp { get; set; }
            public DateTime NotAfterTimestamp { get; set; }
            public string ClientRequestId { get; set; }
            public string HashFunction { get; set; }
            public string Hmac { get; set; }
            public Version Version { get; set; }
            public Dictionary<string, object> PropertyBag { get; set; }

            public override string ToString()
            {
                StringBuilder sb = new StringBuilder();
                sb.AppendLine("NotBeforeTimestamp: " + NotBeforeTimestamp);
                sb.AppendLine("NotAfterTimestamp: " + NotAfterTimestamp);
                sb.AppendLine("ClientRequestId: " + ClientRequestId);
                sb.AppendLine("Hmac: " + Hmac);
                return sb.ToString();
            }
        }

        /// <summary>Hash functions supported for the CIK token HMAC.</summary>
        public enum CikSupportedHashFunctions
        {
            HMACSHA256,
            HMACSHA384,
            HMACSHA512
        }
        #endregion

        /// <summary>
        /// Asserts that a job response carries a job with an id, no errors,
        /// and an HTTP 200 status.
        /// </summary>
        protected void ValidateResponse(JobResponse response)
        {
            Assert.NotNull(response.Job);
            Assert.NotNull(response.Job.ID);
            Assert.True(response.Job.Errors.Count < 1,
                "Errors found while doing planned failover operation");
            Assert.Equal(HttpStatusCode.OK, response.StatusCode);
        }

        /// <summary>
        /// Polls the given job once a minute until it reaches a terminal
        /// state, then asserts it did not fail.
        /// </summary>
        protected void WaitForJobToComplete(SiteRecoveryManagementClient client, string jobId)
        {
            var responseJob = client.Jobs.Get(jobId, RequestHeaders);
            while (responseJob.Job.StateDescription != "Completed")
            {
                // Fixed: stop polling on terminal failure states instead of
                // looping forever; the assert below then reports the failure.
                if (responseJob.Job.StateDescription == "Failed" ||
                    responseJob.Job.StateDescription == "Cancelled")
                {
                    break;
                }

                // Sleep for 1 min between polls.
                System.Threading.Thread.Sleep(60 * 1000);
                responseJob = client.Jobs.Get(jobId, RequestHeaders);
            }

            // Fixed: xUnit Assert.NotEqual takes (expected, actual).
            Assert.NotEqual("Failed", responseJob.Job.State);
        }
    }
}
{ "redpajama_set_name": "RedPajamaGithub" }
4,333
TOPIC: Is it possible to integrate with ipay88? Is it possible to integrate with the ipay88 payment gateway (www.ipay88.com/)? If yes, how to set it up? Sorry, the feature to integrate ipay88 is not available. Anyway, I will pass this to our product management team. They will analyse it and add this in the future.
{ "redpajama_set_name": "RedPajamaC4" }
3,777
Q: Multiple streams on TcpClient on different ports? I am a bit confused with the TcpClient class. I want to connect to my server and have 2 streams: one SSL and one regular non-secure TCP. So I connect like this currently: await _tcpClient.ConnectAsync(address,port); IsConnected = true; _networkStream = _tcpClient.GetStream(); _sslStream = new SslStream(_tcpClient.GetStream()); The problem is that I want to use my SSL data on a different port. So do I need to have two TcpClient instances, one for secure and one for non-secure? Or can I have multiple streams on different ports with this class? I am confused how it's done, and how this is meant to be set up properly. A: Yes, you will need two separate Socket instances. You could do that via two instances of TcpClient - personally I tend to prefer raw sockets, but that's up to you.
{ "redpajama_set_name": "RedPajamaStackExchange" }
532
To run the example, follow these steps: ```bash cd example python3 -m venv env . env/bin/activate pip install -r requirements.txt createdb django_admin_json_editor python manage.py migrate python manage.py createsuperuser python manage.py runserver ``` Open `http://localhost:8000/admin/app/jsonmodel/add/`. You will see: ![Example](example.png)
{ "redpajama_set_name": "RedPajamaGithub" }
1,402
\section{Introduction} The first experimental results from LHC once again raise the question at what transverse momenta particle production in $pp$ collisions is dominated by hard parton--parton interactions. A quantitative understanding of the relevant mechanisms is important not only for future studies of QCD phenomena, but also for controlling the strong interaction background in new particle searches. The challenge lies in the fact that the growth of the average multiplicities makes it very difficult to observe jets with moderate $p_T$, while at the same time the properties of non--perturbative semi--hard dynamics and its ability to produce particles with $p_T \sim$ few GeV are not well understood. In an earlier article \cite{Frankfurt:2003td}, we demonstrated that the nucleon's transverse partonic structure plays an essential role in the theoretical analysis of $pp$ collisions with hard processes. Experiments in hard exclusive electroproduction of vector mesons $\gamma^\ast p \rightarrow V + p$ and photoproduction of heavy quarkonia $\gamma p \rightarrow J/\psi + p$ have shown that the gluons with $10^{-4} < x < 10^{-1}$ are localized at small transverse distances of $0.4-0.5 \, \text{fm}$ (median, depending on $x$ and $Q^2$), much smaller than the characteristic range of soft interactions at high energies, see Fig.~\ref{fig:percent}a. Qualitatively, this is explained by the fact that Gribov diffusion in the partonic wave function, which causes the range of soft interactions to grow with energy \cite{Gribov:1973jg}, is suppressed for highly virtual constituents. In $pp$ scattering this two--scale picture implies that hard processes mostly occur in central collisions, where the areas occupied by partons in the relevant $x$--range overlap. Peripheral collisions constitute the dominant part of the overall inelastic cross section without contributing much to inclusive jet production, see Fig.~\ref{fig:percent}b. 
A trigger on a hard process thus, on average, selects central $pp$ collisions \cite{Frankfurt:2003td}. Numerical studies show that at a center--of--mass energy $\surd s = 14 \, \textrm{TeV}$ a dijet trigger on $p_T \sim \textrm{few 10 GeV}$ reduces the median $pp$ impact parameter $b$ by a factor of $\sim 2$ compared to minimum--bias inelastic collisions; the reduction is nearly as strong at the current LHC energy of $7 \, \textrm{TeV}$ (see below). \begin{figure}[b] \includegraphics[width=0.42\textwidth]{percent.eps} \caption{(a) The two--scale picture of transverse nucleon structure at high energies (transverse view). (b) Its implication for $pp$ collisions. Peripheral collisions constitute the dominant part of the overall inelastic cross section. Hard processes happen predominantly in central collisions, where the areas occupied by large--$x$ partons overlap.} \label{fig:percent} \end{figure} Here we point out that these insights into the transverse geometry of $pp$ collisions can be used to address the question at what transverse momenta particle production is governed by hard parton--parton processes. The key observation is that the transverse multiplicity, measured in the direction perpendicular to the transverse momentum of the trigger particle (or jet), is correlated with the average impact parameter in the underlying $pp$ event. If the trigger particle originates from a hard parton--parton process, the underlying $pp$ collision is central, and the average impact parameter depends only weakly on $p_T$. The transverse multiplicity thus should be practically independent of $p_T$, and substantially larger than that in minimum bias events. If, however, the trigger particle is produced by soft interactions, the transverse multiplicity should be substantially smaller and reflect the average multiplicity in minimum--bias inelastic collisions. 
In this sense, the transverse multiplicity can serve as a diagnostic of the dynamics in the production of the trigger particle at given $p_T$. First theoretical suggestions for studies of the transverse multiplicity were put forward in Ref.~\cite{Marchesini:1988hj}. Experimental investigation of the correlation between the jet production and the structure of the underlying event were pioneered by the CDF experiment at the Tevatron \cite{Affolder:2001xt}. The first data on underlying event structure in collisions with hard processes at the LHC were recently reported by ATLAS \cite{EmilyNurse} and CMS \cite{Khachatryan:2010pv,Lucaroni}. Additional tests of the geometric correlations described here become possible with measurements of the dependence of the transverse multiplicity on the rapidity of the trigger jets. In particular, we predict that in the rapidity region not affected by the fragmentation of the trigger jets the enhancement of the multiplicity will persist and be isotropic in transverse space. In this way one could verify the universality of particle production in the central $pp$ collisions selected by hard processes. This article is organized as follows. In Sec.~\ref{sec:parton} we summarize our knowledge of the nucleon's transverse partonic structure and update our parametrization of the transverse gluonic size as a function of $x$. In Sec.~\ref{sec:impact} we use this information to study the impact parameter distributions of $pp$ events with hard processes in dependence on the transverse momentum $p_T$ of the trigger particle (jet), for the kinematics currently covered by LHC, and find that the median $b$ weakly depends on the trigger $p_T$ and is substantially smaller than that in minimum--bias inelastic collisions. 
In Sec.~\ref{sec:multiplicity} we discuss the connection between centrality and the transverse multiplicity, and how measurement of the latter provides an effective means of quantifying at what $p_T$ particle production is dominated by hard QCD processes. In Sec.~\ref{sec:rapidity} we consider the dependence of the multiplicity on the rapidity of the trigger, and how it can be used for additional tests of the dominance of small impact parameters in $pp$ collisions with hard processes. In Sec.~\ref{sec:future} we present several suggestions for further analysis of the $pp$ event structure data. A summary and discussion of our results are given in Sec.~\ref{sec:summary}. Our analysis relies on information on the nucleon's transverse partonic structure obtained from hard exclusive processes in $ep/\gamma p$ scattering. Extending our previous study \cite{Frankfurt:2003td}, we present here an updated parametrization of the transverse distribution of gluons, which takes into account the more recent HERA data \cite{Chekanov:2004mw,Aktas:2005xu} and permits realistic uncertainty estimates. The numerical results are nevertheless close to those obtained in our previous study. Current Monte Carlo (MC) generators for $pp$ events usually do not take into account the available experimental information on transverse nucleon structure and treat the distribution of gluons over transverse position as a free function. The typical setting for PYTHIA \cite{PYTHIA} and HERWIG \cite{HERWIG} correspond to a transverse area occupied by gluons which is a factor $\sim 2$ smaller than what is indicated by the HERA data (see below). In the analysis of experimental data, the shape of the transverse gluon distribution is usually treated as one of the tuning parameters, see e.g.\ Refs.~\cite{Affolder:2001xt,Khachatryan:2010pv}. While we do not directly address these technical issues here, our results certainly have implications for the design of future MC generators for $pp$ events at LHC. 
\section{Transverse partonic structure of nucleon} \label{sec:parton} Information on the transverse spatial distribution of gluons in the nucleon comes from the study of hard exclusive processes such as electroproduction of vector mesons, $\gamma^\ast p \rightarrow V + p$, or the photoproduction of heavy quarkonia, $\gamma p \rightarrow J/\psi + p$. Thanks to a QCD factorization theorem \cite{Collins:1996fb}, the amplitude of these processes in the leading--twist approximation can be expressed in terms of the gluon generalized parton distribution (or GPD), which parametrizes the matrix element for the emission and absorption of a gluon by the target. Of particular interest is its $t$--dependence in the ``diagonal'' case of equal momentum fraction of the emitted and absorbed gluons. It is described by the normalized two--gluon form factor $F_g (x, t| Q^2)$, where $x$ is the gluon momentum fraction and $t = -\bm{\Delta}_\perp^2$ the transverse momentum transfer to the target. This function can be regarded as the transverse form factor of gluons with longitudinal momentum fraction $x$ in the nucleon. Its Fourier transform describes the transverse spatial distribution of gluons with given $x$, \begin{equation} F_g (x, \rho | Q^2) \; \equiv \; \int\!\frac{d^2 \Delta_\perp}{(2 \pi)^2} \; e^{i (\bm{\Delta}_\perp \bm{\rho})} \; F_g (x, t = -\bm{\Delta}_\perp^2 | Q^2) , \label{rhoprof_def} \end{equation} where $\rho \equiv |\bm{\rho}|$ measures the distance from the transverse center--of--momentum of the nucleon, and the distribution is normalized such that $\int d^2\rho \, F_g (x, \rho | Q^2) = 1$. Experiments in hard exclusive processes actually probe the gluon GPD in the non--diagonal case (different momentum fraction of the emitted and absorbed gluon), because of the longitudinal momentum transfer required by kinematics. 
At $x \lesssim 10^{-2}$, QCD evolution allows one to relate the non-diagonal gluon GPD to the diagonal one at the input scale, and the diagonal two--gluon form factor can be directly inferred from the $t$--dependence of the measured cross sections. At larger $x$, the relation between the non--diagonal and diagonal GPDs generally becomes model--dependent, but useful information can still be extracted using GPD parametrizations. The $t$--dependence of the measured differential cross sections of exclusive processes at $|t| < 1 \, \text{GeV}^2$ is commonly described either by an exponential, or by a dipole form inspired by analogy with the nucleon elastic form factors. Correspondingly, we consider here two parametrizations of the two--gluon form factor: \begin{equation} F_g (x, t|Q^2) \;\; = \;\; \left\{ \begin{array}{l} \displaystyle \exp (B_g t/2) , \\[2ex] \displaystyle (1 - t/m_g^2)^{-2} , \end{array} \right. \label{twogl_exp_dip} \end{equation} where the parameters $B_g$ and $m_g$ are functions of $x$ and $Q^2$. The two parametrizations give very similar results if the functions are matched at $|t| = 0.5 \, \text{GeV}^2$, where they are best constrained by present data (see Fig.~3 of Ref.~\cite{Frankfurt:2006jp}); this corresponds to \begin{equation} B_g \;\; = \;\; 3.24/m_g^2 . \label{dip_exp} \end{equation} We use both parametrizations in our studies below; the difference between the results serves as an estimate of the uncertainty due to our lack of precise knowledge of the shape. The corresponding spatial distributions of gluons in the transverse plane, Eq.~(\ref{rhoprof_def}), are given by \begin{equation} F_g (x, \rho | Q^2) \;\; = \;\; \left\{ \begin{array}{l} \displaystyle (2 \pi B_g)^{-1} \, \exp [-\rho^2 / (2 B_g)] , \\[2ex] \displaystyle [m_g^2/(2\pi)] \; (m_g \rho/2) \; K_1 (m_{g} \rho ) , \end{array} \right. \label{f_rho_param} \end{equation} where $K_1$ denotes the modified Bessel function. 
Most of the experimental information on the nucleon's two--gluon form factor comes from $J/\psi$ photoproduction, which probes the gluon GPD at an effective scale $Q^2 \approx \, 3 \, \text{GeV}^2$, determined by the average transverse size of the $c\bar c$ pair during its interaction with the target, and momentum fractions of the order $x \sim M_{J/\psi}^2/W^2$ \cite{Frankfurt:1997fj}. When extracting the two--gluon form factor from the slope of the $J/\psi$ differential cross section, a correction is made for the effect of the finite $J/\psi$ size on the observed $t$--dependence, \begin{equation} B_{J/\psi} \;\; = \;\; B_g + \Delta B , \label{bpsi_bg_corr} \end{equation} where $\Delta B \approx 0.3 \, \text{GeV}^2$ from a dipole model estimate \cite{Frankfurt:1997fj}. The data from the FNAL E401/E458 broadband beam experiment at $\langle x \rangle = 0.05$ \cite{Binkley:1981kv}, in which the recoiling proton was detected, are described by an exponential two--gluon form factor with $B_g = 3.0 \, \text{GeV}^{-2}$ (see Fig.~\ref{fig:bpsi}), albeit with large errors, or a corresponding dipole form factor with $m_g^2 = 1.1 \, \text{GeV}^2$. Comparison with the mass parameter in the dipole parametrization of the nucleon's electromagnetic (Dirac) form factor, $m_{\rm em}^2 = 0.7\, \text{GeV}^2$, indicates that at these values of $x$ the average transverse gluonic radius squared is only $\sim 0.6$ of the transverse electromagnetic radius squared. \begin{figure} \includegraphics[width=.48\textwidth]{bpsi_xdep_param.eps} \caption[]{The exponential $t$--slope, $B_{J/\psi}$, of the differential cross section of exclusive $J/\psi$ photoproduction measured in the FNAL E401/E458 \cite{Binkley:1981kv}, HERA H1 \cite{Aktas:2005xu}, and ZEUS \cite{Chekanov:2002xi} experiments, as a function of $x = M_{J/\psi}^2/W^2$. (In the H1 and ZEUS results the quoted statistical and systematic uncertainties were added linearly.) 
The dashed lines represent the published two--dimensional fits to the H1 and ZEUS data \cite{Aktas:2005xu,Chekanov:2002xi}. The parameter $B_g$ in the exponential two--gluon form factor Eq.~(\ref{twogl_exp_dip}) is related to the measured $J/\psi$ slope by Eq.~(\ref{bpsi_bg_corr}). Our parametrization Eqs.~(\ref{bg_param})--(\ref{bg_param_last}) is shown by the solid line.} \label{fig:bpsi} \end{figure} The HERA data at $x < 10^{-2}$ \cite{Aktas:2005xu,Chekanov:2002xi} indicate that the transverse gluonic size of the nucleon increases with decreasing $x$ (see Fig.~\ref{fig:bpsi}). In the region $x \sim \textrm{few} 10^{-2}$ this effect is partly due to the contribution of the nucleon's pion cloud, which is strongly suppressed at $x > 0.1$ and fully developed only for $x < 0.01$ \cite{Strikman:2003gz}. Another contribution may arise from Gribov diffusion, which is suppressed by the hard scale $Q^2$ but still not negligible. Over the range covered by HERA, the rate of increase of the gluonic size can be parametrized by an effective Regge slope, $\alpha'_g$, which is smaller than that for corresponding soft processes, $\alpha'_{\rm soft} = 0.25 \, \text{GeV}^{-2}$. Averaging the fits to the HERA H1 and ZEUS data \cite{Aktas:2005xu,Chekanov:2002xi}, and accounting for the correction Eq.~(\ref{bpsi_bg_corr}), we parametrize the $x$--dependence of the gluonic exponential slope as (here $Q^2 = 3 \, \text{GeV}^2$) \begin{eqnarray} B_g (x) &=& B_{g0} \; + \; 2 \alpha'_g \; \ln (x_0/x) , \label{bg_param} \\ x_0 &=& 0.0012, \\ B_{g0} &=& 4.1 \; ({}^{+0.3}_{-0.5}) \; \text{GeV}^{-2}, \\ \alpha'_g &=& 0.140 \; ({}^{+0.08}_{-0.08}) \; \text{GeV}^{-2}. \label{bg_param_last} \end{eqnarray} The uncertainties in parentheses represent a rough estimate based on the range of values spanned by the H1 and ZEUS fits, with statistical and systematic uncertainties added linearly. 
One sees from Fig.~\ref{fig:bpsi} that the fit to the HERA data consistently extrapolates to the FNAL data point. The corresponding dipole parametrization obtained via Eq.~(\ref{dip_exp}) is close to the one used in our previous study \cite{Frankfurt:2003td}. The transverse spatial distribution of partons also changes with the resolution scale, $Q^2$, as a result of DGLAP evolution. Generally, the partons observed at a given momentum fraction $x$ and scale $Q^2$ are decay products of partons with $x' > x$ which existed at a lower scale, $Q_0^2$. In the leading--twist approximation the decay happens locally in transverse space. As a result, the transverse size observed at fixed $x$ shrinks with increasing $Q^2$, because the decaying partons at the lower scale had larger momentum fractions and were localized in a smaller transverse area. In order to calculate the change of the transverse spatial distribution of gluons with $Q^2$ one would need to know the spatial distributions of both gluons and singlet quarks at the input scale for all $x' > x$, where they are only poorly constrained by present data. Numerical studies based on a simple parametrization \cite{Frankfurt:2003td} suggest that the evolution effect is small for $Q^2 > 3 \, \text{GeV}^2$. The average transverse size $\langle \rho^2 \rangle_g$ at $x \sim 10^{-3}$ decreases by $\sim 15\%$ between $Q^2 = 3$ and $10^4 \, \textrm{GeV}^2$, while the effective value of $\alpha'_g$ in this $x$--region drops by about half. We note that the $J/\psi$ electroproduction data from HERA \cite{Chekanov:2004mw,Aktas:2005xu} provide some indication that the effective $\alpha'_g$ may be smaller than in photoproduction, although the results are not fully conclusive. In any case, the $\alpha'_g$ in the parametrization Eqs.~(\ref{bg_param})--(\ref{bg_param_last}) can be considered as an upper limit at values of $Q^2 \gtrsim 10\, \textrm{GeV}^2$, as are of interest in the applications here. 
Comparatively little is known about the transverse distribution of singlet quarks ($q + \bar q$) at small $x$. Comparison of the HERA deeply--virtual Compton scattering \cite{Aaron:2007cz} and $J/\psi$ production data indicates that singlet quarks at $x < 10^{-2}$ are distributed over a larger transverse area than the gluons, in qualitative agreement with theoretical arguments based on the pion cloud contribution to the parton densities at large $b$ \cite{Strikman:2009bd}. In the applications here we are concerned with gluon--induced processes; parametrizations similar to Eq.~(\ref{twogl_exp_dip}) could be formulated also for the quark distributions. \section{Impact parameter distribution of proton--proton collisions} \label{sec:impact} Using the information on the transverse spatial distribution of partons in the nucleon, one can infer the distribution of impact parameters in $pp$ collisions with hard parton--parton processes. While not directly observable, the latter determines the spectator interactions in such collisions and thus can be studied indirectly through measurements of the correlation of hard processes with final--state properties. The hard parton--parton process is effectively pointlike in transverse space compared to the typical scale of variation of the transverse distributions of partons in the colliding hadrons. The impact parameter distribution of $pp$ events with a hard gluon--gluon process is thus given by the normalized geometric probability for two gluons to collide at the same point in transverse space: \begin{eqnarray} P_2 (x_1, x_2, b|Q^2) &\equiv& \int \! d^2\rho_1 \int \! 
d^2\rho_2 \; \delta^{(2)} (\bm{b} - \bm{\rho}_1 + \bm{\rho}_2 ) \nonumber \\ &\times& F_g (x_1, \rho_1 |Q^2 ) \; F_g (x_2, \rho_2 |Q^2) , \label{P_2_def} \end{eqnarray} where $b \equiv |\bm{b}|$ is the $pp$ impact parameter and $\rho_{1, 2} \equiv |\bm{\rho}_{1,2}|$ the transverse distances of the two gluons from the center of their parent protons (see Fig.~\ref{fig:overlap}) \cite{Frankfurt:2003td}. It satisfies the normalization condition $\int d^2b \, P_2 (x_1, x_2, b |Q^2) = 1$. With the parametrizations of Eq.~(\ref{twogl_exp_dip}) the convolution integral in Eq.~(\ref{P_2_def}) can easily be evaluated analytically. In the case of symmetric collisions ($x\equiv x_1 = x_2$) we find \begin{equation} P_2 (x, b| Q^2) \; = \; \left\{ \begin{array}{l} \displaystyle (4\pi B_g)^{-1} \, \exp [-b^2/(4 B_g)] , \\[2ex] \displaystyle [m_g^2 /(12\pi)] \, (m_g b/2)^3 \, K_3 (m_{g} b) , \label{P_2_exp_dip} \end{array} \right. \end{equation} where the parameters $B_g$ and $m_g$ are taken at the appropriate values of $x$ and $Q^2$. \begin{figure} \includegraphics[width=.22\textwidth]{overlap.eps} \caption[]{Overlap integral of the transverse spatial parton distributions, defining the impact parameter distribution of $pp$ collisions with a hard parton--parton process, Eq.~(\ref{P_2_def}).} \label{fig:overlap} \end{figure} The impact parameter distribution in minimum--bias inelastic $pp$ collisions can be inferred from the $pp$ elastic scattering amplitude, which incorporates the information on the $pp$ total cross section through the unitarity relation. 
It is given by \begin{equation} P_{\text{in}} (s, b) \;\; = \;\; [ 1 - |1 - \Gamma (s, b)|^2]\, / \sigma_{\text{in}}(s) , \label{P_in_def} \end{equation} where $\Gamma (s, b)$ is the profile function of the $pp$ elastic amplitude in the conventions of Ref.~\cite{Frankfurt:2003td} and the inelastic cross section $\sigma_{\rm in} (s)$ is given by the integral $\int d^2 b$ of the expression in brackets, such that $\int d^2 b \, P_{\text{in}} (s, b) = 1$. For the purpose of the present study we employ a simple analytic parametrization of the profile function which satisfies unitarity and reflects the approach to the black--disk regime ($\Gamma \rightarrow 1$) at small impact parameters: \begin{equation} \Gamma (s, b) \;\; = \;\; \Gamma_0 \, \exp \{ -b^2/ [2 B(s)] \} , \label{Gamma_gaussian} \end{equation} where $\Gamma_0 = 1$ and the slope parameter is given in terms of the total cross section as $B(s) = \sigma_{\rm tot}(s)/(4\pi)$; the inelastic cross section for this profile is $\sigma_{\rm in}(s) = 3\pi B(s)$. For the total cross section we use the extrapolation suggested by the COMPETE Collaboration \cite{Cudell:2002xe}, which gives $B(s) = 20.2 \; (22.8) \; \text{GeV}^{-2}$ at $\surd s = 7 \; (14) \; \text{TeV}$. The uncertainty in the profile function at LHC energies is dominated by that of the total cross section. The impact parameter distributions calculated with Eq.~(\ref{Gamma_gaussian}) provide a fully satisfactory representation of those obtained with more elaborate parametrizations of the $pp$ elastic amplitude, see Fig.~1 of Ref.~\cite{Frankfurt:2006jp} and references therein. Using the above expressions we can now study the influence of the trigger conditions on the impact parameter distribution of $pp$ events at the current LHC energy $\surd s = 7 \, \textrm{TeV}$. 
The present experiments typically consider a jet trigger near zero rapidity, $y_1 \approx 0$, and study the characteristics of the underlying events as a function of the transverse momentum $p_T$ of the highest--momentum particle in the pseudorapidity interval $-2.5 < \eta < 2.5$. In this setting one integrates over the energy of the balancing jet (as well as that of other jets which might arise from higher--order processes), which effectively amounts to integrating over the momentum fraction of the second parton, $x_2$, at fixed $x_1$. Since the distribution is symmetric in the rapidity of the balancing jet, $y_2$, and the variation of the transverse distribution of partons with $x$ is small, \textit{cf.}\ Eqs.~(\ref{bg_param})--(\ref{bg_param_last}) and Fig.~\ref{fig:bpsi}, we can to a good approximation set $y_2 = 0$ and thus take $x_{1, 2}$ at the average point \begin{equation} x_1 \; = \; x_2 \; = \; 2 p_T/\sqrt{s} . \end{equation} \begin{figure} \includegraphics[width=0.48\textwidth]{pb_comb.eps} \caption[]{Impact parameter distributions of inelastic $pp$ collisions at $\surd s = 7 \, \text{TeV}$. \textit{Solid (dashed) line:} Distribution of events with a dijet trigger at zero rapidity, $y_{1, 2} = 0$, \textit{cf.}\ Eq.~(\ref{P_2_exp_dip}), for $p_T = 100 \, (10) \, \text{GeV}$ . \textit{Dotted line:} Distribution of minimum--bias inelastic events, \textit{cf.}\ Eq.~(\ref{P_in_def}).} \label{fig:pb} \end{figure} The scale at which the parton densities are probed is of the order $Q^2 \sim p_T^2$, with a coefficient which remains undetermined at leading--order accuracy. Generally, we expect the impact parameter distribution in events with such a jet trigger to become narrower with increasing $p_T$, because the transverse distribution of partons shrinks both with increasing $x_{1,2}$ and with increasing $Q^2$. The impact parameter distributions with a jet trigger of $p_T = 10$ and $100 \, \textrm{GeV}$ are presented in Fig.~\ref{fig:pb}. 
Shown are the results obtained with the exponential parametrization of Eq.~(\ref{P_2_exp_dip}) and Eqs.~(\ref{bg_param})--(\ref{bg_param_last}); the dipole form leads to comparable results. One sees that the change of the width of this distribution with $p_T$ is rather small, because the transverse distribution of gluons changes only little with $x$ in the range explored here; account of the $Q^2$ dependence of the transverse distribution of gluons would lead to an additional small change. One also sees that the impact parameter distributions with the jet trigger are much narrower than that in minimum--bias inelastic events at the same energy. This quantifies the two--scale picture of transverse nucleon structure summarized in Fig.~\ref{fig:percent}. \begin{figure} \includegraphics[width=.48\textwidth]{medp2.eps} \caption[]{Median impact parameter $b\textrm{(median)}$ of events with a dijet trigger, as a function of the transverse momentum $p_T$, \textit{cf.}\ Fig.~\ref{fig:pb}. \textit{Solid line:} Dijet at zero rapidity $y_{1,2} = 0$. \textit{Dashed line:} Dijet with rapidities $y_{1,2} = \pm 2.5$. The arrow indicates the median $b$ for minimum--bias inelastic events.} \label{fig:medp2} \end{figure} The median impact parameter in dijet events, defined as the value of $b$ for which the integral over $P_2$ reaches the value 1/2, is shown in Fig.~\ref{fig:medp2} as a function of $p_T$. For the parametrizations of Eq.~(\ref{P_2_exp_dip}) it is given by $b \text{(median)} = 1.67 \, \surd B_g$ and $3.08 \, m_g^{-1}$, respectively. The results obtained with the exponential and dipole form factors differ only by a few percent if the parameters are related by Eq.~(\ref{dip_exp}), indicating that the uncertainty resulting from our imperfect knowledge of the shape of the transverse spatial distribution of gluons is small. 
The uncertainty in $b \text{(median)}$ resulting from the uncertainty of $B_{g0}$ in the parametrization Eqs.~(\ref{bg_param})--(\ref{bg_param_last}) is less than $\pm 10\%$ at $p_T \sim \textrm{few GeV}$. It is seen that the median $b$ in jet events drops only very weakly as a function of $p_T$ for all values above $\sim 2 \, \textrm{GeV}$. We estimate that account of the $Q^2$ dependence of the transverse distributions due to DGLAP evolution would change the results in Fig.~\ref{fig:medp2} by less than $\sim 5 \%$. Also shown is the median $b$ with a trigger on jets at non-zero rapidity $y_1 = - y_2 = 2.5$, which amounts to an effective increase of $x_{1, 2}$ by a factor $\cosh y \approx 6$, \textit{cf.}\ Eq.~(\ref{x_1_2_y}) and the discussion in Sec.~\ref{sec:rapidity}. In all cases, the median impact parameter in jet events is far smaller than that in minimum--bias collisions, which is given by $b\text{(median)} = 1.32 \, \surd B$ for the parametrization of Eq.~(\ref{Gamma_gaussian}). To conclude this discussion, a comment is in order concerning the interpretation of the impact parameter distributions in $pp$ events with hard processes. Our analysis based on Eq.~(\ref{P_2_def}) shows that $pp$ events with at least one hard process (and no other requirements) are on average more central than minimum--bias inelastic events. This statement concerns the \textit{relative} distribution of impact parameters in a collective of inelastic $pp$ events and how it is changed by imposing the requirement of a hard process. One should not confuse this with statements about the \textit{absolute} probability for a hard process (in a certain rapidity interval) in a $pp$ collision at certain impact parameters. 
In fact, the analysis of Refs.~\cite{Rogers:2008ua,Rogers:2009ke} shows that there can be a substantial absolute probability for a hard process in $pp$ collisions at large $b$, and that unitarity places non--trivial restrictions on the dynamics of hard interactions in peripheral collisions. \section{Transverse multiplicity as an indicator of hard dynamics} \label{sec:multiplicity} The estimates of the previous section show that $pp$ events with a hard parton--parton collision are much more central than minimum--bias events, and that the average impact parameters change only very little for $p_T$ above $\sim 2 \, \text{GeV}$. At the same time, it is known that the overall event characteristics, such as the average multiplicity, depend strongly on the centrality of the underlying $pp$ collision. Combining these two observations, we can devise a practical method to determine down to which values of $p_T$ mid--rapidity particle production is predominantly due to hard parton--parton collisions. The observable of interest is the transverse multiplicity, measured in the direction perpendicular to the transverse momentum of the trigger particle or jet. It is not \textit{directly} affected by the multiplicity associated with the trigger or balancing jets, but is \textit{indirectly} correlated with the presence of a hard process because of its dependence on the centrality. Based on the results of Figs.~\ref{fig:pb} and \ref{fig:medp2} we predict that the transverse multiplicity should be practically independent of $p_T$ of the trigger as long as the trigger particle originates from a hard parton--parton collision which ``centers'' the $pp$ collision. Furthermore, the transverse multiplicity in such events should be significantly higher than in minimum--bias inelastic events, since the known mechanisms of particle production --- minijet interactions, multiple soft interactions, \textit{etc.} --- are much more effective in central collisions. 
When measuring the transverse multiplicity as a function of $p_T$ of the trigger, we thus expect it to increase from its minimum--bias value at low $p_T$ and become approximately constant at $p_T \sim \textrm{few GeV}$ (see Fig.~\ref{fig:multpt}). The point where the transition happens, $p_{T, {\rm crit}}$, indicates the critical value of $p_T$ above which particle production is dominated by hard parton--parton processes. \begin{figure} \includegraphics[width=0.48\textwidth]{multpt.eps} \caption{Schematic illustration of the expected dependence of the transverse multiplicity, $N(p_T)$, on the $p_T$ of the trigger.} \label{fig:multpt} \end{figure} Interestingly, the predicted increase and eventual flattening of the transverse multiplicity agrees well with the pattern observed in the existing data. At $\surd s = 0.9\, \textrm{TeV}$ the transition occurs approximately at $p_{T, {\rm crit}} \approx 4\, \textrm{GeV}$ \cite{Khachatryan:2010pv}, at $\surd s = 1.8\, \textrm{TeV}$ at $p_{T, {\rm crit}} \approx 5\, \textrm{GeV}$ \cite{Affolder:2001xt}, and the preliminary data at $7\, \textrm{TeV}$ indicate somewhat larger values of $p_{T, {\rm crit}} = 6-8\, \textrm{GeV}$ \cite{EmilyNurse,Lucaroni}. We thus conclude that the minimum $p_T$ for hard particle production increases with the collision energy. Note that we consider here an inclusive trigger; the procedure adopted in the experimental analysis (selection of the fastest particle in the measured rapidity interval) somewhat enhances the contribution of soft mechanisms in particle production. It is worth noting that the overall pattern described here is reproduced by the tunes of current MC models; \textit{cf.}\ the comparisons in Refs.~\cite{Affolder:2001xt,EmilyNurse,Khachatryan:2010pv,Lucaroni}. 
This is because these models effectively include the key feature used in our analysis --- the narrow impact parameter distribution of dijet events (although $\langle b^2 \rangle$ in these models is too small by a factor $\sim 2$), and impose a cutoff on the minimal $p_T$ of the minijets. Our point here is that the observed pattern can be explained naturally on the basis of the transverse geometry of $pp$ collisions with hard processes, without involving detailed models. This allows one to determine in a model--independent way where the dominant dynamics in particle production changes from soft interactions to hard parton--parton processes. For $p_T$ lower than $p_{T, {\rm crit}}$ the relative contribution of hard processes to particle production starts to decrease. In terms of the transverse geometry, this means that the observed trigger particle can, with some probability, originate from either peripheral or central collisions in the sense of Fig.~\ref{fig:percent}. We can estimate the fraction of particles produced by hard interactions in this ``mixed'' region in a simple two--component model, based on the observation that the effective impact parameters in soft collisions are much larger than those in hard events and do not change much with transverse momenta of the produced particles \footnote{This assumption is certainly not correct if the maximum $p_T$ of particles produced in a rapidity interval of a few units is taken too low. This would push the impact parameter to very large values and would likely enhance the contribution of double diffraction.}. Thus, we assume that: (\textit{i}) a trigger particle observed at given $p_T$ originated with a probability $\lambda_{\rm hard}(p_T)$ from a hard process, and with probability $1- \lambda_{\rm hard}(p_T)$ from soft interactions; (\textit{ii}) the average impact parameters in both classes of collisions do not depend on the $p_T$ of the trigger. 
This allows us to write the $p_T$ dependence of the transverse multiplicity in the form \begin{equation} N(p_T) \;\; = \;\; \lambda_{\rm hard}(p_T) N_{\rm hard} + [1 - \lambda_{\rm hard}(p_T)] N_{\rm soft} , \label{N_mixed} \end{equation} where $N_{\rm hard}$ and $N_{\rm soft}$ are independent of $p_T$. Assuming that for some sufficiently small $p_T$ cutoff $\lambda_{\rm hard}(p_T)$ is close to zero, we can determine $N_{\rm soft}$, which corresponds to the minimum--bias impact parameter distribution presented in Fig.\ref{fig:pb}, and use it to determine $\lambda_{\rm hard} (p_T)$ for $p_T$ smaller than $p_{T, {\rm crit}}(s)$ via Eq.~(\ref{N_mixed}). The data indeed indicate that $N_{\rm hard} \gg N_{\rm soft}$; so in the region of $p_T$ where $N(p_T) /N_{\rm hard} \ge 1/3$ our estimate is not sensitive to the exact value of $N_{\rm soft}$. By inspection of the data we conclude that the contribution of the hard mechanism drops to about half of the total yield for $p_T \approx 1.5-2, 2-2.5, 3-4 \, \textrm{GeV}$ for $\surd s = 0.9, 1.8, 7 \, \textrm{TeV}$. It is also of interest that for $p_T > p_{T, {\rm crit}}$ the transverse multiplicity appears to increase with the collision energy faster than the average multiplicity \cite{EmilyNurse,Lucaroni}. In the leading--twist approximation the perturbative contribution is proportional to the product of the gluon densities at small $x_{1, 2}$ and thus scales as $(\sqrt{s})^{2\lambda}$, where $\lambda$ is the exponent of the gluon density, $x g(x, Q^2) \propto x^{-\lambda}$, and takes on values $\lambda \sim 0.2-0.3$ in the $Q^2$ region of interest here. This is roughly consistent with the factor $\sim 2$ increase of the observed transverse multiplicity between $\surd s = 0.9$ and 7 TeV, which suggests scaling as $(\sqrt{s})^{0.34}$ \footnote{In the case when no particles with $p_T > 2\, {\rm GeV}$ are produced in the measured rapidity interval the multiplicity does not increase with $s$. 
Presumably this selection of events corresponds to collisions at very large impact parameters, where soft interactions dominate and the change with energy is the slowest.}. We note that at very small values of $x_1$ or $x_2$ the leading--twist approximation breaks down because of the onset of the black--disk regime in hard interactions, which generates a new dynamical scale in the form of the gluon density per transverse area; see Ref.~\cite{Frankfurt:2003td} for an estimate of the relevant values of $x_{1, 2}$ and $p_T$. \section{Rapidity dependence as a test of universality} \label{sec:rapidity} The basic idea of our approach is that hard processes influence the event characteristics by selecting $pp$ collisions with small impact parameters. Further interesting tests of these geometric correlations can be performed by measuring the dependence of event characteristics on the rapidity of the jets in the trigger. In production of dijets at non-zero rapidity only part of the center--of--mass energy of the colliding partons is converted to transverse energy, allowing one to probe larger momentum fractions $x_{1,2}$ at the same $p_T$. For jets with symmetric rapidities $y_1 = -y_2 \equiv y$, \begin{equation} x_1 \; = \; x_2 \; = \; (2 p_T \cosh y) /\sqrt{s} . \label{x_1_2_y} \end{equation} Because partons with larger $x_{1,2}$ sit at smaller transverse distances, the average impact parameters in $pp$ collisions with a dijet trigger decrease with increasing $y$; however, the effect is small (see Fig.\ref{fig:medp2}). Observing the approximate $y$--independence of the transverse multiplicity would test that the selection of central collisions does not depend on the details of the hard process. Beyond that, we predict a small increase in the transverse multiplicity if $y$ is increased away from zero at fixed $p_T$. 
In particular, such measurements could separate the effects of the $x$-- and $Q^2$--dependence of the transverse distribution of partons on the average impact parameters. At lower $p_T$, the dependence of the transverse multiplicity on the rapidity would help to distinguish between the minijet and soft mechanisms of hadron production, as minijets are much more centered at small rapidities, while typical soft multi--ladder--type interactions lead to correlations over large rapidity intervals. An additional advantage of measurements with the $y \neq 0$ trigger is that the difference between the transverse, forward, and away--side regions of particles produced at mid--rapidity is much smaller than for the $y = 0$ trigger. The selection of central $pp$ impact parameters by hard parton--parton processes could in principle be verified not only through the transverse multiplicity, but also by measuring event characteristics in \textit{rapidity regions} which are not directly affected by the jet fragmentation of the partons in the trigger process. The identification of the jet fragmentation regions requires detailed modeling beyond the scope of the present investigation. Assuming that one could reliably remove the fragmentation regions, several types of interesting correlation measurements become possible. First, we predict that in the remaining rapidity region the multiplicity should become, on average, isotropic in the transverse direction, \textit{i.e.}, it should attain the value of the previously considered transverse multiplicity in all directions and be substantially higher than in minimum bias events. Second, this multiplicity should not depend on the rapidity of the trigger, $y$, if $p_T > p_{T, {\rm crit}}$. Both measurements would directly attest to the universality of particle production in central $pp$ collisions. 
The present LHC experiments use the central detectors to study the underlying event structure in the production of high--$p_T$ particles in the pseudorapidity interval $\eta = \pm 2.5$ (corresponding roughly to the same range in rapidity proper, $y$); the measurements may be extended to $-5 \le \eta \le 5$ (CMS) and $-4.9 \le \eta \le 4.9$ (ATLAS) using forward detectors. In production of two jets at $y_1 = -y_2 = y \approx 2$, assuming a rapidity interval of approximately $\pm 0.7$ for the fragmentation of either jet, the rapidity region $\pm 1$ should be free of direct jet fragments and could be used for the envisaged multiplicity measurements. Alternatively, one may consider a pair of jets at the same positive rapidity, $y_1 = y_2 \equiv \bar y > 0$, and study the multiplicity in the negative rapidity region as a function of $\bar y$. The latter choice would have the advantage that the parton momentum fractions $x_{1, 2}$ change in different directions when increasing $\bar y$ from zero, compensating the effect on the width of the impact parameter distribution to first order. An interesting phenomenon should occur when extending such measurements with symmetric jets at $y_1 = -y_2 = y$ to larger rapidities. As discussed in Sec.~\ref{sec:parton}, the transverse size of the parton distribution decreases with increasing $x$. This leads to a seemingly paradoxical prediction, that the larger the rapidity interval $y_1 - y_2 = 2y$ between the jets, the larger the multiplicity in the mid--rapidity region. In other words, one expects long--range correlations in rapidity which are becoming stronger with increase of the rapidity interval. However, the effect is rather small; at $p_T = 5 \, \textrm{GeV}$ the median $pp$ impact parameter changes from $0.66 \, \textrm{fm}$ at $y = 0$ to $0.61 \, (0.55) \, \textrm{fm}$ at $y = 2.5 \, (5)$. Therefore the ability to measure over a wide range of pseudorapidities $|\eta| < 5$ would be very helpful for studying this effect. 
Deviations from the predicted universality can arise as a result of spatial correlations between partons involved in the hard collisions and those participating in spectator interactions \cite{Frankfurt:2004kn}. At the relatively small $x$--values probed with the central detectors at LHC ($x < 10^{-2}$), such correlations are likely to depend weakly on $x$ and would not significantly affect the rapidity dependence. In principle, the study of these deviations from universality may provide a new window on correlations in the nucleon's partonic wave function. \section{Suggestions for future measurements} \label{sec:future} In addition to the studies described in Secs.~\ref{sec:multiplicity} and \ref{sec:rapidity}, several other kinds of measurements could further explore the proposed connection between hard processes and the transverse multiplicity, or use it to investigate interesting aspects of QCD and nucleon structure. \textit{Energy dependence of transverse multiplicity.} Measurement of the energy dependence of the transverse multiplicity in jet events would, in effect, reveal the energy dependence of the average multiplicity in central $pp$ collisions, which is of interest beyond the specific applications considered here. In order to avoid change of the average impact parameters due to the $x$--dependence of the transverse distribution of partons, it would be desirable to compare data at the same values of $x_{1,2}$. When increasing the collision energy from $\surd s_0$ to $\surd s$ one thus needs to trigger on jets with rapidities scaled by $(1/2)\ln(s/s_0)$. \textit{Double dijet trigger.} Further reduction of the effective impact parameters in $pp$ collisions can be achieved with a trigger on multiple hard processes \cite{Frankfurt:2003td}. 
In the absence of transverse correlation between partons, the effective impact parameter distribution of events with two dijets would be given by the (properly normalized) product of distributions $P_2$ in Eq.~(\ref{P_2_def}). In the simplest case of two dijets with $x_1 = x_2 = x_3 = x_4$, the median $b$ in such events would be $1.18 \, \surd B_g \; (1.97 \, m_g^{-1})$, to be compared to $1.67 \, \surd B_g \;(3.08 \, m_g^{-1})$ for single jet events. Account of transverse correlations between partons reduces the difference in effective impact parameters by about half \cite{Frankfurt:2003td}. In all, we expect a $15-20\, \%$ reduction of the median $b$ with a double dijet trigger, which should manifest itself in a further increase of the transverse multiplicity compared to single dijet events. \textit{Other centrality triggers.} Knowledge of the dependence of the transverse multiplicity on $b$ would allow one to calibrate other triggers on central $pp$ collisions. In particular, it would be interesting to explore triggers related to particle production in the nucleon fragmentation regions, for example leading neutrons. Ultimately one aims here at designing a trigger on ultra--central $pp$ collisions, in which the effective gluon densities would be comparable to those reached in heavy--ion collisions \cite{Drescher:2008zz}. \textit{Quark vs.\ gluon--induced jets.} It would be interesting to compare the transverse multiplicities and other underlying event characteristics for quark--antiquark induced hard processes like $W^{\pm}, Z^0$ production and gluon--gluon induced processes. Another possibility would be to consider large--$|\eta|$ dijet production and separate quark-- and gluon induced jets using the different jet shapes. This would allow one to observe a possible difference between the transverse distributions of quarks and gluons, complementing studies of hard exclusive processes in $ep/\gamma p$ scattering. 
\textit{Deconvolution of impact parameter dependence.} If the fluctuations of the multiplicity between events at a given impact parameter are not too large, one can attempt a program of deconvolution of the impact parameter dependence of the multiplicity, using information on the impact parameter dependence of dijet and 4--jet rates \cite{Rogers:2009ke}. \section{Summary and discussion} \label{sec:summary} The transverse multiplicity in $pp$ collisions with a jet trigger provides an effective way to determine the rate of processes initiated by hard parton--parton interactions down to rather small transverse momenta. Further analysis of the data can then establish whether the observed particle production rates are consistent with perturbative QCD predictions. Studies of the dependence of the transverse multiplicity on the transverse momenta and rapidities of the jets, and on the collision energy, can provide a better understanding of the impact parameter dependence of the underlying event characteristics and allow one to refine the use of the transverse multiplicity as an indicator of the dynamics in particle production. The results of the present study have implications also for the use of hard processes in new particle searches. In the production of a Higgs boson with a mass $M_H \sim 100 \, \text{GeV}$ the parton momentum fractions $x_{1, 2}$ are the same as in dijet events at zero rapidity with $p_T = M_H/2 = 50 \, \text{GeV}$. The two types of events thus involve the same average $pp$ impact parameters. This allows one to describe the background to new particle production processes much more accurately than on the basis of minimum--bias event characteristics. Our analysis relies crucially on information about the transverse spatial distribution of gluons from exclusive $J/\psi$ photo/electroproduction and similar processes. 
While the region $x < 0.01$ has been covered by HERA, no data of comparable precision are available at larger $x$ (see Sec.~\ref{sec:parton}). This is unfortunate, as the production of new particles with masses $\sim 10^2 \, \text{GeV}$ requires partonic collisions at precisely such momentum fractions. The region $x > 0.01$ is also particularly interesting for nucleon structure studies \cite{Strikman:2003gz,Strikman:2009bd}. The $J/\psi$ results expected from the COMPASS experiment, as well as measurements with a future Electron--Ion Collider (EIC), thus would have a major impact on both areas of study. \begin{acknowledgments} We thank J.~D.~Bjorken for inspiring conversations on related aspects of $pp$ collisions, and P.~Bartalini, E.~Nurse, P.~Skands, and B.~Webber for very useful discussions of current MC models and the LHC data. Two of us (LF and MS) would like to thank the Yukawa International Program for Quark--Hadron Sciences for hospitality during the initial stage of this study. MS also acknowledges the hospitality of the CERN Theory Institute ``The first heavy--ion collisions at the LHC'' during part of the work on this project. This research was supported by the United States Department of Energy and the Binational Science Foundation. Notice: Authored by Jefferson Science Associates, LLC under U.S.\ DOE Contract No.~DE-AC05-06OR23177. The U.S.\ Government retains a non--exclusive, paid--up, irrevocable, world--wide license to publish or reproduce this manuscript for U.S.\ Government purposes. \end{acknowledgments}
{ "redpajama_set_name": "RedPajamaArXiv" }
1,155
\section{Introduction} With large scale sky survey telescopes, e.g. SDSS, LAMOST etc., spectra are acquired at PB or even TB level per minute. Confronting such a high volume of spectra, classification is the first step for astronomers to carry out research on various types of objects on the sky. Currently, both SDSS and LAMOST data releases use a model matching method to divide the spectra into star, galaxy, quasar and unknown categories. More specifically, in SDSS, stars and galaxies are further classified into subclasses. As such surveys go along, astronomers will get much more spectra data. Meanwhile, automated classification of spectra into subclasses with less human intervention is highly in need. \section{Classification of Galaxy Spectra} In this paper, we focus on the classification of galaxy spectra, as automatically classifying emission line galaxies is complex and is believed to be meaningful for understanding galaxy formation and evolution. Machine learning approaches have been applied to enable automated classification of galaxy spectra. \citet{2002SPIE.4847..371Z} \citet{2015MNRAS.451..629S,2015MNRAS.453..122S} applied Support Vector Machine (SVM) and Artificial Neural Networks (ANN) to a series of line ratio features of galaxy spectra provided by MPA/JHU, and classified galaxies as AGN, composite or SFG. Emission line galaxies are usually classified using the Baldwin-Phillips-Terlevich (BPT) diagram, the features of which are line ratios. These works achieve highly acceptable classification accuracy, but the problem is that measuring the emission-line ratios from the original spectra needs to be done accurately by astronomers in advance. Therefore, we are interested in automated extraction of features from original spectra. Specifically, in this poster paper, we address the following research questions: How can classification of AGNs be done through automated machine learning algorithms using a cloud computing platform and services? How does each algorithm perform in galaxy spectra classification? 
\section{Data} \label{sec:data} The dataset for this experiment consists of over 10000 galaxy spectra from SDSS DR14. These spectra were labeled according to their subclass field. AGN were coded as 1, and the remaining types including STARFORMING, STARBURST and No Tags were coded as 0. Eighty percent of the dataset were split as training set and the rest were used as test set. To get the common window of rest-frame spectra and retain the majority of emission lines, the spectral wavelength range was truncated to 2642 pixels from 3700-6800 $\overset{\circ}{A}$. Feature selection is an important aspect of machine learning. As the spectra usually contain thousands of pixels, dimension reduction based on appropriate feature representation is a valuable research question. Principal Component Analysis (PCA) is a technique to extract features and compress the spectra, which is commonly used in spectra classification \citep{1998MNRAS.298..361B,1998MNRAS.295..312S,2010AJ....139.1261M,2013hell.confQ..24K,2014IAUS..306..301B}. PCA with 20 components was applied to the original flux and used as features in classification. \section{Experiments on PAI} Alibaba PAI is a platform designed for artificial intelligence which incorporates a series of data preprocessing, feature engineering and commonly used machine learning algorithms. In this experiment, we employed 3 commonly used algorithms, namely logistic regression, random forest and linear support vector machine (SVM), to explore their effectiveness in automated classification of galaxy spectra on a selected SDSS dataset as described in section \ref{sec:data}. \articlefigure{Experiment_Flow.pdf}{fig:Experiment_Flow}{The overall process of the experiment on PAI} The overall flow of the experiment on PAI is shown in Figure \ref{fig:Experiment_Flow}. The experiment starts from generating 10000 random samples from around 10600 pre-processed spectra data. 
\section{Results and Discussion} In this section, we present the results of the experiment and compare the performance of the three different algorithms in terms of precision, recall and F1-score. The efficiency of algorithms is not presented as the execution time for each algorithm is almost the same. This might be because our dataset is relatively small and PAI is powerful enough to produce results in a few minutes. In the following, we discuss the classification accuracy and feature importance in more detail. \subsection{Classification Results} As Table \ref{tab:results} shows, all three classification algorithms (i.e.\ logistic regression, linear SVM and random forest) with 20 components PCA reached higher than 92\% on precision and recall. More specifically, the three methods have nearly the same F1-score. This might be because the limitation of classification algorithms lies in the feature representation, i.e. linear PCA. \begin{table}[!h] \caption{Comparison of classification results} \label{tab:results} \smallskip \begin{center} {\small \scalebox{0.7}{ \begin{tabular}{lccc} \tableline \noalign{\smallskip} & Logistic Regression/ with feature elimination & Linear SVM/ with feature elimination & Random Forest/ with feature elimination \\ \noalign{\smallskip} \tableline \noalign{\smallskip} Precision & 0.9293/ 0.9293 & 0.9227/0.9289 & 0.9215/0.9255 \\ Recall & 0.9283/ 0.9283 & 0.9212/0.9272 &0.9215/0.9255\\ F1-score & 0.9284/ 0.9284 & 0.9214/0.9274 &0.9215/0.9255 \\ \noalign{\smallskip} \tableline \end{tabular} }} \end{center} \end{table} \subsection{Feature Importance} \articlefiguretwo{Importance_entropy.png}{Importance_gini.png}{Feature_Importance}{Feature importance based on entropy} In this experiment, one of the algorithms we adopted is random forest, an ensemble decision tree algorithm to classify the galaxy spectra. During the training, the random forest algorithm also calculated feature importance. 
As shown in Figure \ref{Feature_Importance}, by selecting the top 10 important features (feature elimination), we acquire even better precision and recall scores. This indicates that we could achieve better results with the most important features extracted. \section{Conclusions and Future Work} In this experiment, we simply apply PCA with 20 components and it acquires over 92\% accuracy in classification of AGNs. However, the spectra cannot be represented well simply by a linear combination of spectral features. A better extraction and representation of features might lead to better classification results. The future plan is to investigate other ways, such as deep neural networks (auto encoder, RBM etc.), to extract features and compare with the results of using PCA. Also, a limitation of PAI is that it does not support random search or grid search functions, so we adjust the hyper parameters manually and by experience. In the future, we should perform a complete search in the parameter space. \acknowledgements This work is supported by the Young Researcher Grant of National Astronomical Observatories, Chinese Academy of science, National Natural Science Foundation of China (NSFC)(11503051, 61402325) and the Joint Research Fund in Astronomy (U1531111, U1531115, U1531246, U1731125, U1731243) under cooperative agreement between the NSFC and Chinese Academy of Sciences (CAS). Data resources are supported by Chinese Astronomical Data Center (CADC) and Chinese Virtual Observatory (China-VO).
{ "redpajama_set_name": "RedPajamaArXiv" }
2,470
{"url":"https:\/\/stats.stackexchange.com\/questions\/371575\/is-ratio-of-ranks-a-well-studied-statistic","text":"# Is ratio of ranks a well-studied statistic?\n\nI have two lists of recommendations: that is, two different algorithms have assigned ranks to the same list of objects. I would like to know if they're similar. I specifically care more about recommendations near the top of the list than near the bottom, so e.g. it matters a lot more that item 1 is the same in both lists than that item 427 is the same in both lists. The standard methods I know about (Kendall, Spearman, etc.) all assume that all rankings are equally meaningful, which is just not true.\n\nThe most meaningful transformation that comes to mind is taking the ratio of ranks: we can then say e.g. that the two rankings are similar if $$0.9 \\cdot \\text{Rank}_1 < \\text{Rank}_2 < 1.1 \\cdot \\text{Rank}_1$$ always (or typically). Is this a well-studied transformation? Can someone suggest statistical properties it would have?\n\n\u2022 $Rank_2>0$ and $\\alpha>1$, then $Rank_2<\\alpha Rank_2$ trivially. Do you mean $Rank_2<\\alpha Rank_1$ ? \u2013\u00a0Acccumulation Oct 12 '18 at 23:27\n\u2022 I corrected the typo, please check \u2013\u00a0kjetil b halvorsen Oct 14 '18 at 12:24\n\nSome ideas, not really a complete answer. Let $$n$$ be the number of objects, objects indexed by $$i$$. I denote the two sets of rankings by $$R_1, R_2$$, the ranks of object $$i$$ is then $$R_{1i}, R_{2i}$$. I will redefine your condition in a more symmetrical way (note that there is a misprint in your inequality, can you correct?): $$\\alpha^{-1} \\le \\frac{R_1}{R_2}\\le \\alpha$$ for some $$\\alpha > 1$$. This is equivalent to $$\\alpha^{-1} \\le \\frac{R_2}{R_1}\\le \\alpha$$, showing symmetry.\n\nThen you can fix some $$\\alpha$$, for instance your value $$\\alpha=1.1$$ and count up the number of times the condition holds. 
That is still not a metric, for full equality it equals $$n$$, so instead you can subtract it from $$n$$. Maybe that works, you would have to experiment. It is symmetric and zero for full equality, to be a metric you will have to check the triangle inequality. Anyhow this will give more weight to the higher rankings (assuming you assign ranks from 1 upwards to $$n$$ for the best).\n\nBut there are many possibilities. For each $$i$$, we can find the smallest value of $$\\alpha$$ so that the inequality holds (and then subtract 1), define that as $$\\alpha_i = \\max\\{\\frac{R_{1i}}{R_{2i}}, \\frac{R_{2i}}{R_{1i}} \\}-1$$ The smallest possible value of $$\\alpha_i$$ is zero, so for full equality the sum of the $$\\alpha_i$$'s is zero. But, we would probably expect this to be dominated by the lower rankings, so to give larger weights to the upper end, start ranking from the top, so best is ranked 1. Again you would need to experiment.\n\nGenerally, rankings can be seen as permutations, so you could look into metrics on permutations. One good account is in chaper 6 of Persi Diaconis: Group Representations in Probability and Statistics. But, as many such metrics cannot be written as a sum over $$i$$, it is not obvious how to make weighted versions. But Spearman's Rank correlation is one such metric, and it should be possible to make a weighted version of that.\n\nThere is many posts here about comparisons of rankings, you could look through this list.\n\nEDIT\n\n\nSpearman's rank correlation can be seen as Pearson correlation on the ranks of the data, so can be calculated as such. The Pearson correlation (and certainly other correlations) can be generalized to weighted correlation, see Wikipedia. Then you only need to decide on which weight function to use.\n\nFor example, say you have ranked $$n$$ objects and are interested mostly in the top $$k$$ rankings. 
Then you could define $$w_i =\\begin{cases} 1, & \\max\\{R_{1i},R_{2i}\\}\\ge n-k+1 \\\\ 0 & \\text{other cases} \\end{cases}$$ or maybe some smoother version.\n\nAnd yes, this is certainly not a new problem. R has the function cov.wt which you can use. And Wikipedia: Spearman correlation has the phrase ... should also not be used in cases where the data set is truncated; that is, when the Spearman correlation coefficient is desired for the top X records (whether by pre-change rank or post-change rank, or both), ... showing prior interest! The following papers should interest you: Weighted rank correlation and more sensitive to agreements in top rankings.\n\n\u2022 Isn't it equivalent to $\\alpha \\leq \\frac {R_2}{R_1} \\leq \\alpha ^{-1}$? \u2013\u00a0Acccumulation Oct 12 '18 at 23:19\n\u2022 Well, I said $\\alpha > 1$, so your inequality seems impossible ... \u2013\u00a0kjetil b halvorsen Oct 12 '18 at 23:21\n\u2022 In what condition is my inequality impossible, but $\\alpha ^{-1} \\leq \\frac {R_1}{R_2} \\leq \\alpha$ is possible? Come to think of it, what does $\\frac {R_1}{R_2}$ even mean? \u2013\u00a0Acccumulation Oct 12 '18 at 23:26\n\u2022 The indices should be $1i$ etc, the value of rankings for object $i$. \u2013\u00a0kjetil b halvorsen Oct 12 '18 at 23:33\n\u2022 The challenge I have with most approaches to comparing rankings is that I'm not interested in \"do the two criteria order the items differently\" so much as in \"will they produce more or less the same top recommendations\". While I can't be alone in being interested in that as a criterion, it doesn't seem to be a common thing in statistics. In particular, the permutation approach is almost certainly not the right direction, because I pointedly don't want to have a metric that would be invariant under passing both rankings through the same permutation. 
\u2013\u00a0Elena Yudovina Oct 14 '18 at 1:29\n\nI can't comment on the ratio of ranks, but I do know of a method that could help you with your initial problem. The irreproducible discovery rate (IDR) is a statistical method originally created to deal with genomics data. The input is a two different rankings of the same set of objects. In the original case, these were signal peaks from DNA sequencing experiments, but you could put in any two lists of ranks. The algorithm posits two underlying distributions: a correlated one to describe the shared signals at the top of the list and an uncorrelated one to describe the remaining noise. If you see lots of items with low IDR, it indicates there is a substantial correlated component at the top of the list, so maybe you could try this to answer your question.\n\n\u2022 Thank you! This definitely looks promising, I haven't read the paper in enough details yet to say if it's exactly what I need \u2013\u00a0Elena Yudovina Oct 12 '18 at 21:41","date":"2019-12-11 13:51:14","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 1, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 23, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.8045209050178528, \"perplexity\": 401.3061586946939}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": 
true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2019-51\/segments\/1575540531917.10\/warc\/CC-MAIN-20191211131640-20191211155640-00534.warc.gz\"}"}
null
null
Q: LaTeX: Inconsistent colors in PDF I am using tcolorboxes with grey frames for examples in my PhD thesis. It will be printed professionally and the sample print I received shows inconsistent colors. The tcolorboxes are created identically with a grey frame \definecolor{framegrey}{cmyk}{0,0,0,0.15}. However when inspecting the PDF with Acrobat Professional I see that on some pages the grey frame is defined with the correct cmyk color (0,0,0,0.15), on other pages it is set with a different cmyk color (0.09,0.07,0.07,0)! These pages tend to have colorful pixel graphics inside the tcolorbox. The framecolors looks identical on the screen, the resulting colors in print are different! Does anybody know how to get consistent colors from LaTeX? * *\usepackage[cmyk]{xcolor} does not solve the problem *colframe=white!85!black results in similar problems I cannot give a minimal working example at the moment, as I don't know what exactly is producing this behaviour. The two colors look very similar. Additional information I found out, that compiling the document on Fedora 30 results in the above problem, while compiling it on Debian Stretch yields correctly defined framegrey in CMYK. I did check the tikz version as suggested in the comments, but both systems have version 3.0.1a installed (< version 3.1.3). Here you find a list of all differences in the tex environment on both systems: --- Fedora 30 +++ Debian Stretch [...] xcolor.sty 2016/05/11 v2.12 LaTeX color extensions (UK) color.cfg 2016/01/02 v1.6 sample color configuration - pdftex.def 2018/01/08 v1.0l Graphics/color driver for pdftex + pdftex.def 2017/01/12 v0.06k Graphics/color for pdfTeX +infwarerr.sty 2016/05/16 v1.4 Providing info/warning/error messages (HO) + ltxcmds.sty 2016/05/16 v1.23 LaTeX kernel commands for general use (HO) [...] 
-graphicx.sty 2017/06/01 v1.1a Enhanced LaTeX Graphics (DPC,SPQR) +graphicx.sty 2014/10/28 v1.0g Enhanced LaTeX Graphics (DPC,SPQR) -graphics.sty 2017/06/25 v1.2c Standard LaTeX Graphics (DPC,SPQR) +graphics.sty 2016/10/09 v1.0u Standard LaTeX Graphics (DPC,SPQR) [...] -fancyhdr.sty 2017/06/30 v3.9a Extensive control of page headers and footers -multicol.sty 2018/04/20 v1.8s multicolumn formatting (FMi) +fancyhdr.sty 2016/09/06 3.8 Extensive control of page headers and footers +multicol.sty 2016/04/07 v1.8p multicolumn formatting (FMi) [...] -inputenc.sty 2018/04/06 v1.3b Input encoding file -microtype.sty 2018/01/14 v2.7a Micro-typographical refinements (RS) -microtype-pdftex.def 2018/01/14 v2.7a Definitions specific to pdftex (RS) -microtype.cfg 2018/01/14 v2.7a microtype main configuration file (RS) -mathtools.sty 2018/01/08 v1.21 mathematical typesetting tools - calc.sty 2017/05/25 v4.3 Infix arithmetic (KKT,FJ) - mhsetup.sty 2017/03/31 v1.3 programming setup (MH) +inputenc.sty 2015/03/17 v1.2c Input encoding file + utf8.def 2017/01/28 v1.1t UTF-8 support for inputenc + t1enc.dfu 2017/01/28 v1.1t UTF-8 support for inputenc + ot1enc.dfu 2017/01/28 v1.1t UTF-8 support for inputenc + omsenc.dfu 2017/01/28 v1.1t UTF-8 support for inputenc +microtype.sty 2016/05/14 v2.6a Micro-typographical refinements (RS) +microtype-pdftex.def 2016/05/14 v2.6a Definitions specific to pdftex (RS) +microtype.cfg 2016/05/14 v2.6a microtype main configuration file (RS) +mathtools.sty 2015/11/12 v1.18 mathematical typesetting tools + calc.sty 2014/10/28 v4.3 Infix arithmetic (KKT,FJ) + mhsetup.sty 2010/01/21 v1.2a programming setup (MH) [...] -pgfplots.sty 2018/03/28 v1.16 Data Visualization (1.16) +pgfplots.sty 2016/08/10 v1.14 Data Visualization (1.14) tikz.sty 2015/08/07 v3.0.1a (rcs-revision 1.151) pgf.sty 2015/08/07 v3.0.1a (rcs-revision 1.15) pgfrcs.sty 2015/08/07 v3.0.1a (rcs-revision 1.31) [...] 
-pdftexcmds.sty 2018/01/30 v0.27 Utility functions of pdfTeX for LuaTeX (HO) -infwarerr.sty 2016/05/16 v1.4 Providing info/warning/error messages (HO) - ltxcmds.sty 2016/05/16 v1.23 LaTeX kernel commands for general use (HO) - ifpdf.sty 2017/03/15 v3.2 Provides the ifpdf switch +pdftexcmds.sty 2016/05/21 v0.22 Utility functions of pdfTeX for LuaTeX (HO) + ifpdf.sty 2016/05/14 v3.1 Provides the ifpdf switch [...] -pgfplotstable.sty 2018/03/28 v1.16 Table typesetting and Pretty-printing (1. -16) - array.sty 2018/04/30 v2.4h Tabular extension package (FMi) +pgfplotstable.sty 2016/08/10 v1.14 Table typesetting and Pretty-printing (1. +14) + array.sty 2016/10/06 v2.4d Tabular extension package (FMi) [...] - caption.sty 2018/05/01 v3.3-147 Customizing captions (AR) -caption3.sty 2018/05/27 v1.8a caption3 kernel (AR) + caption.sty 2016/02/21 v3.3-144 Customizing captions (AR) +caption3.sty 2016/05/22 v1.7-166 caption3 kernel (AR) [...] -algorithm2e.sty 2017/07/18 v5.2 algorithms environments -ifoddpage.sty 2016/04/23 v1.1 Conditionals for odd/even page detection +algorithm2e.sty 2013/01/06 v5.00 algorithms environments [...] -tcolorbox.sty 2018/07/26 version 4.14 text color boxes +tcolorbox.sty 2016/11/18 version 3.96 text color boxes [...] -etoolbox.sty 2018/02/11 v2.5e e-TeX tools for LaTeX (JAW) - xparse.sty 2018-05-12 L3 Experimental document command parser - expl3.sty 2018-06-14 L3 programming layer (loader) -expl3-code.tex 2018-06-14 L3 programming layer -l3pdfmode.def 2018-06-14 v L3 Experimental driver: PDF mode +etoolbox.sty 2017/01/02 v2.4 e-TeX tools for LaTeX (JAW) + xparse.sty 2016/11/21 v6760 L3 Experimental document command parser + expl3.sty 2016/11/21 v6760 L3 programming layer (loader) +expl3-code.tex 2016/11/21 v6760 L3 programming layer +l3pdfmode.def 2016/08/18 v6679 L3 Experimental driver: PDF mode [...] 
- siunitx.sty 2018/05/17 v2.7s A comprehensive (SI) units package -l3keys2e.sty 2018-05-12 LaTeX2e option processing using LaTeX3 keys -translator.sty 2018/01/04 v1.12 Easy translation of strings in LaTeX -hyperref.sty 2018/02/06 v6.86b Hypertext links for LaTeX + siunitx.sty 2017/01/01 v2.7a A comprehensive (SI) units package +l3keys2e.sty 2016/11/21 v6760 LaTeX2e option processing using LaTeX3 keys +translator.sty 2010/06/12 ver 1.10 +translator-language-mappings.tex +hyperref.sty 2016/06/24 v6.83q Hypertext links for LaTeX [...] - pd1enc.def 2018/02/06 v6.86b Hyperref: PDFDocEncoding definition (HO) + pd1enc.def 2016/06/24 v6.83q Hyperref: PDFDocEncoding definition (HO) [...] - hpdftex.def 2018/02/06 v6.86b Hyperref driver for pdfTeX + hpdftex.def 2016/06/24 v6.83q Hyperref driver for pdfTeX [...] - ts1enc.dfu 2018/04/05 v1.2c UTF-8 support for inputenc + ts1enc.dfu 2017/01/28 v1.1t UTF-8 support for inputenc [...] -siunitx-abbreviations.cfg 2017/11/26 v2.7k siunitx: Abbreviated units +siunitx-abbreviations.cfg 2017/01/01 v2.7a siunitx: Abbreviated units A: This question can be divided in sub-questions: * *How do I create a document for professional printing? *How do I eliminate the differences between compiling a document in Fedora30 and Debian Stretch? As a preferred Windows user I can only directly answer #1: You should create a document that directly complies to one of the PDF/X standards. As long as you want one document for the archives/libraries and for print, this standard should be PDF/X-4 and the base colour model should be CMYK. (PFD/X-1 is paper-only, PDF/X-3 is the outdated version of PDF/X-4.) A minimal example of PDF/X you can find in this answer. Minimal working examples (MWE) take a while to create but they are incredibly useful. Does the problem persist when you compile the MWE fom above augmented by colour? In MikTeX with normal coloured text I do not see this behaviour. I see your packages are outdated. 
Update your distribution to the latest version. You do not specify how in Acrobat you read the colour values. There is no colour picker in Acrobat. The correct way to check is with the Output Preview of the Print Production tool. Select the OutputIntent as your simulation profile and choose only Process Color. Is your text visible? If yes, it's OK. An alternative way is to set \pdfobjcompresslevel=0, \pdfcompresslevel=0 and edit the file in a text editor. You should find the values 0 0 0 .15 if the colours were correctly written to PDF.
{ "redpajama_set_name": "RedPajamaStackExchange" }
2,358
Caso Miguel ou Caso menino Miguel pode referir-se a: Caso Miguel Otávio Santana da Silva (Recife) Caso Miguel dos Santos Rodrigues (Imbé)
{ "redpajama_set_name": "RedPajamaWikipedia" }
4,720
Robert Dudley, Earl of Leicester's original design for the tracery moulding frieze above the north entrance to the 16th century Kenilworth Castle, for his great secret love, Elizabeth Rex, alas, never used... Encrusted now with Swarovski crystal hearts. The frozen and metalised spine of a monstrous, vanquished predator. A pewter, highly detailed serpent-dragon in a figure eight, on a fine, black faux- leather split bracelet strap, a Swarovski crystal rivet either side and adjustable with a chain and spring catch. A fine, black faux- leather bracelet strap, adjustable with a chain and spring catch, and creeping along is is a black pewter cat with red Swarovski crystal eyes. Seeing into the past, the present and the future with the unsettling guidance of the spirit board. Early scientific instrument, for controlling the wavelength of moonlight emissions. This lordly, iconic torque bangle must be that of a Scandinavian or Saxon pagan noble, won, almost inevitably, by force. The frenzied roiling of the ravening spiniferous creature hi-lights the peril of harbouring such chaotic companions. A talismanic magic tablet device for assisting in the achievement of the the alchemist's magnum opus, the 'great work', incorporating sigils of the principle constituents required for alchemical transmutation; sun, moon, spirit, salt, sulphur and mercury, and a Solomon's Seal formed by the unity of the four elements engraved upon its natural black onyx focal disc.
{ "redpajama_set_name": "RedPajamaC4" }
8,045
This lovely Easter Garden Basket is the perfect way to express your best wishes for this Easter holiday! Filled with lovely springtime flowers, this arrangement will fill their home with the virbant colors and beauty of the season! Flowers, container and accessories may vary due to season and availability.
{ "redpajama_set_name": "RedPajamaC4" }
3,485
What age do you have to be to go to Kylemore Karting? 1 What age do you have to be to go to Kylemore Karting? 1.1 What is the fastest go kart place? 1.1.1 How fast do indoor go karts go? 2 How much does a karting car cost? 2.1 Where is the biggest go-kart track in the US? 2.1.1 Can you go-kart drunk? At Kylemore Karting, we make customer safety our number one priority. Our fully trained race team, industry leading karts and equipment and clever track designs allow us offer a safe and enjoyable karting experience for kids 9 years+ and over 137cm. How much is GP Karting? It is located off Langata road just after Carnivore grounds. It is easily accessible via public means. Get a matatu/bus from town heading to Langata for Kshs. 50 and alight at the Carnivore stage….GP-Karting Langata. Kshs.900 13 – 16 Years Kshs.1,100 17 – Adults Kshs. 1,300 What is the fastest go kart place? The pulse-pounding experience located at Las Vegas Motor Speedway just 15 minutes from The Strip features a new fleet of 4-stroke, gas-powered Sodikart SR5 270 CCs and the longest, fastest and widest outdoor go-kart track in Las Vegas. How tall do u have to be to go cart? Minimum Height : 125cm Whether you are looking to race for fun, arrange a kids party, have private tuition or even pursue a career in Motorsport, we have kids go karting packages to suit all occasions. How fast do indoor go karts go? Most karts max out around 40-50mph, while this is a maintained speed that is safe and as controlled as you can be. It's a speed that is comfortable in the sense of you wont fly off the track, and hurt other racers or patrons. What do you need for karting? Other than that, you just need your helmet, suit and boots – and obviously your kart, tyres, fuel, engine. If you start going to a few different tracks, you might want to buy a new set of tyres, or start changing gear ratios. How much does a karting car cost? The average go-kart costs between $1,500 to $2,500. 
However, you'll need to understand that there are many types of go-karts and each go-kart type has a different price range. For example, a pedal go-kart for a kid can cost as low as $150, whereas a professional high-end adult racing go-kart can cost up to $10,000. Is a Go Kart a car? And What's a Go Kart? Go karting is the sport of driving go karts. Go karts are fundamentally a small-scale four-wheeled vehicle, not too dissimilar from a car. In fact, go karts share many of the same characteristics of a car including: a steering wheel, tires, brakes, an engine/motor, and gas/brake pedals. Where is the biggest go-kart track in the US? Kart Kountry (Sheperdsvile, Kentucky): Home to the largest go-kart track in the country, five sizes of karts whiz around five different outdoor racing courses. How fast do Vegas superkarts go? Vegas Superkarts' 4-stroke engines can get up to speeds of more than 40 mph on a 2,100-foot outdoor track, according to a news release Tuesday. Drivers must be 14 or older and 55 inches tall; riders (some models have two seats) must be at least 42 inches tall. Can you go-kart drunk? While you certainly can pretty much do anything while drunk, the answer is no! You shouldn't go go-karting when you're drunk. There are many things that can go wrong while under the influence of alcohol and in worst case can cause serious injury or even death to you and others. What is the weight limit for a go-kart? A minimum height of 52 inches is required for adult karts. A minimum age of 10 is required for adult karts. A maximum weight of 450 lbs. to race on adult track. https://www.youtube.com/channel/UCuTWueFO5wcA6h7HMQpNKTA How do you unlock characters in Spiderman Friend or Foe? Who are all the CNN anchors?
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
1,355
.class public final Landroid/nfc/NfcActivityManager; .super Landroid/nfc/IAppCallback$Stub; .source "NfcActivityManager.java" # interfaces .implements Landroid/app/Application$ActivityLifecycleCallbacks; # annotations .annotation system Ldalvik/annotation/MemberClasses; value = { Landroid/nfc/NfcActivityManager$NfcActivityState;, Landroid/nfc/NfcActivityManager$NfcApplicationState; } .end annotation # static fields .field static final DBG:Ljava/lang/Boolean; .field static final TAG:Ljava/lang/String; = "NFC" # instance fields .field final mActivities:Ljava/util/List; .annotation system Ldalvik/annotation/Signature; value = { "Ljava/util/List", "<", "Landroid/nfc/NfcActivityManager$NfcActivityState;", ">;" } .end annotation .end field .field final mAdapter:Landroid/nfc/NfcAdapter; .field final mApps:Ljava/util/List; .annotation system Ldalvik/annotation/Signature; value = { "Ljava/util/List", "<", "Landroid/nfc/NfcActivityManager$NfcApplicationState;", ">;" } .end annotation .end field .field final mDefaultEvent:Landroid/nfc/NfcEvent; # direct methods .method static constructor <clinit>()V .locals 1 .prologue .line 43 const/4 v0, 0x0 invoke-static {v0}, Ljava/lang/Boolean;->valueOf(Z)Ljava/lang/Boolean; move-result-object v0 sput-object v0, Landroid/nfc/NfcActivityManager;->DBG:Ljava/lang/Boolean; return-void .end method .method public constructor <init>(Landroid/nfc/NfcAdapter;)V .locals 2 .param p1, "adapter" # Landroid/nfc/NfcAdapter; .prologue .line 196 invoke-direct {p0}, Landroid/nfc/IAppCallback$Stub;-><init>()V .line 197 iput-object p1, p0, Landroid/nfc/NfcActivityManager;->mAdapter:Landroid/nfc/NfcAdapter; .line 198 new-instance v0, Ljava/util/LinkedList; invoke-direct {v0}, Ljava/util/LinkedList;-><init>()V iput-object v0, p0, Landroid/nfc/NfcActivityManager;->mActivities:Ljava/util/List; .line 199 new-instance v0, Ljava/util/ArrayList; const/4 v1, 0x1 invoke-direct {v0, v1}, Ljava/util/ArrayList;-><init>(I)V iput-object v0, p0, 
Landroid/nfc/NfcActivityManager;->mApps:Ljava/util/List; .line 200 new-instance v0, Landroid/nfc/NfcEvent; iget-object v1, p0, Landroid/nfc/NfcActivityManager;->mAdapter:Landroid/nfc/NfcAdapter; invoke-direct {v0, v1}, Landroid/nfc/NfcEvent;-><init>(Landroid/nfc/NfcAdapter;)V iput-object v0, p0, Landroid/nfc/NfcActivityManager;->mDefaultEvent:Landroid/nfc/NfcEvent; .line 201 return-void .end method # virtual methods .method public createBeamShareData()Landroid/nfc/BeamShareData; .locals 15 .prologue const/4 v12, 0x0 .line 332 monitor-enter p0 .line 333 :try_start_0 invoke-virtual {p0}, Landroid/nfc/NfcActivityManager;->findResumedActivityState()Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v8 .line 334 .local v8, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; if-nez v8, :cond_0 monitor-exit p0 .line 371 :goto_0 return-object v12 .line 336 :cond_0 iget-object v6, v8, Landroid/nfc/NfcActivityManager$NfcActivityState;->ndefMessageCallback:Landroid/nfc/NfcAdapter$CreateNdefMessageCallback; .line 337 .local v6, "ndefCallback":Landroid/nfc/NfcAdapter$CreateNdefMessageCallback; iget-object v11, v8, Landroid/nfc/NfcActivityManager$NfcActivityState;->uriCallback:Landroid/nfc/NfcAdapter$CreateBeamUrisCallback; .line 338 .local v11, "urisCallback":Landroid/nfc/NfcAdapter$CreateBeamUrisCallback; iget-object v5, v8, Landroid/nfc/NfcActivityManager$NfcActivityState;->ndefMessage:Landroid/nfc/NdefMessage; .line 339 .local v5, "message":Landroid/nfc/NdefMessage; iget-object v10, v8, Landroid/nfc/NfcActivityManager$NfcActivityState;->uris:[Landroid/net/Uri; .line 340 .local v10, "uris":[Landroid/net/Uri; iget v2, v8, Landroid/nfc/NfcActivityManager$NfcActivityState;->flags:I .line 341 .local v2, "flags":I monitor-exit p0 :try_end_0 .catchall {:try_start_0 .. 
:try_end_0} :catchall_0 .line 344 if-eqz v6, :cond_1 .line 346 :try_start_1 iget-object v13, p0, Landroid/nfc/NfcActivityManager;->mDefaultEvent:Landroid/nfc/NfcEvent; invoke-interface {v6, v13}, Landroid/nfc/NfcAdapter$CreateNdefMessageCallback;->createNdefMessage(Landroid/nfc/NfcEvent;)Landroid/nfc/NdefMessage; :try_end_1 .catch Ljava/lang/Exception; {:try_start_1 .. :try_end_1} :catch_0 move-result-object v5 .line 352 :cond_1 :goto_1 if-eqz v11, :cond_5 .line 353 iget-object v13, p0, Landroid/nfc/NfcActivityManager;->mDefaultEvent:Landroid/nfc/NfcEvent; invoke-interface {v11, v13}, Landroid/nfc/NfcAdapter$CreateBeamUrisCallback;->createBeamUris(Landroid/nfc/NfcEvent;)[Landroid/net/Uri; move-result-object v10 .line 354 if-eqz v10, :cond_5 .line 355 move-object v0, v10 .local v0, "arr$":[Landroid/net/Uri; array-length v4, v0 .local v4, "len$":I const/4 v3, 0x0 .local v3, "i$":I :goto_2 if-ge v3, v4, :cond_5 aget-object v9, v0, v3 .line 356 .local v9, "uri":Landroid/net/Uri; if-nez v9, :cond_2 .line 357 const-string v13, "NFC" const-string v14, "Uri not allowed to be null." invoke-static {v13, v14}, Landroid/util/Log;->e(Ljava/lang/String;Ljava/lang/String;)I goto :goto_0 .line 341 .end local v0 # "arr$":[Landroid/net/Uri; .end local v2 # "flags":I .end local v3 # "i$":I .end local v4 # "len$":I .end local v5 # "message":Landroid/nfc/NdefMessage; .end local v6 # "ndefCallback":Landroid/nfc/NfcAdapter$CreateNdefMessageCallback; .end local v8 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; .end local v9 # "uri":Landroid/net/Uri; .end local v10 # "uris":[Landroid/net/Uri; .end local v11 # "urisCallback":Landroid/nfc/NfcAdapter$CreateBeamUrisCallback; :catchall_0 move-exception v12 :try_start_2 monitor-exit p0 :try_end_2 .catchall {:try_start_2 .. 
:try_end_2} :catchall_0 throw v12 .line 347 .restart local v2 # "flags":I .restart local v5 # "message":Landroid/nfc/NdefMessage; .restart local v6 # "ndefCallback":Landroid/nfc/NfcAdapter$CreateNdefMessageCallback; .restart local v8 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; .restart local v10 # "uris":[Landroid/net/Uri; .restart local v11 # "urisCallback":Landroid/nfc/NfcAdapter$CreateBeamUrisCallback; :catch_0 move-exception v1 .line 348 .local v1, "e":Ljava/lang/Exception; invoke-virtual {v1}, Ljava/lang/Exception;->printStackTrace()V .line 349 const/4 v5, 0x0 goto :goto_1 .line 360 .end local v1 # "e":Ljava/lang/Exception; .restart local v0 # "arr$":[Landroid/net/Uri; .restart local v3 # "i$":I .restart local v4 # "len$":I .restart local v9 # "uri":Landroid/net/Uri; :cond_2 invoke-virtual {v9}, Landroid/net/Uri;->getScheme()Ljava/lang/String; move-result-object v7 .line 361 .local v7, "scheme":Ljava/lang/String; if-eqz v7, :cond_3 const-string v13, "file" invoke-virtual {v7, v13}, Ljava/lang/String;->equalsIgnoreCase(Ljava/lang/String;)Z move-result v13 if-nez v13, :cond_4 const-string v13, "content" invoke-virtual {v7, v13}, Ljava/lang/String;->equalsIgnoreCase(Ljava/lang/String;)Z move-result v13 if-nez v13, :cond_4 .line 363 :cond_3 const-string v13, "NFC" const-string v14, "Uri needs to have either scheme file or scheme content" invoke-static {v13, v14}, Landroid/util/Log;->e(Ljava/lang/String;Ljava/lang/String;)I goto :goto_0 .line 355 :cond_4 add-int/lit8 v3, v3, 0x1 goto :goto_2 .line 371 .end local v0 # "arr$":[Landroid/net/Uri; .end local v3 # "i$":I .end local v4 # "len$":I .end local v7 # "scheme":Ljava/lang/String; .end local v9 # "uri":Landroid/net/Uri; :cond_5 new-instance v12, Landroid/nfc/BeamShareData; invoke-direct {v12, v5, v10, v2}, Landroid/nfc/BeamShareData;-><init>(Landroid/nfc/NdefMessage;[Landroid/net/Uri;I)V goto :goto_0 .end method .method declared-synchronized destroyActivityState(Landroid/app/Activity;)V .locals 2 
.param p1, "activity" # Landroid/app/Activity; .prologue .line 189 monitor-enter p0 :try_start_0 invoke-virtual {p0, p1}, Landroid/nfc/NfcActivityManager;->findActivityState(Landroid/app/Activity;)Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v0 .line 190 .local v0, "activityState":Landroid/nfc/NfcActivityManager$NfcActivityState; if-eqz v0, :cond_0 .line 191 invoke-virtual {v0}, Landroid/nfc/NfcActivityManager$NfcActivityState;->destroy()V .line 192 iget-object v1, p0, Landroid/nfc/NfcActivityManager;->mActivities:Ljava/util/List; invoke-interface {v1, v0}, Ljava/util/List;->remove(Ljava/lang/Object;)Z :try_end_0 .catchall {:try_start_0 .. :try_end_0} :catchall_0 .line 194 :cond_0 monitor-exit p0 return-void .line 189 .end local v0 # "activityState":Landroid/nfc/NfcActivityManager$NfcActivityState; :catchall_0 move-exception v1 monitor-exit p0 throw v1 .end method .method public disableReaderMode(Landroid/app/Activity;)V .locals 6 .param p1, "activity" # Landroid/app/Activity; .prologue const/4 v5, 0x0 const/4 v4, 0x0 .line 223 monitor-enter p0 .line 224 :try_start_0 invoke-virtual {p0, p1}, Landroid/nfc/NfcActivityManager;->getActivityState(Landroid/app/Activity;)Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v1 .line 225 .local v1, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; const/4 v3, 0x0 iput-object v3, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->readerCallback:Landroid/nfc/NfcAdapter$ReaderCallback; .line 226 const/4 v3, 0x0 iput v3, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->readerModeFlags:I .line 227 const/4 v3, 0x0 iput-object v3, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->readerModeExtras:Landroid/os/Bundle; .line 228 iget-object v2, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->token:Landroid/os/Binder; .line 229 .local v2, "token":Landroid/os/Binder; iget-boolean v0, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->resumed:Z .line 230 .local 
v0, "isResumed":Z monitor-exit p0 :try_end_0 .catchall {:try_start_0 .. :try_end_0} :catchall_0 .line 231 if-eqz v0, :cond_0 .line 232 invoke-virtual {p0, v2, v5, v4}, Landroid/nfc/NfcActivityManager;->setReaderMode(Landroid/os/Binder;ILandroid/os/Bundle;)V .line 235 :cond_0 return-void .line 230 .end local v0 # "isResumed":Z .end local v1 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; .end local v2 # "token":Landroid/os/Binder; :catchall_0 move-exception v3 :try_start_1 monitor-exit p0 :try_end_1 .catchall {:try_start_1 .. :try_end_1} :catchall_0 throw v3 .end method .method public enableReaderMode(Landroid/app/Activity;Landroid/nfc/NfcAdapter$ReaderCallback;ILandroid/os/Bundle;)V .locals 4 .param p1, "activity" # Landroid/app/Activity; .param p2, "callback" # Landroid/nfc/NfcAdapter$ReaderCallback; .param p3, "flags" # I .param p4, "extras" # Landroid/os/Bundle; .prologue .line 207 monitor-enter p0 .line 208 :try_start_0 invoke-virtual {p0, p1}, Landroid/nfc/NfcActivityManager;->getActivityState(Landroid/app/Activity;)Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v1 .line 209 .local v1, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; iput-object p2, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->readerCallback:Landroid/nfc/NfcAdapter$ReaderCallback; .line 210 iput p3, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->readerModeFlags:I .line 211 iput-object p4, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->readerModeExtras:Landroid/os/Bundle; .line 212 iget-object v2, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->token:Landroid/os/Binder; .line 213 .local v2, "token":Landroid/os/Binder; iget-boolean v0, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->resumed:Z .line 214 .local v0, "isResumed":Z monitor-exit p0 :try_end_0 .catchall {:try_start_0 .. 
:try_end_0} :catchall_0 .line 215 if-eqz v0, :cond_0 .line 216 invoke-virtual {p0, v2, p3, p4}, Landroid/nfc/NfcActivityManager;->setReaderMode(Landroid/os/Binder;ILandroid/os/Bundle;)V .line 218 :cond_0 return-void .line 214 .end local v0 # "isResumed":Z .end local v1 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; .end local v2 # "token":Landroid/os/Binder; :catchall_0 move-exception v3 :try_start_1 monitor-exit p0 :try_end_1 .catchall {:try_start_1 .. :try_end_1} :catchall_0 throw v3 .end method .method declared-synchronized findActivityState(Landroid/app/Activity;)Landroid/nfc/NfcActivityManager$NfcActivityState; .locals 3 .param p1, "activity" # Landroid/app/Activity; .prologue .line 161 monitor-enter p0 :try_start_0 iget-object v2, p0, Landroid/nfc/NfcActivityManager;->mActivities:Ljava/util/List; invoke-interface {v2}, Ljava/util/List;->iterator()Ljava/util/Iterator; move-result-object v0 .local v0, "i$":Ljava/util/Iterator; :cond_0 invoke-interface {v0}, Ljava/util/Iterator;->hasNext()Z move-result v2 if-eqz v2, :cond_1 invoke-interface {v0}, Ljava/util/Iterator;->next()Ljava/lang/Object; move-result-object v1 check-cast v1, Landroid/nfc/NfcActivityManager$NfcActivityState; .line 162 .local v1, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; iget-object v2, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->activity:Landroid/app/Activity; :try_end_0 .catchall {:try_start_0 .. 
:try_end_0} :catchall_0 if-ne v2, p1, :cond_0 .line 166 .end local v1 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; :goto_0 monitor-exit p0 return-object v1 :cond_1 const/4 v1, 0x0 goto :goto_0 .line 161 .end local v0 # "i$":Ljava/util/Iterator; :catchall_0 move-exception v2 monitor-exit p0 throw v2 .end method .method findAppState(Landroid/app/Application;)Landroid/nfc/NfcActivityManager$NfcApplicationState; .locals 3 .param p1, "app" # Landroid/app/Application; .prologue .line 78 iget-object v2, p0, Landroid/nfc/NfcActivityManager;->mApps:Ljava/util/List; invoke-interface {v2}, Ljava/util/List;->iterator()Ljava/util/Iterator; move-result-object v1 .local v1, "i$":Ljava/util/Iterator; :cond_0 invoke-interface {v1}, Ljava/util/Iterator;->hasNext()Z move-result v2 if-eqz v2, :cond_1 invoke-interface {v1}, Ljava/util/Iterator;->next()Ljava/lang/Object; move-result-object v0 check-cast v0, Landroid/nfc/NfcActivityManager$NfcApplicationState; .line 79 .local v0, "appState":Landroid/nfc/NfcActivityManager$NfcApplicationState; iget-object v2, v0, Landroid/nfc/NfcActivityManager$NfcApplicationState;->app:Landroid/app/Application; if-ne v2, p1, :cond_0 .line 83 .end local v0 # "appState":Landroid/nfc/NfcActivityManager$NfcApplicationState; :goto_0 return-object v0 :cond_1 const/4 v0, 0x0 goto :goto_0 .end method .method declared-synchronized findResumedActivityState()Landroid/nfc/NfcActivityManager$NfcActivityState; .locals 3 .prologue .line 180 monitor-enter p0 :try_start_0 iget-object v2, p0, Landroid/nfc/NfcActivityManager;->mActivities:Ljava/util/List; invoke-interface {v2}, Ljava/util/List;->iterator()Ljava/util/Iterator; move-result-object v0 .local v0, "i$":Ljava/util/Iterator; :cond_0 invoke-interface {v0}, Ljava/util/Iterator;->hasNext()Z move-result v2 if-eqz v2, :cond_1 invoke-interface {v0}, Ljava/util/Iterator;->next()Ljava/lang/Object; move-result-object v1 check-cast v1, Landroid/nfc/NfcActivityManager$NfcActivityState; .line 181 .local v1, 
"state":Landroid/nfc/NfcActivityManager$NfcActivityState; iget-boolean v2, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->resumed:Z :try_end_0 .catchall {:try_start_0 .. :try_end_0} :catchall_0 if-eqz v2, :cond_0 .line 185 .end local v1 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; :goto_0 monitor-exit p0 return-object v1 :cond_1 const/4 v1, 0x0 goto :goto_0 .line 180 .end local v0 # "i$":Ljava/util/Iterator; :catchall_0 move-exception v2 monitor-exit p0 throw v2 .end method .method declared-synchronized getActivityState(Landroid/app/Activity;)Landroid/nfc/NfcActivityManager$NfcActivityState; .locals 2 .param p1, "activity" # Landroid/app/Activity; .prologue .line 171 monitor-enter p0 :try_start_0 invoke-virtual {p0, p1}, Landroid/nfc/NfcActivityManager;->findActivityState(Landroid/app/Activity;)Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v0 .line 172 .local v0, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; if-nez v0, :cond_0 .line 173 new-instance v0, Landroid/nfc/NfcActivityManager$NfcActivityState; .end local v0 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; invoke-direct {v0, p0, p1}, Landroid/nfc/NfcActivityManager$NfcActivityState;-><init>(Landroid/nfc/NfcActivityManager;Landroid/app/Activity;)V .line 174 .restart local v0 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; iget-object v1, p0, Landroid/nfc/NfcActivityManager;->mActivities:Ljava/util/List; invoke-interface {v1, v0}, Ljava/util/List;->add(Ljava/lang/Object;)Z :try_end_0 .catchall {:try_start_0 .. 
:try_end_0} :catchall_0 .line 176 :cond_0 monitor-exit p0 return-object v0 .line 171 .end local v0 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; :catchall_0 move-exception v1 monitor-exit p0 throw v1 .end method .method public onActivityCreated(Landroid/app/Activity;Landroid/os/Bundle;)V .locals 0 .param p1, "activity" # Landroid/app/Activity; .param p2, "savedInstanceState" # Landroid/os/Bundle; .prologue .line 409 return-void .end method .method public onActivityDestroyed(Landroid/app/Activity;)V .locals 4 .param p1, "activity" # Landroid/app/Activity; .prologue .line 466 monitor-enter p0 .line 467 :try_start_0 invoke-virtual {p0, p1}, Landroid/nfc/NfcActivityManager;->findActivityState(Landroid/app/Activity;)Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v0 .line 468 .local v0, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; sget-object v1, Landroid/nfc/NfcActivityManager;->DBG:Ljava/lang/Boolean; invoke-virtual {v1}, Ljava/lang/Boolean;->booleanValue()Z move-result v1 if-eqz v1, :cond_0 const-string v1, "NFC" new-instance v2, Ljava/lang/StringBuilder; invoke-direct {v2}, Ljava/lang/StringBuilder;-><init>()V const-string/jumbo v3, "onDestroy() for " invoke-virtual {v2, v3}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; move-result-object v2 invoke-virtual {v2, p1}, Ljava/lang/StringBuilder;->append(Ljava/lang/Object;)Ljava/lang/StringBuilder; move-result-object v2 const-string v3, " " invoke-virtual {v2, v3}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; move-result-object v2 invoke-virtual {v2, v0}, Ljava/lang/StringBuilder;->append(Ljava/lang/Object;)Ljava/lang/StringBuilder; move-result-object v2 invoke-virtual {v2}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String; move-result-object v2 invoke-static {v1, v2}, Landroid/util/Log;->d(Ljava/lang/String;Ljava/lang/String;)I .line 469 :cond_0 if-eqz v0, :cond_1 .line 471 invoke-virtual {p0, p1}, 
Landroid/nfc/NfcActivityManager;->destroyActivityState(Landroid/app/Activity;)V .line 473 :cond_1 monitor-exit p0 .line 474 return-void .line 473 .end local v0 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; :catchall_0 move-exception v1 monitor-exit p0 :try_end_0 .catchall {:try_start_0 .. :try_end_0} :catchall_0 throw v1 .end method .method public onActivityPaused(Landroid/app/Activity;)V .locals 7 .param p1, "activity" # Landroid/app/Activity; .prologue const/4 v3, 0x0 .line 441 monitor-enter p0 .line 442 :try_start_0 invoke-virtual {p0, p1}, Landroid/nfc/NfcActivityManager;->findActivityState(Landroid/app/Activity;)Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v1 .line 443 .local v1, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; sget-object v4, Landroid/nfc/NfcActivityManager;->DBG:Ljava/lang/Boolean; invoke-virtual {v4}, Ljava/lang/Boolean;->booleanValue()Z move-result v4 if-eqz v4, :cond_0 const-string v4, "NFC" new-instance v5, Ljava/lang/StringBuilder; invoke-direct {v5}, Ljava/lang/StringBuilder;-><init>()V const-string/jumbo v6, "onPause() for " invoke-virtual {v5, v6}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; move-result-object v5 invoke-virtual {v5, p1}, Ljava/lang/StringBuilder;->append(Ljava/lang/Object;)Ljava/lang/StringBuilder; move-result-object v5 const-string v6, " " invoke-virtual {v5, v6}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; move-result-object v5 invoke-virtual {v5, v1}, Ljava/lang/StringBuilder;->append(Ljava/lang/Object;)Ljava/lang/StringBuilder; move-result-object v5 invoke-virtual {v5}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String; move-result-object v5 invoke-static {v4, v5}, Landroid/util/Log;->d(Ljava/lang/String;Ljava/lang/String;)I .line 444 :cond_0 if-nez v1, :cond_2 monitor-exit p0 .line 453 :cond_1 :goto_0 return-void .line 445 :cond_2 const/4 v4, 0x0 iput-boolean v4, v1, 
Landroid/nfc/NfcActivityManager$NfcActivityState;->resumed:Z .line 446 iget-object v2, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->token:Landroid/os/Binder; .line 447 .local v2, "token":Landroid/os/Binder; iget v4, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->readerModeFlags:I if-eqz v4, :cond_3 const/4 v0, 0x1 .line 448 .local v0, "readerModeFlagsSet":Z :goto_1 monitor-exit p0 :try_end_0 .catchall {:try_start_0 .. :try_end_0} :catchall_0 .line 449 if-eqz v0, :cond_1 .line 451 const/4 v4, 0x0 invoke-virtual {p0, v2, v3, v4}, Landroid/nfc/NfcActivityManager;->setReaderMode(Landroid/os/Binder;ILandroid/os/Bundle;)V goto :goto_0 .end local v0 # "readerModeFlagsSet":Z :cond_3 move v0, v3 .line 447 goto :goto_1 .line 448 .end local v1 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; .end local v2 # "token":Landroid/os/Binder; :catchall_0 move-exception v3 :try_start_1 monitor-exit p0 :try_end_1 .catchall {:try_start_1 .. :try_end_1} :catchall_0 throw v3 .end method .method public onActivityResumed(Landroid/app/Activity;)V .locals 7 .param p1, "activity" # Landroid/app/Activity; .prologue .line 418 const/4 v1, 0x0 .line 419 .local v1, "readerModeFlags":I const/4 v0, 0x0 .line 421 .local v0, "readerModeExtras":Landroid/os/Bundle; monitor-enter p0 .line 422 :try_start_0 invoke-virtual {p0, p1}, Landroid/nfc/NfcActivityManager;->findActivityState(Landroid/app/Activity;)Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v2 .line 423 .local v2, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; sget-object v4, Landroid/nfc/NfcActivityManager;->DBG:Ljava/lang/Boolean; invoke-virtual {v4}, Ljava/lang/Boolean;->booleanValue()Z move-result v4 if-eqz v4, :cond_0 const-string v4, "NFC" new-instance v5, Ljava/lang/StringBuilder; invoke-direct {v5}, Ljava/lang/StringBuilder;-><init>()V const-string/jumbo v6, "onResume() for " invoke-virtual {v5, v6}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; 
move-result-object v5 invoke-virtual {v5, p1}, Ljava/lang/StringBuilder;->append(Ljava/lang/Object;)Ljava/lang/StringBuilder; move-result-object v5 const-string v6, " " invoke-virtual {v5, v6}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; move-result-object v5 invoke-virtual {v5, v2}, Ljava/lang/StringBuilder;->append(Ljava/lang/Object;)Ljava/lang/StringBuilder; move-result-object v5 invoke-virtual {v5}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String; move-result-object v5 invoke-static {v4, v5}, Landroid/util/Log;->d(Ljava/lang/String;Ljava/lang/String;)I .line 424 :cond_0 if-nez v2, :cond_1 monitor-exit p0 .line 434 :goto_0 return-void .line 425 :cond_1 const/4 v4, 0x1 iput-boolean v4, v2, Landroid/nfc/NfcActivityManager$NfcActivityState;->resumed:Z .line 426 iget-object v3, v2, Landroid/nfc/NfcActivityManager$NfcActivityState;->token:Landroid/os/Binder; .line 427 .local v3, "token":Landroid/os/Binder; iget v1, v2, Landroid/nfc/NfcActivityManager$NfcActivityState;->readerModeFlags:I .line 428 iget-object v0, v2, Landroid/nfc/NfcActivityManager$NfcActivityState;->readerModeExtras:Landroid/os/Bundle; .line 429 monitor-exit p0 :try_end_0 .catchall {:try_start_0 .. :try_end_0} :catchall_0 .line 430 if-eqz v1, :cond_2 .line 431 invoke-virtual {p0, v3, v1, v0}, Landroid/nfc/NfcActivityManager;->setReaderMode(Landroid/os/Binder;ILandroid/os/Bundle;)V .line 433 :cond_2 invoke-virtual {p0}, Landroid/nfc/NfcActivityManager;->requestNfcServiceCallback()V goto :goto_0 .line 429 .end local v2 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; .end local v3 # "token":Landroid/os/Binder; :catchall_0 move-exception v4 :try_start_1 monitor-exit p0 :try_end_1 .catchall {:try_start_1 .. 
:try_end_1} :catchall_0 throw v4 .end method .method public onActivitySaveInstanceState(Landroid/app/Activity;Landroid/os/Bundle;)V .locals 0 .param p1, "activity" # Landroid/app/Activity; .param p2, "outState" # Landroid/os/Bundle; .prologue .line 461 return-void .end method .method public onActivityStarted(Landroid/app/Activity;)V .locals 0 .param p1, "activity" # Landroid/app/Activity; .prologue .line 413 return-void .end method .method public onActivityStopped(Landroid/app/Activity;)V .locals 0 .param p1, "activity" # Landroid/app/Activity; .prologue .line 457 return-void .end method .method public onNdefPushComplete()V .locals 3 .prologue .line 378 monitor-enter p0 .line 379 :try_start_0 invoke-virtual {p0}, Landroid/nfc/NfcActivityManager;->findResumedActivityState()Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v1 .line 380 .local v1, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; if-nez v1, :cond_1 monitor-exit p0 .line 389 :cond_0 :goto_0 return-void .line 382 :cond_1 iget-object v0, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->onNdefPushCompleteCallback:Landroid/nfc/NfcAdapter$OnNdefPushCompleteCallback; .line 383 .local v0, "callback":Landroid/nfc/NfcAdapter$OnNdefPushCompleteCallback; monitor-exit p0 :try_end_0 .catchall {:try_start_0 .. :try_end_0} :catchall_0 .line 386 if-eqz v0, :cond_0 .line 387 iget-object v2, p0, Landroid/nfc/NfcActivityManager;->mDefaultEvent:Landroid/nfc/NfcEvent; invoke-interface {v0, v2}, Landroid/nfc/NfcAdapter$OnNdefPushCompleteCallback;->onNdefPushComplete(Landroid/nfc/NfcEvent;)V goto :goto_0 .line 383 .end local v0 # "callback":Landroid/nfc/NfcAdapter$OnNdefPushCompleteCallback; .end local v1 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; :catchall_0 move-exception v2 :try_start_1 monitor-exit p0 :try_end_1 .catchall {:try_start_1 .. 
:try_end_1} :catchall_0 throw v2 .end method .method public onTagDiscovered(Landroid/nfc/Tag;)V .locals 3 .param p1, "tag" # Landroid/nfc/Tag; .annotation system Ldalvik/annotation/Throws; value = { Landroid/os/RemoteException; } .end annotation .prologue .line 394 monitor-enter p0 .line 395 :try_start_0 invoke-virtual {p0}, Landroid/nfc/NfcActivityManager;->findResumedActivityState()Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v1 .line 396 .local v1, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; if-nez v1, :cond_1 monitor-exit p0 .line 406 :cond_0 :goto_0 return-void .line 398 :cond_1 iget-object v0, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->readerCallback:Landroid/nfc/NfcAdapter$ReaderCallback; .line 399 .local v0, "callback":Landroid/nfc/NfcAdapter$ReaderCallback; monitor-exit p0 :try_end_0 .catchall {:try_start_0 .. :try_end_0} :catchall_0 .line 402 if-eqz v0, :cond_0 .line 403 invoke-interface {v0, p1}, Landroid/nfc/NfcAdapter$ReaderCallback;->onTagDiscovered(Landroid/nfc/Tag;)V goto :goto_0 .line 399 .end local v0 # "callback":Landroid/nfc/NfcAdapter$ReaderCallback; .end local v1 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; :catchall_0 move-exception v2 :try_start_1 monitor-exit p0 :try_end_1 .catchall {:try_start_1 .. 
:try_end_1} :catchall_0 throw v2 .end method .method registerApplication(Landroid/app/Application;)V .locals 2 .param p1, "app" # Landroid/app/Application; .prologue .line 87 invoke-virtual {p0, p1}, Landroid/nfc/NfcActivityManager;->findAppState(Landroid/app/Application;)Landroid/nfc/NfcActivityManager$NfcApplicationState; move-result-object v0 .line 88 .local v0, "appState":Landroid/nfc/NfcActivityManager$NfcApplicationState; if-nez v0, :cond_0 .line 89 new-instance v0, Landroid/nfc/NfcActivityManager$NfcApplicationState; .end local v0 # "appState":Landroid/nfc/NfcActivityManager$NfcApplicationState; invoke-direct {v0, p0, p1}, Landroid/nfc/NfcActivityManager$NfcApplicationState;-><init>(Landroid/nfc/NfcActivityManager;Landroid/app/Application;)V .line 90 .restart local v0 # "appState":Landroid/nfc/NfcActivityManager$NfcApplicationState; iget-object v1, p0, Landroid/nfc/NfcActivityManager;->mApps:Ljava/util/List; invoke-interface {v1, v0}, Ljava/util/List;->add(Ljava/lang/Object;)Z .line 92 :cond_0 invoke-virtual {v0}, Landroid/nfc/NfcActivityManager$NfcApplicationState;->register()V .line 93 return-void .end method .method requestNfcServiceCallback()V .locals 2 .prologue .line 318 :try_start_0 sget-object v1, Landroid/nfc/NfcAdapter;->sService:Landroid/nfc/INfcAdapter; invoke-interface {v1, p0}, Landroid/nfc/INfcAdapter;->setAppCallback(Landroid/nfc/IAppCallback;)V :try_end_0 .catch Landroid/os/RemoteException; {:try_start_0 .. 
:try_end_0} :catch_0 .line 322 :goto_0 return-void .line 319 :catch_0 move-exception v0 .line 320 .local v0, "e":Landroid/os/RemoteException; iget-object v1, p0, Landroid/nfc/NfcActivityManager;->mAdapter:Landroid/nfc/NfcAdapter; invoke-virtual {v1, v0}, Landroid/nfc/NfcAdapter;->attemptDeadServiceRecovery(Ljava/lang/Exception;)V goto :goto_0 .end method .method public setNdefPushContentUri(Landroid/app/Activity;[Landroid/net/Uri;)V .locals 3 .param p1, "activity" # Landroid/app/Activity; .param p2, "uris" # [Landroid/net/Uri; .prologue .line 248 monitor-enter p0 .line 249 :try_start_0 invoke-virtual {p0, p1}, Landroid/nfc/NfcActivityManager;->getActivityState(Landroid/app/Activity;)Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v1 .line 250 .local v1, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; iput-object p2, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->uris:[Landroid/net/Uri; .line 251 iget-boolean v0, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->resumed:Z .line 252 .local v0, "isResumed":Z monitor-exit p0 :try_end_0 .catchall {:try_start_0 .. :try_end_0} :catchall_0 .line 253 if-eqz v0, :cond_0 .line 254 invoke-virtual {p0}, Landroid/nfc/NfcActivityManager;->requestNfcServiceCallback()V .line 256 :cond_0 return-void .line 252 .end local v0 # "isResumed":Z .end local v1 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; :catchall_0 move-exception v2 :try_start_1 monitor-exit p0 :try_end_1 .catchall {:try_start_1 .. 
:try_end_1} :catchall_0 throw v2 .end method .method public setNdefPushContentUriCallback(Landroid/app/Activity;Landroid/nfc/NfcAdapter$CreateBeamUrisCallback;)V .locals 3 .param p1, "activity" # Landroid/app/Activity; .param p2, "callback" # Landroid/nfc/NfcAdapter$CreateBeamUrisCallback; .prologue .line 262 monitor-enter p0 .line 263 :try_start_0 invoke-virtual {p0, p1}, Landroid/nfc/NfcActivityManager;->getActivityState(Landroid/app/Activity;)Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v1 .line 264 .local v1, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; iput-object p2, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->uriCallback:Landroid/nfc/NfcAdapter$CreateBeamUrisCallback; .line 265 iget-boolean v0, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->resumed:Z .line 266 .local v0, "isResumed":Z monitor-exit p0 :try_end_0 .catchall {:try_start_0 .. :try_end_0} :catchall_0 .line 267 if-eqz v0, :cond_0 .line 268 invoke-virtual {p0}, Landroid/nfc/NfcActivityManager;->requestNfcServiceCallback()V .line 270 :cond_0 return-void .line 266 .end local v0 # "isResumed":Z .end local v1 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; :catchall_0 move-exception v2 :try_start_1 monitor-exit p0 :try_end_1 .catchall {:try_start_1 .. 
:try_end_1} :catchall_0 throw v2 .end method .method public setNdefPushMessage(Landroid/app/Activity;Landroid/nfc/NdefMessage;I)V .locals 3 .param p1, "activity" # Landroid/app/Activity; .param p2, "message" # Landroid/nfc/NdefMessage; .param p3, "flags" # I .prologue .line 274 monitor-enter p0 .line 275 :try_start_0 invoke-virtual {p0, p1}, Landroid/nfc/NfcActivityManager;->getActivityState(Landroid/app/Activity;)Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v1 .line 276 .local v1, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; iput-object p2, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->ndefMessage:Landroid/nfc/NdefMessage; .line 277 iput p3, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->flags:I .line 278 iget-boolean v0, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->resumed:Z .line 279 .local v0, "isResumed":Z monitor-exit p0 :try_end_0 .catchall {:try_start_0 .. :try_end_0} :catchall_0 .line 280 if-eqz v0, :cond_0 .line 281 invoke-virtual {p0}, Landroid/nfc/NfcActivityManager;->requestNfcServiceCallback()V .line 283 :cond_0 return-void .line 279 .end local v0 # "isResumed":Z .end local v1 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; :catchall_0 move-exception v2 :try_start_1 monitor-exit p0 :try_end_1 .catchall {:try_start_1 .. 
:try_end_1} :catchall_0 throw v2 .end method .method public setNdefPushMessageCallback(Landroid/app/Activity;Landroid/nfc/NfcAdapter$CreateNdefMessageCallback;I)V .locals 3 .param p1, "activity" # Landroid/app/Activity; .param p2, "callback" # Landroid/nfc/NfcAdapter$CreateNdefMessageCallback; .param p3, "flags" # I .prologue .line 288 monitor-enter p0 .line 289 :try_start_0 invoke-virtual {p0, p1}, Landroid/nfc/NfcActivityManager;->getActivityState(Landroid/app/Activity;)Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v1 .line 290 .local v1, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; iput-object p2, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->ndefMessageCallback:Landroid/nfc/NfcAdapter$CreateNdefMessageCallback; .line 291 iput p3, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->flags:I .line 292 iget-boolean v0, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->resumed:Z .line 293 .local v0, "isResumed":Z monitor-exit p0 :try_end_0 .catchall {:try_start_0 .. :try_end_0} :catchall_0 .line 294 if-eqz v0, :cond_0 .line 295 invoke-virtual {p0}, Landroid/nfc/NfcActivityManager;->requestNfcServiceCallback()V .line 297 :cond_0 return-void .line 293 .end local v0 # "isResumed":Z .end local v1 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; :catchall_0 move-exception v2 :try_start_1 monitor-exit p0 :try_end_1 .catchall {:try_start_1 .. 
:try_end_1} :catchall_0 throw v2 .end method .method public setOnNdefPushCompleteCallback(Landroid/app/Activity;Landroid/nfc/NfcAdapter$OnNdefPushCompleteCallback;)V .locals 3 .param p1, "activity" # Landroid/app/Activity; .param p2, "callback" # Landroid/nfc/NfcAdapter$OnNdefPushCompleteCallback; .prologue .line 302 monitor-enter p0 .line 303 :try_start_0 invoke-virtual {p0, p1}, Landroid/nfc/NfcActivityManager;->getActivityState(Landroid/app/Activity;)Landroid/nfc/NfcActivityManager$NfcActivityState; move-result-object v1 .line 304 .local v1, "state":Landroid/nfc/NfcActivityManager$NfcActivityState; iput-object p2, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->onNdefPushCompleteCallback:Landroid/nfc/NfcAdapter$OnNdefPushCompleteCallback; .line 305 iget-boolean v0, v1, Landroid/nfc/NfcActivityManager$NfcActivityState;->resumed:Z .line 306 .local v0, "isResumed":Z monitor-exit p0 :try_end_0 .catchall {:try_start_0 .. :try_end_0} :catchall_0 .line 307 if-eqz v0, :cond_0 .line 308 invoke-virtual {p0}, Landroid/nfc/NfcActivityManager;->requestNfcServiceCallback()V .line 310 :cond_0 return-void .line 306 .end local v0 # "isResumed":Z .end local v1 # "state":Landroid/nfc/NfcActivityManager$NfcActivityState; :catchall_0 move-exception v2 :try_start_1 monitor-exit p0 :try_end_1 .catchall {:try_start_1 .. 
:try_end_1} :catchall_0 throw v2 .end method .method public setReaderMode(Landroid/os/Binder;ILandroid/os/Bundle;)V .locals 3 .param p1, "token" # Landroid/os/Binder; .param p2, "flags" # I .param p3, "extras" # Landroid/os/Bundle; .prologue .line 238 sget-object v1, Landroid/nfc/NfcActivityManager;->DBG:Ljava/lang/Boolean; invoke-virtual {v1}, Ljava/lang/Boolean;->booleanValue()Z move-result v1 if-eqz v1, :cond_0 const-string v1, "NFC" const-string v2, "Setting reader mode" invoke-static {v1, v2}, Landroid/util/Log;->d(Ljava/lang/String;Ljava/lang/String;)I .line 240 :cond_0 :try_start_0 sget-object v1, Landroid/nfc/NfcAdapter;->sService:Landroid/nfc/INfcAdapter; invoke-interface {v1, p1, p0, p2, p3}, Landroid/nfc/INfcAdapter;->setReaderMode(Landroid/os/IBinder;Landroid/nfc/IAppCallback;ILandroid/os/Bundle;)V :try_end_0 .catch Landroid/os/RemoteException; {:try_start_0 .. :try_end_0} :catch_0 .line 244 :goto_0 return-void .line 241 :catch_0 move-exception v0 .line 242 .local v0, "e":Landroid/os/RemoteException; iget-object v1, p0, Landroid/nfc/NfcActivityManager;->mAdapter:Landroid/nfc/NfcAdapter; invoke-virtual {v1, v0}, Landroid/nfc/NfcAdapter;->attemptDeadServiceRecovery(Ljava/lang/Exception;)V goto :goto_0 .end method .method unregisterApplication(Landroid/app/Application;)V .locals 4 .param p1, "app" # Landroid/app/Application; .prologue .line 96 invoke-virtual {p0, p1}, Landroid/nfc/NfcActivityManager;->findAppState(Landroid/app/Application;)Landroid/nfc/NfcActivityManager$NfcApplicationState; move-result-object v0 .line 97 .local v0, "appState":Landroid/nfc/NfcActivityManager$NfcApplicationState; if-nez v0, :cond_0 .line 98 const-string v1, "NFC" new-instance v2, Ljava/lang/StringBuilder; invoke-direct {v2}, Ljava/lang/StringBuilder;-><init>()V const-string v3, "app was not registered " invoke-virtual {v2, v3}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; move-result-object v2 invoke-virtual {v2, p1}, 
Ljava/lang/StringBuilder;->append(Ljava/lang/Object;)Ljava/lang/StringBuilder; move-result-object v2 invoke-virtual {v2}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String; move-result-object v2 invoke-static {v1, v2}, Landroid/util/Log;->e(Ljava/lang/String;Ljava/lang/String;)I .line 102 :goto_0 return-void .line 101 :cond_0 invoke-virtual {v0}, Landroid/nfc/NfcActivityManager$NfcApplicationState;->unregister()V goto :goto_0 .end method
{ "redpajama_set_name": "RedPajamaGithub" }
7,532
{"url":"http:\/\/copper.math.buffalo.edu\/337\/day18_outline.html","text":"# Complex Newton\n\nNote: extent=(-r,r,-r,r) option in imshow.\n\n# Tartans for exploration of color and numpy fancy indexing\n\n## Numpy boolean indexing\n\nFor the \"mask: want patterns like this:\n\n0 0 0 1 1 1 0 0 0 1 1 1 0 ...\n1 0 0 0 1 1 1 0 0 0 1 1 1 ...\n1 1 0 0 0 1 1 1 0 0 0 1 1 ...\n1 1 1 0 0 0 1 1 1 0 0 0 1 ...\n0 1 1 1 0 0 0 1 1 1 0 0 0 ...\n0 0 1 1 1 0 0 0 1 1 1 0 0 ...\n...\n\n\nYou requested threadcount details from The Scottish Register of Tartans website\nfor the Argyll Campbell tartan. The details are given below.\n\nB4K4B36K40G36W8G36K40B34K6\n\nPallet:\nB=2C4084BLUE;K=101010BLACK;G=005020MOD GREEN;W=E0E0E0WHITE;\n\nThreadcount given over a half sett with full count at the pivots.\n\nYou may wish to refer to the threadcount guidance section\n\n\n## Parsing the palette\n\n### Dictionaries\n\nd = {'apple':'round crispy fruit', 'banana':'long yellow fruit'}\n\nd['apple']\n\n'round crispy fruit'\n\n\ndictionary comprehensions\n\n### Conversion from hex strings to int\n\nint('F8',16)\n\n\nExercise: make a color look-up table\n\n### Regular expressions\n\na pattern-matching syntax\n\nimport re","date":"2018-05-22 08:23:01","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 1, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.2516706883907318, \"perplexity\": 822.8308108613389}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 5, 
\"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2018-22\/segments\/1526794864648.30\/warc\/CC-MAIN-20180522073245-20180522093245-00419.warc.gz\"}"}
null
null
Q: Sum of elements in a diagonal Suppose I have the matrix MatrixForm[A = {{8, 1, 6}, {3, 5, 7}, {4, 9, 2}}] with Tr[A] I can sum the elements of the main diagonal. To sum the elements of the opposite diagonal, which runs from upper right to lower left I type Tr[Reverse[A]] How can I obtain the desired result differently? A: First I thought "Why would you want to do that?" but using Reverse has indeed some performance issues. Here is an alternative involving Extract and Total: A = RandomReal[{-1, 1}, {10000, 10000}]; a = Tr[Reverse[A]]; // AbsoluteTiming // First b = Total[Extract[A, Transpose[{Range[Length[A]], Range[Length[A], 1, -1]}]]]; // AbsoluteTiming // First a == b 0.487564 0.000616 True
{ "redpajama_set_name": "RedPajamaStackExchange" }
799
\section*{Summary} Experimental evidence confirms that our game-theoretical planner improves social acceptability of robotic behavior. \section*{Introduction} The widespread diffusion of service robots for diverse applications is making autonomous robots more and more pervasive in our lives~\cite{torras2016service}. In the near future, autonomous robots will likely coexist and share our very space. Application scenarios will be characterized by populated and dynamic environments, where autonomous navigation has to ensure not only the physical safety of human subjects, but also a great degree of \emph{social acceptability}~\cite{kruse2013human}. Trajectory planners at the state of the art mostly aimed at ensuring the former requisite~\cite{fox1997dynamic,fiorini1998motion,van2008reciprocal}, while seldom tackling the social acceptability issue. Most of contemporary autonomous navigation algorithms model humans as inanimate dynamic obstacles rather than social entities interacting with each other through complex and strategized patterns~\cite{kivrak2020social}. The oversimplification of human behavioral traits in the design of navigation algorithm may have severe consequences, such as the emergence of the well-known ``freezing robot problem''~\cite{trautman2015robot}. Socially-aware navigation is gaining momentum as a fundamental requirement toward the design of \emph{social robots,} able to adhere to social convention toward providing a friendly and comfortable interaction with humans. Socially-aware navigation combines perception, dynamical system theory, social conventions, human motion modeling, and psychology. Trajectories generated in this context should be predictable, adaptable, easily understandable, and acceptable by humans~\cite{rios2015proxemics}. Toward improving trust, comfort, and social acceptance, humans should be explicitly considered by robots as intelligent agents who interact and may influence the motion of others~\cite{turnwald2019human}. 
Recent efforts in socially-aware navigation model humans as static entities~\cite{sisbot2007human} or as agents driven by very simplistic motion models~\cite{shiomi2014towards}. Such simplistic assumptions may hardly cope with the complexity of human behavior and interaction, yielding trajectories that far from predictable, smooth, and in turn acceptable by humans. Models based on learning theory, on the other hand, promise better results~\cite{chen2017socially} provided that a large training data set involving human subjects is available, which is not always the case. Here, we present a socially-aware robot navigation strategy that accurately models human behavior using game theory (see Figure~\ref{Fig:Intro_Image} for a graphical abstract of the procedure). Game theory offers substantial benefits compared to alternative modeling methods, such as reactive strategies~\cite{helbing1995social,tadokoro1995motion,hoeller2007accompanying} and learning schemes~\cite{bennewitz2005learning,alahi2016social,gupta2018social,liang2019peeking}. With respect to the former, game theory is able to perform motion prediction and anticipation of the behavior of other humans, typical of human decision making in social contexts~\cite{turnwald2016understanding}. Compared to the latter, it overcomes their distinctive lack of explainability, generalization, and the need for large training data set. Game theory has successfully found applications in robot motion planning, as in Zhang et al.~\cite{zhang1998motion}, where a non-cooperative, zero-sum game is used to coordinate their motion and avoid obstacles to execute a set of prioritized tasks. 
Gabler et al.~\cite{gabler2017game} propose a game-theoretical framework in which humans and robots collaborate in an industrial assembly scenarios; Dragen et al.~\cite{dragan2017robot} and Nikolaidis et al.~\cite{nikolaidis2017mathematical} model the interaction between human and robot as a two-player game and point out how different game assumptions and approximations lead to different robot behaviors. Our approach uses non-cooperative game theory~\cite{nash1951non} to model the navigation behavior of multiple humans in populated environments, positing that conditions of safe navigation, adherence to social norms, and psychological comfort correspond to a Nash equilibrium in the proposed game-theoretical model. Differently from the previously cited works, our model contemplates more than two players --a feature that is essential to model populated environments. The human motion model informs the design of a robotic trajectory planner, whereby the robot tends to mimic human behavior during motion and interaction in populated environment. Hence, we pursue social acceptability through the concept of anthropomorphism~\cite{epley2007seeing,roesler2021meta} --the intrinsic tendency of humans to attribute intentions and consciousness to non-human entities~\cite{waytz2010sees}. Our work marks an important milestone in the field of social robotics. It provides an efficient, social-aware motion planning framework that encapsulates realistic features of human crowds, remarkably enhancing the social acceptance of the planned trajectories. Namely, we incorporate the human vital space~\cite{hall1966hidden}, the recognition of human groups~\cite{mavrogiannis2021core}, the sequential decision-making typical of human beings~\cite{xie2017learning}, and a natural human-obstacle interaction~\cite{manual1985special} --features that are often missing in many approaches, including those based on game theory~\cite{turnwald2019human}. 
The methodology proposed in this paper is generally applicable to any kind of mobile robot. To avoid confounds related to the choice of specific hardware setup and focus on the assessment of human perception of the robot motion, validation is executed on virtualized environments, where the humanly populated scene is extrapolated from surveillance videos. Three different experimental conditions are considered: the first involves only human subjects, the second contains a virtualized mobile robot programmed through the state-of-the-art Enhanced Vector Field Histogram (VFH~\cite{ulrich1998vfh+}) algorithm moving through the population, the third replaces the VFH algorithm with our game-theoretical approach. Across the three experimental conditions, we perform a twofold validation of our approach: first, we evaluate performance metrics typical of path planning (path length ratio, path regularity, and distance to the closest pedestrian), and then we analyze the results of a survey questionnaire to directly assess social acceptability by human subjects. To this aim, we administered a variant of the Turing test to a pool of 691 volunteers, who evaluated the human likeness of three sets of videos corresponding to the three scenarios explained above. To conceal the appearance of the agents, we masked humans and robots by replacing them with arrows so that the volunteers did not have the possibility to distinguish between them. Evidence from our experimental campaign reveals that trajectories generated by our game-theoretical approach exhibit performance metrics that are efficient and closer to those achieved by human subjects than those of VFH. Moreover, the outcome of the survey questionnaire highlights the superior acceptability of game-theoretical-generated trajectories with respect to those generated through VFH. 
\section*{Methods} \subsection*{Game-theoretical model} \subparagraph*{Assumptions} Let us start with a description of all the assumptions supporting our game-theoretical model for human motion. To improve readability, here and henceforth we will refer to human subjects as \emph{agents}. This term will be also used for the robot when no distinction between the two categories is required. All pedestrians are \emph{rational} agents with \emph{common knowledge} moving in a 2D \emph{populated dynamic} environment. \emph{Rational} behavior entails that agents only aim to reach their own \emph{individual} motion goal. In mathematical terms, this translates into a minimization of an individual cost (equivalently, a maximization of an individual benefit), such as their overall path length~\cite{bitgood2006not} or energy consumption~\cite{mcneill2002energetics}. Practically, agents continuously update their navigation behavior while walking in populated environments, based on the observation and possible prediction of the motion of the surrounding agents. The possession of \emph{common knowledge} by agents in our game-theoretical model implies that all agents have the same knowledge of the characteristics of the environment and of the rules that regulate it. Such an assumption is reasonable when dealing with models of human traits, as individuals commonly learn these skills by experience during everyday life~\cite{turnwald2019human}. Specifically, we consider a \emph{populated dynamic environment}, possibly busy, but not crowded, such as typical streets occupied by pedestrians walking on sidewalks, or populated indoor spaces, such as hotel halls~\cite{turnwald2019human}. We suppose that the environment contains static obstacles that have to be avoided by agents in a \emph{natural} manner. Our approach is based on a microscopic modeling strategy, whereby a single individual is mapped onto a single software agent, which mimics the individual's decisions and interactions. 
\subparagraph*{Game description} \label{description of the game} The proposed model for pedestrian motion is a non-cooperative, static, perfect information, finite, and general-sum game with many players (or agents). In our model, each agent aims at reaching its own goal \emph{individually}, but the minimization of its individual cost does not exclude the possibility to collaborate with other agents, should this help attain \emph{individual} goals \cite{osborne1994course} as well. Cohesive groups of agents are considered as single agents, whereby members of the group share a common strategy and a common motion pattern. This last assumption practically entails that the navigation strategy of the robot in avoiding human groups would treat them as a single entity, without attempting to break them up to better attain its own navigation goal. The game is \emph{static} in the sense that agents move and take decisions \emph{simultaneously}; it is based on \emph{perfect information}, that is, each agent knows the current and the previous actions of all agents, e.g. via direct observations. The game is also \emph{finite}, i.e., the game has a \emph{finite} number $N$ of agents belonging to the agent set $\mathcal{N}$, where each agent ${i \in \mathcal{N}}$ can choose among a \emph{finite} number of actions available, defined by the action set $\Theta$, which is supposed to be common to all agents. In particular, we indicate with $\theta_{i} (t) \in \Theta$ the action executed by agent $i$ at the discrete time $t$. In our application, the execution of action $\theta_i(t)$ corresponds to a motion of agent $i$ in the 2D plane at constant velocity $v$ and constant heading $\theta_i (t)$ over the whole discrete time step $\Delta t$. We assume that agents have a bounded visibility angle and the possible actions $\theta_i(t)$ are designed to uniformly partition such an angle. 
We denote with $p_i \in \mathbb{R}^2$ the position of agent $i$ in the 2D environment, with respect to a fixed orthogonal reference frame. Moreover, the proposed model is a \emph{general-sum} game, i.e., the sum of all gains and losses of the utility functions over all agents is \textit{not necessarily} equal to zero. Similar to~\cite{turnwald2016understanding}, we postulate that, in such a navigation task, agents tend to reach a Nash equilibrium -- the condition in which no agent has an incentive to unilaterally change its own action (or strategy) if the other agents do not change theirs. In other words, a Nash equilibrium occurs when each agent achieves its best response, i.e., its minimum individual cost, given the actions of the other agents. In general, however, existence and uniqueness of a Nash equilibrium is not guaranteed in our setup, and its analytical characterization is almost always impossible to obtain, thus making numerical approaches necessary for an approximate computation. Here, the Nash equilibrium is approximately computed via the \emph{sequential best response} approach~\cite{sagratella2017algorithms}. Let us explain the idea of the sequential best response for two agents, A and B: agent A observes the motion of agent B and then solves an optimization problem to determine its own strategy, given the latest observed strategy of agent B. Afterwards, a check action is performed, verifying if the strategies of both agents are the same as those computed in the previous iteration; in such a case, the game has reached a Nash equilibrium. Otherwise, agent B computes its optimal strategy, given the latest observed strategy of agent A. The procedure is applied iteratively, until the equilibrium condition is met. The same strategy identically extends to $N$ agents. Our modeling procedure assumes that all the agents in the planar space play the game mentioned above. 
After the model has been identified, we will use it to control a single, synthetic agent to navigate through the populated environment. Such an agent is called \emph{robot player}. \subparagraph*{Optimization problem} \label{Optimization problem} The sequential best response approach in our game-theoretical model for the human motion in a populated environment requires the solution of a set of interdependent optimization problems, one for each agent moving in the environment. The goal of the optimization problem for each agent \emph{i} is to find the best sequence of actions, $\boldsymbol{\theta^{*}_{i}} = (\theta_{i}(t), \theta_i(t+\Delta t), \theta_i(t+2 \Delta t), \ldots, \theta_i(t+T \Delta t))$, over a finite prediction horizon $T \Delta t$, given the actions of the other agents. Without loss of generality and to improve readability, here and henceforth we assume a unitary discrete-time step, i.e., $\Delta t = 1$. All agents seek the Nash equilibrium applying the sequential best response strategy, solving their own optimization problem on the basis of the observed behavior of the rest of the population. We define the optimization problem for each agent $i \in \mathcal{N}$ as \begin{subequations} \begin{alignat}{2} \boldsymbol{\theta^{*}_{i}} =~ &\!\underset{\boldsymbol{\theta_{i}}}{\mathrm{min}} &\quad& J(\boldsymbol{\theta_{i}}) \label{eq:optProb}\\ &\text{s.t.} & & \left\| p_i(t, \theta_{i}(t))-p_j(t) \right\|_{2}~\geq \beta \quad \forall ~t, \forall i , j \in \mathcal{N}, i\neq j \label{collision avoidance constraint}\\ & & & p_i(t, \theta_{i}(t)) \notin \mathcal{O}_\mathrm{obs} \quad \forall ~t, \forall i \in \mathcal{N} \label{collision avoidance obstacle} \end{alignat} \end{subequations} with \begin{equation} p_i(t, \theta_{i}(t)) = p_i(t-1, \theta_{i}(t-1))+ \Delta p (\theta_i(t), v). 
\label{player position} \end{equation} The cost function $J(\boldsymbol{\theta_{i}})$ in \eqref{eq:optProb} is defined as \begin{equation} J(\boldsymbol{\theta_{i}}) = \Phi_\mathrm{goal}(\boldsymbol{\theta_{i}}) + \Phi_\mathrm{smooth}(\boldsymbol{\theta_{i}}) + \Phi_\mathrm{obs}(\boldsymbol{\theta_{i}}), \label{overall_cost_function} \end{equation} where the three summands are defined as follows: \textit{(i)} The term $\Phi_\mathrm{goal}(\boldsymbol{\theta_{i}})$ tends to reduce the overall path length for each agent $i$ and, hence, models the goal-oriented attitude of the agent: \begin{equation} \begin{aligned} \Phi_\mathrm{goal}(\boldsymbol{\theta_{i}})=\sum_{t=1}^{T} \gamma(t) \| p_i(t, \theta_{i}(t))-p_i^{*} \| \end{aligned} \label{minimise_distance} \end{equation} with $\gamma(t)$ being a time-variant weight factor; $p_i(t, \theta_i(t))$ is the estimated position of agent \emph{i} at time $t$, considering a constant speed modulus $v$ and the heading control action $\theta_i(t)$ applied at time $t$, computed using the kinematic update Equation \eqref{player position}; and $p_i^{*}$ is the estimate of agent $i$'s goal in the time horizon $T$. In the absence of an explicit definition of a pedestrian's goal, we assume that, within the horizon $[t, t+T]$, the goal of agent $i$ lies on a straight line starting in $p_i(t)$ and oriented along the observed agent heading at time $t$. Under these assumptions, the practical meaning of the time horizon $T$ is the estimate of the time interval within which a pedestrian sets up and maintains their walking goal. \textit{(ii)} The term $\Phi_\mathrm{smooth}(\boldsymbol{\theta_{i}})$ penalizes excessive rotations, thus promoting smooth trajectories. 
In fact, during navigation, humans tend to avoid too many changes of orientation to minimize their energy consumption \cite{mcneill2002energetics}: \begin{equation} \begin{aligned} \Phi_\mathrm{smooth}(\boldsymbol{\theta_{i}})= \sum_{t=1}^{T} ( 1- \gamma(t) ) | \theta_i(t) - \theta_i(t-1) | \end{aligned} \label{smoothness} \end{equation} where $\theta_i(t)$, $\theta_i(t-1)$ are the orientation of the agent at time $t$ and $(t-1)$, respectively. We observe that the term $\Phi_\mathrm{smooth}(\boldsymbol{\theta_{i}})$ is weighted in a complementary fashion to $\Phi_\mathrm{goal}(\boldsymbol{\theta_{i}})$, to satisfy the assumption (further detailed in the Implementation section) of their relative importance as long as the agent approaches its target. \textit{(iii)} The term $\Phi_\mathrm{obs}(\boldsymbol{\theta_{i}})$ tends to optimize the natural interaction with static objects. In fact, humans tend not to walk too close to static obstacles, unless it is necessary. For this reason, we model this behavior as a \emph{soft} constraint: \begin{equation} \begin{aligned} \Phi_\mathrm{obs}(\boldsymbol{\theta_{i}})= \sum_{t=1}^{T} \frac{\rho}{\| p_i(t, \theta_{i}(t))- p_{\text{obs}} \|} \end{aligned} \label{obst} \end{equation} where $\rho$ is a weighting factor and the denominator in \eqref{obst} is the distance between the agent position $p_i(t, \theta_{i}(t))$ and the closest \emph{static} obstacle $p_{\text{obs}}$ at time $t$. The exact procedure to compute $p_{\text{obs}}$ will be explained later. Practically, \eqref{obst} penalizes small distances between an agent and \emph{static} obstacles. The inequality in \eqref{collision avoidance constraint} is a hard constraint imposing to avoid other agents, assuming a circular region around agents as their vital space~\cite{hall1966hidden} to be avoided. In this way, agent \emph{i} is required to maintain a minimum distance $\beta$ from other agents in the observed scenario. 
Constraint \eqref{collision avoidance obstacle} models the avoidance of static obstacles by imposing that the position $p_i(t,\theta_{i}(t))$ is outside the obstacle space $\mathcal{O}_\mathrm{obs}$, defined as a subset, possibly disconnected, of the 2D planar space, occupied by obstacles, where motion of agents is forbidden. Equation~\eqref{player position} formalizes the kinematic update of the position of agent $i$ at time $t$, subject to a heading command $\theta_i(t)$, at a constant velocity $v$. \subparagraph{Validation} The proposed game-theoretical human motion model is validated by conducting a qualitative comparison between generated trajectories and human ones, observed in open-source surveillance videos~\cite{lerner2007crowds,pellegrini2009you}. These surveillance videos, used to validate the proposed model, show a typical urban scenario in which multiple agents walk interacting with each other and avoiding static obstacles. Figure~\ref{Validation} illustrates the frames, randomly selected, of the surveillance videos of two different scenarios. Specifically, Figure~\ref{Validation} compares real trajectories executed by humans (Figs.~\ref{scenario_1_real} and~\ref{scenario_2_real}) with the estimated trajectories generated for all agents by the proposed model solving our game-theoretical problem (Figs.~\ref{scenario_1_model} and~\ref{scenario_2_model}). We observe that, in both the illustrated scenarios, our game-theoretical approach generates collision-free trajectories (Figs.~\ref{scenario_1_model} and~\ref{scenario_2_model}) that are smooth and resemble those executed by their human counterparts. However, we note that the trajectories generated by our algorithm exhibit a sharper reaction than humans in the vicinity of surrounding agents. This is evident while comparing Figure~\ref{scenario_1_real} and Figure~\ref{scenario_1_model}, focusing on the interaction between the green trajectory and the blue one. 
A comparable circumstance can be observed in Figure~\ref{scenario_2_real} and Figure~\ref{scenario_2_model}, with reference to the yellow trajectory. This phenomenon is most likely caused by the discrete action set associated with each agent. Notably, in our implementation an agent can choose one out of seven possible headings inside their own visibility zone, resulting in a resolution of $ \pm \sfrac{\pi}{6}~\mathrm{rad}$, in the attempt of minimizing the corresponding cost function. On the other hand, human subjects can select their heading over an infinite set. A further cause of discrepancy between human and game-theoretical trajectories resides in the kinematic update of the agent position in Equation~\eqref{player position} --a linear update with constant heading and velocity over the whole sampling step-- and the estimation of the human target, assumed to be constant over an interval of duration $T$ --actually an unknown, subject to the very stochastic nature of human behavior. \subparagraph*{Algorithm} The game-theoretical model of pedestrian motion described above is used to inform a robotic trajectory planner for autonomous robots moving in populated environments. 
\begin{algorithm}[H] \textbf{Initialization}:\\ $p_{\mathrm{robot}} \leftarrow \mathrm{InitializeRobotPosition}$\\ \Repeat{$p_{\mathrm{robot}} = p_{\mathrm{goal}}$}{ $\mathrm{GroupRecognition}$\\ $ \boldsymbol{\theta} \leftarrow \mathrm{FirstEstimation}$ \\ \ForEach{agent $i$ (robot included)}{ $C_\mathrm{obs} \leftarrow false$ [flag defining the collision with obstacles for agent $i$] \\ $C_\mathrm{agents} \leftarrow false$ [flag defining the collision between agents for agent $i$]\\ $ C_\mathrm{obs}, C_\mathrm{agents} \leftarrow \mathrm{CheckCollision}(i, \boldsymbol{\theta})$ \\ $ \boldsymbol{\theta_i} \leftarrow \mathrm{ComputeSolution}(i, \boldsymbol{\theta}, C_\mathrm{obs}, C_\mathrm{agents})$\\ } $ p_{\mathrm{robot}} \leftarrow \mathrm{UpdateRobotPosition}$ } \caption{Main algorithm} \label{Algorithm_main} \end{algorithm} \vspace{12pt} \begin{algorithm} \SetKwFunction{main}{$\mathrm{ComputeSolution}$} \SetKwProg{Fn}{}{}{} \Fn{\main{$\boldsymbol{i, \theta}, C_\mathrm{obs}, C_\mathrm{agents}$}} { \eIf{$C_\mathrm{agents}$}{ $\boldsymbol{\theta_i}^\mathrm{gt} \leftarrow \mathrm{GTPlanner}(\boldsymbol{\theta}) $ [Solution to Algorithm \ref{Algorithm_2}]\\ $\boldsymbol{\theta_i}^\mathrm{dec} \leftarrow \mathrm{Decelerate} $\\ \eIf{$\mathrm{Cost}(\boldsymbol{\theta_i}^\mathrm{gt}) \leq \mathrm{Cost}(\boldsymbol{\theta_i}^\mathrm{dec})$} { $\boldsymbol{\theta_i} \leftarrow \boldsymbol{\theta_i}^\mathrm{gt}$ } { $\boldsymbol{\theta_i} \leftarrow \boldsymbol{\theta_i}^\mathrm{dec}$ } } {\If{$C_\mathrm{obs}$}{ $\boldsymbol{\theta_i} \leftarrow \mathrm{IndividualOptimization}$ }} \Return $\boldsymbol{\theta_i}$ } \caption{The $\mathrm{ComputeSolution}$ function} \label{Model_solution_algorithm} \end{algorithm} The main steps executed by the proposed trajectory planner are described in Algorithm \ref{Algorithm_main}. First, the robot position ($p_{\mathrm{robot}}$) is initialized using the function $\mathrm{InitializeRobotPosition}$. 
Then, the algorithm executes an iterative procedure that stops when the robot reaches its target position ($p_{\mathrm{goal}}$). Here, we will refer to both humans and the robot with the term ``agent''. Each iteration performs five main steps: recognition of groups of humans ($\mathrm{GroupRecognition}$), first estimation of trajectories for all agents ($\mathrm{FirstEstimation}$), collision checking between agents and with obstacles ($\mathrm{CheckCollision}$), computation of the agent trajectory ($\mathrm{ComputeSolution}$), and update of the robot position using the computed trajectory ($\mathrm{UpdateRobotPosition}$). This iterative procedure predicts the agents' motion and generates the robot optimal trajectory over the fixed time horizon $T$, by applying the strategy detailed below. After such an optimal trajectory for the robot is computed, only the action corresponding to the first time step is actually applied to the robot and the process is repeated until the robot reaches its goal. In the following, each step of the Algorithm \ref{Algorithm_main} is detailed: \begin{itemize} \item $\mathrm{GroupRecognition}$. The algorithm performs the \emph{group recognition} of agents considering the observed orientation of each agent, and the distances between them. In fact, a group is typically moving maintaining a common orientation and keeping a distance between agents shorter than the vital space typical of the single agent. Upon recognition, groups are considered as \emph{unique entities} and treated as single agents in the subsequent phases. \item $\mathrm{FirstEstimation}$. A preliminary estimation of all agents' trajectories (i.e., $\theta$) is performed, projecting hypothetical rectilinear trajectories over the interval $T$. \item $\mathrm{CheckCollision}$. 
Given the trajectories of all agents ($\theta$), previously estimated by the $\mathrm{FirstEstimation}$, the $\mathrm{CheckCollision}$ function detects the possible occurrence of collisions between an agent $i$ with obstacles and other agents, activating the flag variables $C_\mathrm{obs}$ and $C_\mathrm{agents}$, respectively. In particular, we refer to the occurrence of a collision each time the individual vital space of an agent is violated. \item $\mathrm{ComputeSolution}$. Considering the estimated trajectories ($\theta$), and the flags $C_\mathrm{obs}$ and $C_\mathrm{agents}$, Algorithm \ref{Model_solution_algorithm} computes a solution of the motion planning problem for an agent $i$ selecting one of the possible cases: \begin{enumerate}[(i).] \item if a collision with other agents is envisaged, two alternative solutions are evaluated. Then, the solution that involves the lowest cost of Equation \eqref{overall_cost_function} will be selected. The first solution ($\boldsymbol{\theta_i}^\mathrm{gt}$) is computed using the strategy defined in Algorithm \ref{Algorithm_2}, where trajectories are generated seeking a Nash equilibrium solution of the game presented in the \emph{Game description} section. The second solution is computed through the $\mathrm{Decelerate}$ function, which evaluates the opportunity to \emph{decelerate} --a typical human behavioral trait in navigation-- to avoid the collision with other agents. 
In particular, after identifying the discrete time step $t$ at which a collision between agent $i$ and other agents is envisaged to occur, the cost associated with sixteen different deceleration patterns is evaluated using the cost function \eqref{overall_cost_function}, provided that constraints in Equations \eqref{collision avoidance constraint} and \eqref{collision avoidance obstacle} are satisfied; \item if an agent is envisaged to collide with a \emph{static} obstacle ($C_\mathrm{obs}$), the agent solves its individual optimization problem described above (without playing the game and, hence, not seeking for the Nash Equilibrium); \item if no collision between agents or static obstacles is envisaged, trajectories are kept linear, maintaining the current heading and constant velocity, practically implementing what was already computed in the $\mathrm{FirstEstimation}$ procedure. \end{enumerate} \item $\mathrm{UpdateRobotPosition}$. Considering the computed trajectory of the robot, the action corresponding to the first time step is executed and the robot position is updated using Equation~\eqref{player position}. \end{itemize} \begin{algorithm}[H] \SetAlgoLined \textbf{Initialization}:\\ \qquad $k \leftarrow 1$ [iteration index] \\ \qquad $ \boldsymbol{\theta}^k \leftarrow \boldsymbol{0} $ [straight paths for all agents as $\mathrm{FirstEstimation}$] \\ \qquad $ i \leftarrow 1 $ [agent index] \\ \textbf{Iterate until convergence}: \qquad $ \bar{p}_j^k \leftarrow $ \eqref{player position}, given $\boldsymbol{\theta}^k$, for all $j$ [present and future predicted positions of all agents]\\ \qquad $\boldsymbol{\theta_i^{k+1}} \leftarrow $ solution to Eq. 
(\ref{eq:optProb}--\ref{collision avoidance obstacle}), given $ \left( \bar{p}_j^k \right)_{j \neq i}$ [best response to all other agents] \\ \eIf{$i < N$}{ $i \leftarrow i + 1 $ [move on to next agent] }{ $i \leftarrow 1 $, $ k \leftarrow k + 1 $ [move on to next iteration] } \caption{Nash trajectory computation} \label{Algorithm_2} \end{algorithm} \subparagraph*{Implementation} The algorithm presented above has been implemented in Matlab and the main implementation choices are discussed in what follows. The discrete time step has been set to $\Delta t = 1.2 \mbox{s}$. The time horizon for optimization has been set to $T = 4$, that is, 4.8 seconds. In the main paper, we opted to keep a unitary discrete time step, to enhance readability. As previously stated, each agent can execute actions taken from an action set $\Theta$ of finite size. Specifically, in our implementation, each agent has seven possible actions for $\theta_i(t)$, which represents the heading within the agent \emph{visibility} zone. Namely, $\theta_i(t)$ is updated as $\theta_i(t) = \theta_i(t-1) + u(t-1)$, where $u(t-1)$ takes values in the finite set $\Theta=\{ -\sfrac{\pi}{2}, -\sfrac{\pi}{3}, -\sfrac{\pi}{6}, 0, \sfrac{\pi}{6}, \sfrac{\pi}{3}, \sfrac{\pi}{2} \}~\mathrm{rad} $. We remark that we limited the cardinality of $\Theta$ to seven, pursuing a trade-off between satisfactory performance and reasonable computational complexity of the algorithm. In Equation \eqref{collision avoidance constraint}, the $\beta$ parameter is set considering the Hall convention \cite{hall1966hidden} that posits the existence of a vital space of circular shape that ensures comfort conditions for human navigation. The value of $\beta$ has been estimated through the analysis of the open-source surveillance videos~\cite{lerner2007crowds,pellegrini2009you}. 
In Equation \eqref{overall_cost_function}, the term ($\Phi_\mathrm{obs}(\boldsymbol{\theta_{i}})$) can be neglected if the first estimation of the agent trajectory does not intersect any static obstacle. Otherwise, $\Phi_\mathrm{obs}(\boldsymbol{\theta_{i}})$ in Equation \eqref{obst} is computed referring to the closest obstacle, toward which the agent is projected to collide. Then, the closest point of such obstacle to the agent position is computed ($p_{\text{obs}}$). To reduce the computational load, obstacles are mapped into a discrete spatial map overlapping with the 2D environment. The map consists of a rectangular matrix of $576 \times 720$ cells, which are marked as being occupied by an obstacle or free from them. Each cell covers approximately a square of $1.8 \times 1.8$ cm. The weight $\gamma(t)$ in Equations \eqref{minimise_distance} and \eqref{smoothness} is selected as a time-varying term that is used to balance the relative importance of terms $\Phi_{\mathrm{goal}}(\boldsymbol{\theta}_i)$ and $\Phi_{\mathrm{smooth}}(\boldsymbol{\theta}_i)$ over the optimization horizon $T$. This choice emerges from the analysis of the available surveillance videos, where we observed that the minimization of the distance to goal typically prevails over the smoothness requirement as the agent gets closer to its goal, and vice versa. Considering $T=4$ time steps, we chose the following sequence for $\gamma(t)$, starting from a generic time instant $t^*$: $\gamma(t^*)=0.6$, $\gamma(t^*+1)=0.7$, $\gamma(t^*+2)=0.8$, $\gamma(t^*+3)=1.0$. \subsection*{Questionnaire and a-priori power analysis} \subparagraph*{Questionnaire} \label{Experimental setup} The proposed methodology is validated using a variation of the Turing test \cite{saygin2000turing}, which evaluates whether the robot behavior, controlled by the game-theoretical method, is comparable to or indistinguishable from human navigation patterns. 
The variation of the Turing test consists of an online questionnaire composed of three main parts: a preliminary training part and two different parts to collect data. A screen for each part of the survey questionnaire is illustrated in Figure~\ref{online test}. The survey questionnaire starts with general questions about the participant, such as gender, age, and the level of professional experience in the robotics field on a Likert scale~\cite{likert1932technique} from 1 (no experience) to 5 (expert). Then, a preliminary training part allows the participant to become familiar with the working environment (Figures~\ref{training1} and~\ref{training2}). Training videos show pedestrians moving in an urban environment, as shown in Figure~\ref{training1}. In particular, the training part guides the participant from a typical urban scenario of Figure~\ref{training1} to the particular scenario used in the other parts of the test illustrated in Figure~\ref{first section}. The intermediate scenario of Figure~\ref{training2} is designed to gradually guide the participant to the final set-up. In the testing scenario of Figure~\ref{first section}, agents (pedestrians and robot) have been replaced with arrows and the urban environment has been removed to prevent the participant from focusing on the scenario, rather than on the movement of agents. In the second part, the participant watches 21 videos in random order (about 15 seconds each) consisting of three different experimental conditions: 7 videos show an environment with only pedestrians; another 7 videos show a scenario with pedestrians and a robot controlled with a state-of-the-art algorithm (the Enhanced Vector Field Histogram~\cite{ulrich1998vfh+}); and the remaining 7 videos show a scenario with pedestrians and a robot controlled with the proposed algorithm. In all experimental conditions, robot trajectories are re-planned with a frequency of 2 Hz. 
At the end of each video, the participant states whether they have noticed an arrow that moved with a \emph{weird} motion. In the affirmative case, they point at that arrow, as shown in Figure~\ref{first section}. In the last part of the survey questionnaire, the participant watches the same 21 videos of the previous part but in a different random order. Unlike the previous part, an arrow is circled in red, as shown in Figure~\ref{second section}. Then, the participant has to determine if the selected arrow is a real pedestrian and, then, they evaluate the \textit{naturalness} of the arrow motion on a Likert scale~\cite{likert1932technique} defined in a range from 1 (completely unnatural) to 5 (completely natural). All videos used in the survey questionnaire are generated from an open-access dataset~\cite{lerner2007crowds}. The test takes about 20 minutes to be completed properly. The test has three \emph{rules}: \textit{(i)} the participant cannot pause the video; \textit{(ii)} the participant can watch videos only once; \textit{(iii)} the participant should complete the test without interruptions or distractions. \subparagraph*{A-priori power analysis} Preliminarily, we conducted an a-priori power analysis to estimate the number of participants required to provide acceptable and significant statistical results \cite{prajapati2010sample}. To this aim, we used the free software G*Power \cite{erdfelder1996gpower}. First, we identified our case analysis as a non-parametric study, since non-parametric statistical tests impose no constraints or prerequisites on the data distributions \cite{corder2014nonparametric}. Then, we assumed that the data collected after the a-priori study would be analyzed via the non-parametric Kruskal-Wallis test because our \emph{independent} variables have more than two independent groups (HO, GT, and VFH) and our \emph{dependent} variables (the rating of the weirdness motion, human-likeness, and naturalness of movement) are ordinal. 
Based on \cite{prajapati2010sample}, we computed the total sample size considering the ANOVA test \cite{roberts2014student}, i.e., the parametric-equivalent test of the Kruskal--Wallis one and then multiplied the result by the corrective factor ARE, obtaining the equivalent sample size of the non-parametric Kruskal--Wallis test. The result of the a-priori analysis for our non-parametric test is about $152$ volunteers, considering an alpha level equal to $5\%$, power of the study $80\%$, and three groups, corresponding to the three different experimental conditions. We recruited the participants using the Institutional mail of Politecnico di Torino and then we distributed an online questionnaire to students and university staff. Ultimately, we collected $691$ responses, exceeding the sample size of 152. \section*{Results} \subsection*{A game-theoretical framework for social acceptability of robotic trajectories} Figure~\ref{Fig:Intro_Image} schematizes the proposed procedure for the realization and validation of our game-theoretical framework for the social acceptability of robotic trajectories. The methodology can be subdivided into four main logical phases, corresponding to panels in the figure. First, a game-theoretical model of pedestrian motion is devised and its parameters are tuned on the basis of the analysis of human motion videos (panel (a)). Second, a robotic trajectory planner informed by the game-theoretical pedestrian model is realized. The robot is deployed and operated in a virtual humanly populated environment, where humans execute real trajectories extracted from videos. In this phase, three important performance metrics in robotic trajectory planning are evaluated and compared with the state-of-the-art VFH algorithm (panel (b)). Third, the virtual environments containing humans and the robot are processed and prepared to be administered for the validation questionnaire (panel (c)). 
Finally, the questionnaire is administered and the results are collected and analyzed (panel (d)). \subsection*{Analysis of performance metrics} We performed a preliminary assessment of the trajectories generated in three experimental conditions, which differ for the algorithm governing the motion of a selected agent (i.e., either a robot or a human being): in the condition \emph{humans only} (HO), all the agents were human beings moving in a real environment; in the condition \emph{humans and GT} (GT), one of the agents was controlled by our game-theoretical algorithm, while the other agents were human beings; and in the condition \emph{humans and VFH} (VFH), one of the agents was controlled by the VFH algorithm \cite{ulrich1998vfh+}, and the other were human beings. Each experimental condition comprises seven different experiments, differing for the quantity of human subjects and their motion patterns. The virtualized environment is constructed by processing movies collected from surveillance cameras of populated environments ~\cite{lerner2007crowds}, obtaining a 2D arena where virtual agents reproduce the human motion captured in the video. In the HO condition, the performance metrics are evaluated in the original arena, with reference to a randomly selected human being. In the GT and VFH conditions, a virtual agent is introduced in the arena and commanded to navigate through the existing virtual agents (corresponding to human beings) using the given trajectory planner. Three widely adopted metrics, deemed as important for socially navigating robots, were evaluated across the three experimental conditions: the Path Length Ratio (PLR), the Path Regularity (PR), and the Closest Pedestrian Distance (CPD)~\cite{biswas2021socnavbench}. The PLR is defined as the ratio between the length of the line-of-sight path between the initial and final point of a path and the actual path length between the same two points~\cite{biswas2021socnavbench}. 
A higher path length ratio is usually preferred, since it indicates that an agent minimizes the length of the path to reach its goal. We computed the PLR for each experiment and we illustrate its average values across the three experimental conditions in Figure~\ref{Grafico_parameters}a. The results in Figure~\ref{Grafico_parameters}a suggest that the HO scenario was characterized by the highest average PLR, followed by GT and VFH. The PR quantifies to what extent a path is similar to a straight line~\cite{biswas2021socnavbench}. Following normalization, $PR=1$ corresponds to a straight path from start to goal. Values of PR closer to one are preferable, since they are indicative of a smoother motion, without excessive changes of direction. In Figure~\ref{Grafico_parameters}b, the average PR for each experimental condition is illustrated, where the highest average value pertains to HO, followed by GT and VFH. These results appear in line with the tenet that humans tend to minimize their energy, thus avoiding sudden changes of orientation, and with the design principle of the VFH algorithm, which avoids obstacles only when the agent is close to them~\cite{ulrich1998vfh+}, entailing swift changes of orientation to get away from them. The CPD is defined as the distance from the closest pedestrian, normalized with respect to the maximum length measurable during experiments, which is the diagonal of the experimental arena. Also for this parameter, the attainment of values closer to one is desirable, as this implies a good tendency to stay clear of humans when following planned trajectories. Average values of CPD in the three experimental conditions are illustrated in Figure~\ref{Grafico_parameters}c, where the highest average value is related to GT, followed by HO and VFH. The reason for the latter is presumably due to the purely reactive design of the VFH algorithm. 
We posit that the intermediate ranking of HO with respect to CPD is due to the ability of humans to evaluate situations on a case-by-case basis. While the rankings described above are suggestive of superior performance metrics attained by GT over VFH, the verification of the statistical significance of these comparisons is in order. To this aim, Kruskal-Wallis analysis~\cite{ostertagova2014methodology} was executed across the three metrics, revealing the non-achievement of significant statistical distinguishability ($p=0.286$ for PLR, $p=0.400$ for PR, $p=0.834$ for CPD). The reason behind such observations is strictly related to the consideration of only seven experiments for each experimental condition, with differential degree of variability, and thus characterized by a limited statistical power. In line with these considerations, we conducted a systematic analysis of the inter-experiment variability within each experimental condition (Levene's test ~\cite{gastwirth2009impact}). Essentially, we evaluated the extent to which each algorithm generated experiments that were similar to one another. Such an analysis revealed that there exists a significant differential variability with respect to PR ($F_{2,18}=3.75$, $p = 0.043$). Thus, we performed a post-hoc analysis that revealed much more variability in the VFH videos compared to HO and GT (VFH vs. HO: $p = 0.038$; VFH vs. GT: $p = 0.040$; HO vs. GT: $p = 0.97$); similarly, albeit not statistically significant, we observed a trend toward increased variability in VFH experiments with respect to PLR ($F_{2,18}=3.22$, $p = 0.064$). Finally, the inter-experiment variability within each experimental condition was indistinguishable concerning CPD ($F_{2,18}=2.31$, $p = 0.130$). These results indicate that, albeit indistinguishable in absolute values, the reproducibility and predictability of each experimental condition in terms of PR and PLR was much higher in HO and GT than in VFH scenario. 
\subsection*{Survey questionnaire} We collected 691 responses to the survey questionnaire, where participants were in majority men in their thirties with very little experience on robotics (Table~\ref{First part data}). \captionsetup[table]{labelfont={bf}} \begin{table} \begin{center} \begin{tabular}{ |cc| } \hline Number of participants & 691 \\ \hline Gender & 58\% male and 42\% female \\ \hline Age & 29.44$\pm$11.30 \\ \hline Experience with robotics & 1.5$\pm$0.86\\ \hline \end{tabular} \end{center} \caption{Demographic characteristics and experience with robotics on a scale from 1 (minimum experience) to 5 (maximum experience) collected during the first part of the test.} \label{First part data} \end{table} A power analysis~\cite{prajapati2010sample} indicated that the adequate statistical power was guaranteed with 152 participants. Since the number of participants largely exceeded the required sample size, we opted for a bootstrapping approach~\cite{efron1994introduction}, in which we randomly sampled 152 observations from the complete pool of responses and iterated this process 100 times. Adopting this procedure, we kept the sample size to the appropriate number (thus reducing the odds to obtain biologically irrelevant findings~\cite{johnson1999insignificance}) and increased the generalizability of our findings by testing their robustness against repeated observations. 
The test was composed by three parts: (i) in the first part, the participant underwent a training phase to become familiar with the working environment (see Figure~\ref{online test}a-b); (ii) in the second part, the participant watched $21$ videos reproducing the experiments in the three experimental conditions, where both the background and the agents are concealed --blue arrows over a gray background-- (Figure~\ref{online test}c illustrates a single experiment); (iii) in the third and final part, the participant watched the same $21$ videos, where they were asked in addition to focus on a circled arrow (Figure~\ref{online test}d illustrates a single experiment). The participant is unaware that the circled arrow targeted a random human agent in the HO condition and the robotic agent in GT and VFH conditions. The execution of each part entails answering specific questions. In the first part of the survey questionnaire, the participants were required to provide their gender, age, and level of experience in robotics. To assess the level of social acceptance of our game-theoretical trajectories, in the second part (following habituation), we asked the participants to say if they perceived "weirdness" in the motion observed in the videos, and then to indicate which is the perceived "weird" arrow, if any. In the third part, participants were requested to determine whether the circled arrow is a human or not. Then, participants were asked to rate the naturalness of the motion of the circled arrow on a Likert scale. Experimental outcomes were analyzed with the Kruskal-Wallis test, as the independent variables belong to more than two independent groups (HO, GT, and VFH) and the dependent variables (the rating of the weirdness of motion, the classification as a human or not, and the naturalness of movement) are ordinal~\cite{corder2014nonparametric}. Our null hypothesis, $H_{0}$, posits that all experimental conditions (HO, VFH, GT) are perceived as indistinguishable. 
To statistically reject the $H_{0}$ hypothesis and understand if there exist differences among experimental conditions, we computed the \emph{p} value considering a $5$\% significance level~\cite{kraska2013nonparametric}. Then, we performed a Bonferroni post-hoc analysis~\cite{foster2018introduction} to determine which groups are classified as significantly different from each other. This procedure was adopted for all the questions in the second and third part of the survey questionnaire, except for the second question of the second part, where participants were asked to indicate the perceived ``weird'' arrow, if any. In this case, the answers expressed relative to the HO scenario were discarded, since all arrows corresponded to human beings and an indication of weirdness would not be meaningful for our research question. As a consequence, only two experimental conditions had to be compared (GT and VFH) and, to this aim, we used the Mann--Whitney test~\cite{mann1947test,fay2010wilcoxon}. In the analysis of the results of the second part, in accordance with our expectations, the VFH condition was characterized by the highest level of weirdness compared to HO and GT conditions, which were in turn indistinguishable from one another (Kruskal--Wallis test for all bootstrapping iterations: $p<10^{-17}$; post-hoc analysis: for HO-VFH $p<10^{-10}$ for all bootstrapping iterations, for GT-VFH $p<10^{-14}$ for all bootstrapping iterations, for GT-HO $p>0.05$ for 88 bootstrapping iterations out of 100, while the remaining iterations have $p>0.01$). Figure~\ref{Grafico_totale}a illustrates the mean rank (in light of the bootstrapping procedure) in ``weirdness'' of motion (WM) along with its comparison interval~\cite{Hochberg1987}, in order to better highlight the significant difference among the three groups. Notably, GT and HO are indistinguishable from one another, while VFH is significantly different from GT and HO. 
Specifically, while VFH was considered ``weird'' in the majority of instances ($61$\%), GT was considered ``weird'' less often than HO videos ($33$\% and $37$\%, respectively) (see Figure~\ref{part_2_analysis}). We then asked the participants who detected weirdness in the videos to indicate which of the arrows exhibited such weirdness. We posit that more weirdness should be perceived in agents driven by algorithms than in agents associated with human beings. Our experiments indicated that the agent judged as weird was actually associated with a robot only in $16$\% of the GT cases, while this proportion drastically increased to $47$\% of the VFH cases (see Figure~\ref{part_2_analysis} patterned bars). This finding, combined with the Mann-Whitney test ($p<10^{-20}$ considering the whole bootstrapping analysis), supports the view that the trajectories generated by GT are perceived as much more natural than those generated by VFH. Additionally, it suggests that the motion of the robot controlled by GT is perceived as more human-like than the one generated by VFH. In the third part, we further delved into the subjective rating of the three motion patterns by asking participants to focus on the motion of a circled target agent and evaluate whether such motion corresponds to a human or not (human likeness), along with its degree of naturalness on a Likert scale from one (minimum naturalness) to five (maximum naturalness). When focusing on the qualitative measurements of the human likeness, we observed that VFH-related arrows were considered much less human-like ($41.11$\%) than both GT ($64.59$\%) and HO ($80.31$\%). Thus, as illustrated in Figure~\ref{Grafico_totale}b, VFH is judged as the least human-like ($p<10^{-22}$ with Kruskal-Wallis bootstrapping; $p<10^{-5}$ VFH-GT, $p<10^{-30}$ VFH-HO post-hoc analysis), which is consistent with the previous part of the test, where VFH is perceived as generating the ``weirdest'' motion. 
Additionally, GT-related arrows were considered significantly less human-like compared to HO ($p<10^{-22}$ with Kruskal-Wallis bootstrapping; $p<10^{-4}$ post-hoc analysis GT-HO). We note that, in this case, the comparison intervals are not overlapping, thus denoting a clear distinguishability across experimental conditions. Figure~\ref{part_3_nat} illustrates the results related to the naturalness of the circled arrow. The figure shows the result about the average naturalness of motion of the circled arrow on a Likert scale from $1$ (minimum naturalness) to $5$ (maximum naturalness), computed over the 100 iterations of the bootstrapping procedure. According to our expectations, HO exhibits the highest mean degree of naturalness ($4$), closely followed by GT ($3.5$), whereas a larger gap separates VFH ($2.6$). Importantly, a value of 3 in the adopted Likert scale was associated with the response "natural," while values below this threshold were associated with a "non-natural" motion pattern. In light of this labeling, although HO was perceived as more ``natural'' than all other experimental conditions, it is noteworthy that GT was also classified as ``natural''. \section*{Discussion and conclusions} The main goal of our study was to design a navigation system for autonomous robots moving through populated environments, characterized by a high degree of acceptability by humans. Specifically, in light of the increasing use of autonomous robots in real life, we tested whether a navigation system designed through the principles of game theory would generate indistinguishable trajectories from those walked by human beings. To this aim, we first leveraged game theory to develop a model capable of predicting the intention of motion of humans in populated environments, and then, based on this model, we devised a trajectory planning algorithm for a mobile robot. 
Finally, to assess the social acceptance of the generated robotic trajectories, we conducted a survey questionnaire on a statistically robust group of volunteers using a variation of the Turing test. For greater completeness and toward even more robust outcomes, before analyzing the results collected from the test, we also analyzed the geometrical features of the robotic trajectories, generated in the three experimental conditions (HO, GT, and VFH), selecting three metric parameters from the state of the art (PLR, PR, CPD). The ranking obtained through this analysis (HO, GT, VFH) is consistent with the results obtained through the Turing test, except for the closest pedestrian distance (CPD), in which the trajectories generated by our planner (GT) exhibit higher values of the parameter than those measured in environments populated by humans only (HO). We hypothesize that this exception is due to the fact that our model guarantees by design a minimum safe distance to pedestrians to prevent collisions and to ensure, in any case, a comfortable action space. On the other hand, humans, while walking, are more flexible in this respect, and evaluate circumstances on a case-by-case basis. While the outcome of the Turing test is consistent with the analysis of the metric parameters of the trajectories, the statistical analysis (Kruskal-Wallis) executed on the latter shows that this finding is not statistically significant. To explain this non-statistically significant result, we point out that the statistical analysis was conducted on only seven experiments per group, with differential degree of variability, and thus characterized by a limited statistical power. Moreover, in line with these considerations, we conducted a systematic analysis (Levene test) to evaluate the degree of variability of the different scenarios. In other terms, we evaluated the extent to which each algorithm generated videos that were similar to one another. 
This analysis revealed that there exists a significant variability with respect to the path regularity (PR), whereby the videos with the robot controlled by the VFH are the most variable, compared to the HO and GT experimental conditions. This finding suggests that the VFH algorithm is less predictable (i.e., it provides less regular results) than our algorithm and a real pedestrian. The variant of the Turing test comprises a first part that functions as a training phase. The second part comprises two consecutive phases. The first phase is devoted to compare the social acceptability of trajectories generated by either our game-theoretical algorithm (GT) or a state of the art algorithm (VFH) against a reference experimental condition, a complex social environment populated by humans only (HO). To this aim, participants were asked to say if they perceived weirdness in HO, GT, or VFH experimental conditions. The statistical test confirms that the perceived weirdness in trajectories in which only human subjects are involved is statistically indistinguishable from trajectories where the GT-controlled robot and human subjects coexist. Conversely, the videos in which the trajectories are generated by the VFH algorithm are perceived with a remarkably higher degree of overall weirdness compared with either HO or GT scenarios. In the second phase of the second part of the test, participants were asked to indicate which is the perceived "weird" arrow, if any. In this regard, we observed that the trajectories generated by the VFH algorithm were more frequently recognized as "weird" than those generated by our GT algorithm. In the third part of the test, participants were requested to focus on a circled arrow (a human in the HO experimental condition, a robot in GT and VFH ones), and were asked to evaluate whether or not the motion of the circled arrow corresponded to human recordings, and then rate their degree of naturalness. 
We observed that, while the arrow in VFH scenario was perceived as not human-like, the arrow controlled through GT was considered human-like, albeit not as human-like as the one rated in the HO experimental condition. We believe that this result is related to the fact that, in this part of the test, participants were asked to focus on one arrow only, thus being biased toward detecting an artificial behavior. The same ranking between the three experimental conditions (HO, GT, and VFH) resulted from the analysis of the naturalness of motion of the circled arrow. Indeed, HO has the highest degree of naturalness, closely followed by our GT trajectory planner, and then by the VFH planner. We can conclude that, if participants are not guided to focus on a particular arrow, they would not distinguish much difference between a real human and a robot controlled through our game-theoretical framework and, therefore, the generated trajectory is a good candidate for social acceptance. This implies that our trajectory planning algorithm would help programming robots to blend well in populated environments, and, hence, to be perceived as more friendly, collaborative, and non-hostile. Our findings are consistent with other studies in the literature, such as \cite{turnwald2019human}, where a different game-theoretical planner is perceived almost as human-like as human recordings. However, in \cite{turnwald2019human}, the authors created a human-like motion planner for mobile robots, still maintaining a simplified framework that does not comprise, for example, human groups, obstacle avoidance performed by humans, and the human desire of keeping a safe vital space around them \cite{hall1966hidden}. Moreover, their tests only comprise simplified scenarios: a first test with either only humans, or only robots; a second test in which the participant, based on virtual reality, interacts with an agent who can move as a human or a robot. 
In our study, we went one step further in modeling (including the vital space, the group recognition, and the human-obstacle interaction) but also in the design of the variation of the Turing test (considering a real case scenario in which a robot moves in a human populated environment). Nevertheless, it is hard to make extensive comparisons with other approaches, as the literature on variants of the Turing tests for assessing social acceptability of a robot agent is scant. Notably, the literature reports three main methods to evaluate the human-likeness and the social acceptance of robot navigation: (i) definition of social rules or performance metrics and, then, assessment of the adherence of the robot motion planner to these principles \cite{kirby2009companion,muller2008socially,pradeep2016human}; (ii) comparison between simulated trajectories and observed pedestrian behavior \cite{tamura2012development}; (iii) questionnaire based on a variation of the Turing test \cite{kretzschmar2016socially,turnwald2019human}. The main limitation of the first two methods is that they do not consider how humans perceive the robot. However, these methods can be applied to evaluate, as a preliminary test, some features of the generated trajectories. Indeed, our analysis of the metric parameters of the generated trajectories falls within the first methodology, whereas the second methodology has been used as a validation criterion for our game-theoretical model of pedestrian motion. Hence, toward our aims, we deemed the Turing test as an effective means to study the human-likeness and the social acceptability of the generated trajectories. Unlike the Kretzschmar's \cite{kretzschmar2016socially} and Turnwald's \cite{turnwald2019human} tests, where volunteers watched videos in which the totality of agents moved either in an artificial way or as real pedestrians, our questionnaire changes completely such a perspective. 
In fact, our test videos reproduce a true use case scenario of the algorithm (an environment populated by people with a single robot moving within), where the real nature of agents is masked and made uniform to eliminate any participants' bias. Moreover, unlike Kretzschmar's test \cite{kretzschmar2016socially}, where the Turing test is executed only on 10 participants, we performed an a priori power analysis to infer the correct sample size to obtain statistically significant results. Due to the largely superior size of collected data than the outcome of the power analysis, we carried out a 100-iteration bootstrap, always getting consistent results across iterations, highlighting the robustness of our results and further corroborating our hypothesis.\\ When interpreting the results of our study, we should also acknowledge the limitations of the \textbf{model} and of the \textbf {test} design. Regarding the former, our \textbf {model} does not take into account the uncertainties that arise from the interaction with the external world. Importantly, the stochasticity of human behavior is not explicitly modeled, although this is implicitly accounted for through tuning model parameters identified from real trajectory data, extracted from surveillance cameras. A range of simplifying assumptions were in order to handle the computational complexity of the algorithm. The main one resides in the discrete nature of our model, whereby each agent can choose between a fixed number of motion directions --an indispensable trade-off between predictive accuracy and computational effort. Moreover, the designed human motion model has been devised to operate with a limited number of pedestrians: its computational complexity may be difficult to manage if the number of agents increases to more than a dozen. 
The pedestrian model used in this study only considers people's goal-directed and collision-avoiding behaviors, while ignoring other social activities that humans may perform in a pedestrian urban scenario, such as waiting for a bus or wandering without a clear direction. Thus, any pedestrian behavior that is not contemplated by our model breaks the assumptions under which our system works. In addition, our method does not allow customization of trajectories. For example, the prediction of a trajectory walked by an elderly person may be coincident with that of a child. The main limitation of the \textbf{test} design is the choice of the navigation algorithm used for comparison (VFH). Ideally, more than one algorithm should have been selected in order to mitigate algorithm-induced biases. However, since the execution of the Turing test already took the average participant about 20 minutes, we preferred to limit our comparison to only one state-of-the-art algorithm, in order to avoid increasing the time of the experiment for each participant, mitigate attention biases and, in the end, achieve robust results. Our work can be extended along several directions. To manage and predict the motion of big crowds, mean-field games could be adopted \cite{dogbe2010modeling}. We remark, however, that crowded and populated scenarios are different under many aspects, and the deployment of a robot in the two scenarios would cover totally different application fields. The lack of customization in the inference of trajectories by our model can be mitigated by combining our approach with learning strategies as in \cite{ma2017forecasting}, encompassing variegated behaviors across the experimental scenario. In fact, adding variability to the pedestrian model might allow for a more accurate prediction of human motion patterns and should allow the robot to better adapt to the needs of the human with whom it is interacting. 
For example, if a robot recognizes a person who has difficulties in walking, the robot should be able to predict their movement and possibly reduce its speed. Moreover, it would be interesting to understand and assess the quality of our generated trajectories considering not only social acceptability but also the comfort \cite{shiomi2014towards} feeling of participants, for instance by creating a real environment shared by humans and a robot. \section*{Acknowledgments} A.R. and G.G. are partially supported by Compagnia di San Paolo. S. G. is partially supported by the ERC under project COSMOS (802348). The experimental protocol regulating the administration of the Turing test to human subjects, the evaluation of the results, and the data management plan was approved by the ethical committee of the \emph{Istituto Superiore di Sanit\`a} (Italian Institute of Health) with approval code \emph{AOO-ISS 10/07/2020 - 0024079, Class: PRE BIO CE 01.00.} Each participant also provided informed consent, after the explanation of the nature and possible consequences of the study.\\ The code used for the generation of the trajectories and the anonymized data collected during the experiments are available through the following link: https://gitlab.com/PoliToComplexSystemLab/game-theoretic-trajectory-planning.git \section*{Attributions} Conceptualization SG SM AR Data Curation GG SP Formal Analysis GG SP Funding Acquisition AR Investigation All Methodology SG SM AR Project Administration AR Resources SG SM AR Software GG SP Supervision SG SM AR Validation GG SM Visualization GG SP Writing original draft GG AR Writing review and editing All \bibliographystyle{Science}
{ "redpajama_set_name": "RedPajamaArXiv" }
2,036
Career · celebrity · Fashion · Apr 12, 2017 What It means To Have Edward Enninful at British Vogue The news broke that Alexandra Shulman will be stepping down as Editor-in-chief of British Vogue, followed swiftly with the announcement of her successor. To much surprise and breaking from tradition in many ways, Edward Enninful OBE was named as the future Editor-in-chief to take the reins of the magazine. In her 25 years as the British Vogue editor, Shulman has been instrumental in ensuring the magazine was more than a monthly handbook of style and beauty. She saw the magazine through shifts in British fashion, the growth of iconic designers, and political and economic storms. Her tenure at British Vogue boasts an impressive set of work including the iconic "Millennium Issue", as well as launching the Vogue festival, and marking last year's 100th British Vogue issue featuring the Duchess of Cambridge. It's undeniable that her legacy cannot be tainted. But what does it mean now that British Vogue has appointed its first male Editor-in-chief in their 100-year history? It is without doubt that Vogue is the pinnacle of fashion and the role of Editor-in-chief is its matriarch. Therefore, is a male editor an expected inevitability after 100 years, or is this a bold appointment against tradition? Every Editor-in-Chief has played an important role, not just at British Vogue, but within the whole of the British fashion industry, who will no doubt await with anticipation. Being the first male, after 100 years of female editors, can we expect Enninful's approach to this important role to be somewhat different? Enninful may not be a familiar name to some but, nevertheless, he brings a spectrum of experience from within the fashion industry. Starting out his fashion career aged 16, he became a London-based stylist where he worked for Nick Knight. 
Since then his career took off- becoming the youngest Fashion Editor in the cutting-edge, fashion magazine i-D, to most recently acting as style editor for W Magazine. If that didn't impress you, Enninful has also played a leading role in Vogue Italia's landmark "Black Issue", where it was so successful that they had to print an extra 40,000 copies. Enninful's extensive résumé makes him more than qualified to be British Vogue's new Editor-in-Chief. Mandatory Credit: Photo by REX/Shutterstock (6897943w) Edward Enninful after receiving his Officer of the Order of the British Empire (OBE) and Naomi Campbell Investitures at Buckingham Palace, London, UK – 27 Oct 2016 As well as being the first male, Enninful also marks the first black person to edit the magazine. He has always celebrated his Ghanaian heritage throughout his career. This spirit has transcended to those he's worked alongside, encouraging others within the industry, including models, actresses, designers and other creatives, to be proud of their background. In 2016 his work, encouraging diversity within fashion communities, was recognised when he received an OBE for services to diversity in the fashion industry. Conde Nast chairman, Johnathan Newhouse called Enninful, "an influential figure in the communities of fashion, Hollywood and music which shape the cultural zeitgeist". His highly regarded status within the industry may have been the reason he was picked for the role, but, unlike other Vogue Editors, he continues to actively stay in touch with future generations of talent. It goes without saying that Enninful is an emblem of progress and diversity. The British public will look to him for a touch of positivity and continue to pick up an issue of British Vogue to escape the sirens of the noisy outside world. Written by Angelee Kholia The post What It means To Have Edward Enninful at British Vogue appeared first on LAPP..
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
304
Let's Kick Some Ass There's nothing like a Brit in a snit. From the Guardian: At the heart of the accusation is the fundamental tension between journalists - largely Arab reporters catering for an Arab audience - who say they are anxious to cover the story from both sides, and a United States that regards reporting on some aspects of the insurgency as tantamount to collaboration with terrorism. None of which would matter much were it not for the ferocious tenacity and professionalism of Al Jazeera, factors which have made the station an international phenomenon. Most gallingly for the US, its reporters have told a story that Washington either disagrees with or would rather remain untold: that the kind of war America is prosecuting in Iraq is messy and heavy handed; that civilians are too often the victims, and that the insurgents are not shadowy sinister figures but ordinary men with more support than politicians would like to acknowledge. The world has had enough of this cowboy shit. It never crossed my mind that another country might take the lead in saying "enough already" and exposing the lies that got us into this mess (oh that ought to really chap O'Reilly's ass, eh?), but it looks like that may indeed be where we are headed. Bully for them. (photo by David Woo, thanks to Valley Girl) posted by Jane Hamsher @ 11/26/2005 11:40:00 PM You Just Keep Thinkin', George, 'Cos That's What You're Good At Oh you can just see the big brains at work here. The Sunday Times is reporting that the day before Dubya "jokingly" told Tony Blair they ought to bomb the daylights out of Al-Jazeera in Qatar, Rummy was fuming about them in a Pentagon briefing.
Sounds like one of those coordinated, full-frontal assault things that a kick-ass, take-no-prisoners guy like Dubya would go for: The Coalition Provisional Authority in Baghdad detailed 34 instances of alleged hype and distortion by the television station from April 8-13, ranging from reports of a helicopter and fighter plane being shot down to stories about American soldiers killing and mutilating Iraqi citizens. The CPA? Was this before, during or after they lost $8 billion dollars in cash? Just curious. In 2001, after the September 11 attacks, the Pentagon awarded the Rendon Group, a public affairs firm, a $16.7m contract to monitor media in the Islamic world. It was assigned to track "the location and use of Al-Jazeera news bureaux, reporters and stringers", and was asked to "identify the biases of specific journalists and potentially obtain an understanding of their allegiances". Now if only they'd just done what they were told and run with that Jessica Lynch thing and shut up about the torture, which we don't do but don't ask us to stop, all of this could've been avoided. Frank Gaffney, head of the Center for Security Policy, a Washington-based think tank, last week described Al-Jazeera as fair game on the grounds that it promoted beheadings and suicide bombings. Was this one of those nights where everyone at the Times got loaded and tried to see how many assholes they could shove into one story? O'Reilly better get cracking on that War on Christmas shit again, 'cos this one's a comer. 
(thanks to reader db for the tip, and graphics love to Monk of course) Update: According to UPI, Blair may have overplayed his hand by invoking the Official Secrets Act: Leading opposition figures from the Conservative, Liberal-Democratic, Scottish National and Plaid Cymru (Welsh) parties have banded together to back the cross-party motion titled "Conduct of Government policy in relation to the war against Iraq" to demand that the case for an inquiry be debated in the House of Commons. They seem assured of the 200 signatures required to get such a debate. Given how heavily Fearless Leader claims we leaned on British Intelligence to justify our own little incursion, this could get very interesting. Thanks, Murtha. Joe Will Take It From Here. When John Murtha went up like a trial balloon last week, Joltin' Joe Biden "wasn't there yet," 'cos God forbid someone says Joe Can't Do War. Then Jean Schmidt went over like Schiavo, Joe stuck his finger in the air and felt the wind shifting, and just in time for the Sunday morning chat shows he writes in the WaPo that he wants a timetable for withdrawal. Can someone count the number of times Colonial Joe talks about preserving "our interests" in Iraq? Hell, what's a few more dead bodies in the wake of the juggernaut that is Biden 2008. Hey Joe? When people say they don't think Democrats stand for anything, they're talking about you. Image Rehabilitation 101 "It was a little cocker spaniel dog in a crate that he'd sent all the way from Texas. Black and white spotted. And our little girl-Tricia, the 6-year old-named it Checkers. And you know, the kids, like all kids, love the dog and I just want to say this right now, that regardless of what they say about it, we're gonna keep it." Do they really want to go there? 
Andrea Mitchell Watch -- Day 1 I realize there is some unwritten law that says no woman newscaster is allowed to appear on any of the NBC channels and question the network's hypocrisy relative to TraitorGate, but Andrea Mitchell's continued presence thereon is particularly scalding. I live for those special moments when she, Tim Russert and Chris Matthews get together and "question" each other like they're all not up to their eyeballs in it. The stuff of Murrow, that. Tom Maguire busts her on this little gem from the Tim Russert show on October 29, 2005: MITCHELL: You know, I should have spoke--'cause there's been a lot blogged about all of this--I was called by the CIA because it was erroneously reported in The Washington Post that I was the recipient of the leak before Novak's column came out, and I had not been. So I was never questioned because I simply told the FBI--and, you know, NBC put out a statement that night--that I had not been a recipient of the leak; in fact, I had learned about it from Novak's column like everyone else. Then after the fact, a lot of us had gotten calls and conversations with people, you know, `Hey, how about the Novak column?' But that was after the fact. That particular sentence is really a marvel -- she maintains she was never "questioned" even as she's saying she talked to the FBI. Did a few agents just swing by to drop off a casserole and she started blathering so fast they couldn't get a word in edgewise? (Fitzgerald did, in fact, get a subpoena for her.) (Note: as emptywheel points out in the comments, more correctly Fitzgerald's subpoena was for information from the White House regarding contact with her.) Oh those nasty bloggers and their mean questions. We should all just go back to posting vacation snaps and leave reportering to the professionals. NB: If anyone's got a copy of Andrea Mitchell's Oct. 
28, 2005 appearance on MSNBC with Chris Matthews the morning of the Libby indictments (before Fitzgerald's press conference) please email me, I need a copy. Better Stock Up Now... In case you were asking yourself, "Is it possible that the Scanlon plea will lead to more activity in the Abramoff investigation?", the answer is yes, indeedy. Prosecutors have already told one lawmaker, Rep. Robert W. Ney (R-Ohio), and his former chief of staff that they are preparing a possible bribery case against them, according to two sources knowledgeable about the matter who spoke on the condition of anonymity. The 35 to 40 investigators and prosecutors on the Abramoff case are focused on at least half a dozen members of Congress....The investigators are looking at payments made by Abramoff and his colleagues to the wives of some lawmakers and at actions taken by senior Capitol Hill aides, some of whom went to work for Abramoff.... Prosecutions and plea deals have become more likely, the lawyers said, now that Abramoff's former partner -- public relations executive Michael Scanlon -- has agreed to plead guilty to conspiracy and to testify about gifts that he and his K Street colleagues showered on lawmakers, allegedly in exchange for official favors. Clearly, Santa has decided that I have been a very good girl this year. The WaPo is reporting that Tom Delay, Conrad Burns, and John Doolittle, among many others, are under increased scrutiny, along with members of the Administration. As I explained here and here, Scanlon's cooperation is likely to be the straw that breaks the cooperation camel's back for a lot of lawmakers. Scanlon flipping opens the door wide to the possibility of a lot of felony prosecutions, by providing the investigators a direct link and testimony on pre-legislation deals, bribes and other improper conduct.
Another Delay indictment in my stocking would be lovely, but throwing in so many more members of Congress and the Administration, all wrapped up in a tidy little bow? I'm giddy. According to the article, Ney, his former chief of staff Neil Volz, Jack Abramoff, and another Abramoff partner, Adam Kidan, have all been notified that they are targets in the matter. And there are more than 40 members of the investigative/prosecutorial team still working the case. I'm stocking up on popcorn. And did I mention that Fitz is still working, too? Ooooh, I have been a good girl this year. Kevin Drum has more from the WSJ. And Josh Marshall has this tidbit on the SunCruz murder investigation -- one of the hit guys is implicating Kidan. posted by ReddHedd @ 11/26/2005 09:30:00 AM Your Safety "Life Coach?" In what may be the single most pitiful article on Michael Brown that I have ever read, the NYTimes is reporting that Heckuva Job Brownie will be starting his own safety consulting business. He'll be teaching clients how to not be like him. ''Hurricane Katrina showed how bad disasters can be, and there's an incredible need for individuals and businesses to understand how important preparedness is,'' he said. Erm...okay. Now there's a selling point if I ever heard one. Brownie says he's lined up some clients, presumably people who think he can get them access to folks on Joe Albaugh's contracting coattails. And he's gotten some very positive feedback. ''I'm doing a lot of good work with some great clients,'' Brown said. ''My wife, children and my grandchild still love me. My parents are still proud of me.'' Uhhhh...yeah. I'm sure the folks in the Lower Ninth Ward in New Orleans and across the rest of the Gulf Coast find that reassuring. Ahem. Al-Jazeera: Chok Full O' Terrorists?
I'm constantly amazed at people like Daniel Johnson who occasionally manage to drag themselves out of the primordial slime, discover anew their opposable thumbs and tap away at the keyboard: That shutting down Al Jazeera would be desirable from the Anglo-American point of view is obviously true. And if Qatar, a Gulf state that is nominally an ally of America (on which it relies for its independence), has allowed its capital to become Al Qaeda's principal propaganda base, it has no right to expect America automatically to refrain from punitive action on its territory. First off, Al-Jazeera isn't some dodgy cable access show you can only pick up with enough tinfoil and the right atmospheric conditions, it has a viewership of some 45 million people. They are the network who just signed Sir David Frost for their English language news channel set to launch next spring. They are also the network who recently hired ex-Marine Josh Rushing (above, he of The Control Room), who seems to have a bit more faith in the ultimate value of the message of democracy than some of his wingnuttier critics: Rushing views Al-Jazeera's English-language channel as a forum for reaching millions of Muslims, many of whom may not understand the America he knows, and for reaching millions who he thinks know little about the Muslim world, including Americans. "The gravity of it sets in all the time," he says during an interview in the dining room at the private Army and Navy Club, two blocks from the White House. "It puts me where the good fight is --— at a station that's going to bridge America and the rest of the world." 
It's ironic that those who skulked into power in the US by taking control of the media seem to think that the only way to spread democracy in the Middle East is to bomb the fuck out of it, and that anyone who collaborates with indigenous media is a traitor: Rushing's response to such criticism: "I believe in America so dearly and the values that it stands for that I'm in no way threatened by the kind of information this station's going to put out. "Besides," he explains, "once a Marine, always a Marine." Over at the BooMan Tribune, BooMan himself has a really good article outlining the utter stupidity of bombing anything in Qatar (does Dubya even know where it is?) where the US military quite sensibly decided to relocate much of its munitions, equipment and communications gear out of Saudi Arabia following Bin Laden's fatwa of 1998: The one thing the Bush administration did to appease Bin-Laden was to move our airbase from Saudi Arabia to Qatar where, presumably, it would cause less resentment and violent resistance. Whatever the merits of that decision, they have been pretty well wiped out by the decision to invade Iraq. Nevertheless, our airbase in Qatar is absolutely critical for supporting our operations in Iraq, and is used for missions over Afghanistan as well. If we had used the airbase in Qatar to bomb the capital of Qatar, where al-Jazeera's headquarters are located, it stands to reason that we would no longer be welcome to use that airbase for other purposes. And since we could not simply move our airbase back to Prince Sultan or negotiate a new location and build a new airbase overnight, it would have necessitated a complete takeover of the country to keep our air force operating over Iraq. If Bush had ordered such a mission it would have been just cause for mutiny, or even a palace coup to prevent tremendous harm to our country and our military's operations. The idea that Tony Blair had to argue against this mission is truly frightening. 
It should have been dismissed by any number of Americans before it could be discussed with Blair. Andy Card, Condi Rice, Don Rumsfeld, Dick Cheney, Colin Powell, and several others should have pointed out the lunacy of such a plan as soon as they heard of it. It stands to reason that any attempt to blow up al-Jazeera would have to be done with plausible deniability. That means a truck bomb or something would need to be used. You can't use bomber aircraft launched from 20 miles away. Such a plan is triply insane. Meanwhile per Crooks & Liars, we learn that Al-Jazeera staffers have started a blog called Don't Bomb Us. And Mark Kleiman notes, "after his denial that Karl Rove or Scooter Libby had any role in revealing that Valerie Plame worked for the CIA, a statement by Scott McClellan that a report is "outlandish" is tantamount to a confirmation. Sometimes you have to throw up your hands and wonder if they are criminal, stupid, or criminally stupid. Party On, Pat From an AP article on Judge Reggie Walton, the judge in the Scooter Libby case: Walton's role also may be crucial regarding the use of classified information and whether documents sought by Libby's lawyers are relevant. If Walton rules against opening classified files to the defense team, Special Counsel Patrick Fitzgerald's case would move to trial. If Walton rules that Libby must be allowed to present certain evidence that is currently classified, all or portions of the case could be dismissed if the intelligence bureaucracy refuses to declassify the material for use in court. A lot of people, including yours truly, would love to see Libby facing Espionage charges, and the indictment certainly seems to indicate Fitzgerald had the goods. But in choosing to bring these particular charges rather than others at this point, he may have been anticipating this kind of an end-run.
As Fitzgerald said in his press conference: [A]t the end of the day, I think I want to say one more thing, which is: When you do a criminal case, if you find a violation, it doesn't really, in the end, matter what statute you use if you vindicate the interest. And in the recent affidavit Fitzgerald filed, he states: Because the indictment in this case charges obstruction offenses rather than substantive national security crimes, it is hoped that the case can be tried with a minimum of issues concerning classified information needing to be resolved, and thus that the trial may be conducted in as public a manner as possible. Scooter's looking at 30 years. I'd say that vindicates the public interest, especially if it allows BushCo. a minimum of opportunity to screw the pooch by refusing to declassify certain material. Furthermore, as rwcole said in the comments: Sounds like Fitz decided to take Libby down using the OLD Grand Jury -- partly to insulate the evidence that would go to the NEW grand jury. This will help him to keep Libby's grubby mitts off evidence (if any) to be used in the big case - the espionage conspiracy case. Libby, of course, might get an invitation to that event as well -- but he'll have to wait with the other defendants to see what Fitz has against him. Like I needed more reason to believe Mr. Fitzgerald is smarter than me. He's putting the squeeze on Libby hard, and there is damn little wiggle room. SWEEEET. Update: Emptywheel has a great foray into Scooter and Dick's history of press overreaction and demonstrates quite ably that their Wilson smear campaign was right in character. (graphic by Monk at Inflatable Dartboard.) And Tarek Ayoub is Still Dead BradBlog has an amazing clip from Channel 4 in the UK on the memo that purportedly has Bush telling Tony Blair he wanted to bomb Al-Jazeera.
They assert that the reason journalists are being threatened with the Official Secrets Act for the very first time is because the White House is putting pressure on Blair et. al. to keep the memo under wraps, and speculates that there are other things in the memo that the US doesn't want to come out. Said Sir Menzies Campbell: "Well it does seem to me a very draconian threat and it leads one to the suspicion that the anxiety here is not so much the national interest but preventing the government from embarrassment. After all the events with which we are concerned took place some considerable time ago. Why is it necessary to invoke the terms of an act designed to deal with issues which arise in a time of national emergency?" There is also an interview with Clive Stafford-Smith, who represents Al-Jazeera cameraman Sami al-Hajj, who was seized while traveling to Afghanistan on assignment and has been held in Guantanamo Bay for four years without being charged. In recently declassified documents, Stafford-Smith says that al-Hajj has been questioned 130 times, and 125 of those times they tried to get him to admit that Al-Jazeera was a terrorist front funded by Al Quaeda. So far neither Downing Street nor the State Department have responded to Al-Jazeera's inquiry about whether this is just one of Bush's "jokes" or not. Since the entire Arab world we are so anxious to "democratize" is waiting to hear exactly what's up with all of this, it seems like something Karen Hughes might want to big foot her way over and get right on. Meanwhile, the Power Tools have put on the Schutzstaffel regalia again and are marching around in circles singing Kampflied der Nationalsozialisten as they celebrate this ugly disgrace of an article in the New York Sun cheering on the bombing of Al-Jazeera. God it must get nasty when everyone wants to be Himmler. (hat tip to "Me" in the comments) Yeehaw!!! 
Isn't a Foreign Policy It's the holiday recess for members of Congress, home in their districts for turkey dinner and flesh pressing, senior citizen center lovefests. Except this year, the love isn't always there. (Here's a fun idea: show up at local elected official chat-fests and talk with them publicly about accountability and making better decisions. It's more fun than crowded shopping malls! And then report back here how things went.) Things are getting ugly -- and not just for the most junior member of the House of Representatives whose own hometown conservative newspaper, the Cincinnati Enquirer, is questioning her motives in an editorial today. Boy, Jack Murtha sure stirred up a hornet's nest by...*gasp*...demanding accountability and actual planning for the betterment of the country, and because we owe it to the folks sacrificing for this nation in uniform. And he is not alone among Dem hawks who are disgusted with the way this war is being conducted. (Via Huffington Post.) And Bushie? Well, the Preznit and his cronies are digging in, refusing to look at reality and public mood as reflective of their failed policies. Instead, it's a game of blame the messenger (we saw quite a bit of that from Mean Jean and her pals last week). But will this work? Can the public be coaxed back into the kool-aid after they've had a clear-eyed view of things? Kull said the best the administration may be able to hope for is a draw in the battle for public opinion. If positive changes occur, from a reduction in violence to a stable government to more international involvement, "then he may come out with a possible modest success out of it," he said. "But it's important to remember there are a lot of forces out there that are very determined to make sure this doesn't look like a success. . . . So it's unlikely it will look like a clear success." Probably not. The Economist has an article up today that is worth a read on this subject.
Mike Kinsley has a must read editorial in the WaPo today. And Robin Wright has an eye-opener -- so much for the "things are fine and dandy" crowd. The time has come for grown-ups to take over our long term strategic planning. We face threats -- both external and potentially internal -- that are far too serious to be half-assed or glossed over. We must start learning from our mistakes (admitting them would be a good first step), and we have to start thinking in the long term. Here's my advice to Bushie: Yeehaw!!! is not a foreign policy. (Kudos to reader Zennurse for the title. That bumper sticker made me giggle half the afternoon. Mwahahahaha.) UPDATE: Ann Coulter is a moron. Think Progress has more. Now This Is How It's Done According to the AP, Michael Scanlon has agreed to testify against Jack Abramoff in his SunCruz indictment case, in exchange for Scanlon receiving consideration for a reduced sentence depending on his level of cooperation. This is a very common practice with prosecutors: flipping a subordinate to rat out the next level up. In Scanlon's case, the next level up is a Pandora's box of Republican KStreet power brokers and elected officials. In his plea agreement, Mr. Scanlon admitted helping Mr. Abramoff and Mr. Kidan buy SunCruz by persuading Representative Bob Ney, Republican of Ohio, to insert comments into the Congressional Record that were "calculated to pressure the then-owner to sell on terms favorable" to the two men. It is clear to me that prosecutors are now pressuring Ney to cough up another Congressional hairball -- perhaps Delay, given how entwined his PAC arrangements and Abramoff's skybox bonanza had gotten. The NYTimes has an additional article regarding the prosecution of these types of cases. (Covered earlier on FDL here.) Here's my question for the NYTimes: Are Joe DeGenova and Victoria Toensing -- the Boris and Natasha of the GOP attorney shill machine -- the only two attorneys that you have on speed dial? 
For pete's sake, can you at least publicly identify them as the GOP activists that they are, instead of giving them some sort of neutral quoting cover? It is wrong to pretend that these two are anything but partisans, and you do your readership an enormous disservice by pretending otherwise. (Josh Marshall has much more.) Whatever the shill position might be, the fact is that the DoJ's public corruption unit has at least one insider in their pocket on this, with more negotiating as I type. In this sort of case, flipping an insider is how it's done. Looks like Boris and Natasha have lost again. An Official Job Offer for Judith Miller at Firedoglake Dear Judy, I know it has been a very difficult year for you, and since your departure from the New York Times the offers haven't exactly been rolling in. Oh sure you've been spotted brunching with the New York Post, but what would the Hamptons set say? You have the Pulitzers to think of after all, and sharing bylines with the likes of Deborah Orrin would be positively unseemly. You might as well go to work for the Enquirer channeling the ghost of Jeanne Dixon. And Regnery isn't really a press, it's more like wingnut welfare. Recently you've been traveling around and speaking in favor of a national shield law for journalists, and when Jay Rosen confronted you with the fact that such a law would not have covered your particular case, you affirmed that it was a good law anyway, and since you really cared about the rights of journalists it was worthy of support. I think what you really need right now is an opportunity to prove that this isn't just an attempt to wrap yourself in First Amendment finery, that you really do care about protecting journalists and are willing to take action to support them. So I want to offer you a gig. No, I'm serious. We can't pay you, but if the reports of your seven figure parachute are true that shouldn't be a problem for you right now.
I think your skills are desperately needed at the moment in order to help the cause of journalists worldwide. Apparently there is a memo floating around in London that says George Bush wanted to bomb Al-Jazeera. Due to their Official Secrets Act, none of the British papers can legally publish it. I think we can make beautiful music together here, Judy. I think you have amazing connections that can allow you to put your hands on that document. And we will happily publish it. Of course, there's no risk for us because it costs us nothing and we don't have any Official Secrets Act over here, but think of the opportunity to rise above partisan politics to truly stand in defense of the principles you purport to hold so dear. Al-Jazeera has, in fact, been bombed by US forces on two occasions, both in Baghdad and Kabul. If there is evidence that these were deliberate attacks, you owe it to your fellow journalists to ferret out the truth. Because they didn't just throw journalists in jail, Judy, a journalist died. Maybe you even knew him, his name was Tarek Ayoub. He was a father, a husband, and a Palestinian. Do his rights as a journalist count too? I know when you think about it you will see that this is a really great idea. Reddhedd and I eagerly await your response. In the spirit of rapprochement, wishing you and yours a happy Thanksgiving. From the Department of It's About Damn Time It seems that the so-called "purple Democrats" have decided to start voting with the party a whole lot more these days. It's about damn time. On three big votes recently - the energy bill, the FY 2006 Labor, Health and Human Services (HHS) and Education spending bill, and a budget reconciliation bill aimed at $49.5 billion in spending cuts - not a single Democrat voted with Republicans. Democrats have little power in a Congress where they are in the minority, unless they stick together and force the Republicans to eke out whatever victories they can, or fail altogether.
This forces Republicans to rely on votes from the moderates in their own party -- and forces them to move bills to less extreme positions in order to secure their votes (at least to some extent, anyway, I'm not ready to dance in the streets or anything...). With the 2006 election looking shaky for Republicans at the moment, what with Preznit Crapola Poll Ratings and his merry band of corrupt cronies and all, Democrats from conservative districts are trying to distance themselves from Bushie at the speed of light. While it is true that some have had a "see the light" conversion after the Katrina debacle in their districts (see Rep. Gene Taylor pictured above as a possible example), for some it's just political expediency. While I'd rather see a more progressive motivation for the change of heart, at the moment I'll content myself with the votes and the Democratic unity. (Photo courtesy of The Hill.) posted by ReddHedd @ 11/24/2005 04:11:00 PM You've got to hand it to Michael Isikoff and his tireless efforts to try and make the most unsympathetic man in the world -- Karl Rove -- look almost human. Even on Thanksgiving, he picks up pen in weary hand to let us know that Karl, poor Karl, had to take out a $100,000 line of credit to pay his mounting legal bills. According to the Center for Public Integrity, the average net worth of the individual members of the Bush cabinet, including the President and Vice President, was between $9.3 and $27.3 million in 2002. That's nearly ten times that of their counterparts in the Clinton administration. On the richter scale of genuine newsworthiness, I'd say Rove's loan rates just below Nick and Jessica's split. I am certain that there will no doubt be a fat, healthy Rover defense fund fueled with endless supplies of blood money should the need arise. 
Of far more worth is the fact that there is no free speech in America, at least not in that little patch of land known as Crawford, Texas, where Daniel Ellsberg and Cindy Sheehan's sister Dede Miller were arrested yesterday in defiance of a new ban that makes it illegal to camp or park within 7 miles of His Imperial Highness. Today I'm thankful that there are still people in this country ready to put themselves on the line to challenge Karl Rove and everyone else who thumbed through a copy of Orwell's "1984" and said "hey cool, you know I bet that would work." (thanks always to Monk at Inflatable Dartboard, whose superlative graphics are always a source of inspiration) posted by Jane Hamsher @ 11/24/2005 11:40:00 AM It's dreary here, cold, with a wind that makes our dachshund whine when she goes out. I'm sitting in my kitchen, watching Elmo with my toddler between bouts of cooking, and being thankful for so many things. Thought I would share a few with you this morning, before I get back to turkey, stuffing and praline sweet potatoes. - I'm very thankful for FDL, and to Jane for giving me a chance to blog with her a little over two months ago. It's been a whirlwind, and a blessing to channel my anger at this Administration into a more pro-active, informative debate. Our commenters are amazing. Jane is absolutely right -- I get some of my best ideas from the comments, and I could not do this without everyone who participates here. - I am grateful for my husband, who is very supportive of my blog obsession, and a wonderful father to our toddler. Speaking of the toddler, I am so grateful to have a child -- now I have an excuse to watch Elmo and The Wiggles whenever I want. Some days are chaos here trying to get an article up in the midst of a tantrum or fixing dinner, but I wouldn't change a thing. - The greater blogosphere is amazing as well. Jane and I met at dKos, not surprisingly via Traitorgate postings. 
Having a place to discuss the pent-up frustration with non-wingnuts has been a sanity saver over the last few years. But finding such a wealth of intellectual debate on so many daily blog reads has been a true blessing. - I am truly grateful to all of the public servants out there who do their jobs the way they are meant to be done -- with compassion, with dedication, and without political nastiness. Patrick Fitzgerald is a great example, but there are so many other unsung Fitz's out there: in uniform, undercover, carrying a briefcase or not. And I wanted to take a moment to say thank you for making all of our lives a little better. And also, a thank you to all those reporters who do their jobs well -- kudos. (And a Thanksgiving gift of an article in the WaPo to all of you.) - Finally, thankfully the curtain is pulling back and the rest of the American public seems to realize that the wizard is really just a fraudulent little snake oil salesman. Here's to more competence, and more accountability. Oh, and more indictments. Can't forget those. Wishing everyone in the FDL community a very thankful day. And for the Americans among us, Happy Thanksgiving. Now back to the cooking... (Painting entitled "Mayflower in Rip Tide," artist unknown.) Big Time's Big Adventure Writing in Salon, Sidney Blumenthal has a quick, brutal sketch of Richard B. Cheney: Cheney is a master bureaucrat, proficient in the White House, the agencies and departments, and Congress. The many offices Cheney has held add up to an extraordinary resume. His competence and measured manner are often mistaken for moderation. Among those who have misjudged Cheney are military men -- Colin Powell, Brent Scowcroft and Wilkerson, who lacked a sense of him as a political man in full. As a result, they expressed surprise at their discovery of the ideological hard man. Scowcroft told the New Yorker recently that Cheney was not the Cheney he once knew.
But Scowcroft and the other military men rose by working through regular channels; they were trained to respect established authority. They are at a disadvantage in internal political battles with those operating by different rules of warfare. Their realism does not account for radicalism within the U.S. government. I've been reading Richard Clarke's book, and he notes that Cheney "had been one of the five most radical conservatives in Congress. The quiet often hid views that would seem out of place if aired more broadly." Read: Get him talking and Cheney's a fucking nutbag, everyone around him knows it, has known for 30 years, and nobody speaks up about it 'cos they're all terrified of being the blood sacrifice at some Cheney/Novak family picnic. A grateful nation should kick in and get him a CheneyMaster 3000 ™ Defibrillator and Bacon Frier on his way out the door so he can spend his retirement years fearlessly eating salt-cured fatback and we won't be worried that he'll step back in and nuke Beijing or something just because he's feeling a bit peckish. First We'll Bomb the Journalists Someone should tell Preznit Cave Smoker that he hasn't got a whole lot of homies in the Middle East, but Qatar (where Al-Jazeera is headquartered) is definitely one of them. And since Tony Blair has now taken the trouble to threaten the Daily Mirror about publishing a British government memo saying PM Poodle talked Dubya out of bombing Al-Jazeera in April of last year, one might reasonably assume such a document exists. Having "accidentally" bombed Al-Jazeera's Kabul office in 2001 and their Bagdhad offices in 2003, I guess Dubya was going for the hat trick. Reporters Without Borders said: "We find it hard to believe that President Bush really discussed this possibility. Have you been under a rock for the past five years? White House spokesman Scott McClellan said: "We are not interested in dignifying something so outlandish and inconceivable with a response." 
You don't have to launch space shuttles in your spare time to connect the dots on this one. I think it's high time for another gaggle. I'll take Terry Moran in the White House briefing room with a cattle prod. BooMan Tribune has more. (hat tip to AmericaBlog) Ney Not Having Fun Day(s) I swear that I'm not picking on Ohio today. But Bob Ney is in the news again -- this time via the DCCC. Identified in new court documents as "Representative No. 1," Republican Rep. Bob Ney of Ohio has become the poster boy in the Jack Abramoff bribery probe, a beneficiary of trips, tickets and campaign donations, allegedly in exchange for official acts.... Three full pages in the court papers in Scanlon's guilty plea Monday itemize things of value to Ney or his staff and official acts allegedly performed in return. According to the DCCC, a strong Democratic opponent is expected for the 2006 elections (although no one officially selected as yet). Here's hoping. By the way, if Ney is reading here, my suggestion is: cooperate. What else have you got to lose? Ahem. Erm...Write Your Own Caption Some days, the stories just seem to fall in your...um...lap. And some days, it's a turkey. Ahem. No turkeys for Froomkin today -- the man is on fire. Bush's poll numbers are down, the public doesn't trust him, and Froomkin gives Waas' latest article some good coverage. Plus, Wolf Blitzer can be hilarious. What's not to love? Congressman Murtha is blogging on HuffPo today. Looks like his positive response is outstripping the negative by a wide margin. Oh, and the American Prospect says that the Bush Administration really doesn't want peace in Iraq. Today Andrea Mitchell reported on MSNBC that the Administration had a lot more "nuance" in the PDB than they told the public about in the run-up to the war. Why does Andrea Mitchell call our Preznit and Darth Cheney liars dissemblers? (Because the evidence is forcing her to do so? Ahem.) It's snowing here, and I'm baking pumpkin pies for tomorrow. 
Music of choice: The Carols of Christmas. For all the FDL readers who will be out of town for the festive feasting, have a wonderful Thanksgiving. For all of the rest of you, hang on -- it's a news-filled festival today. More to come... (Picture courtesy of Dependable Renegade. Thanks to reader dubhaltach for the link.) UPDATE: Oh...*snerk*...hahahahahahaha. Just read the first part of this. Padilla Charged: Cert. or Moot? I previously detailed the importance of habeas review during the Lindsey Graham legislation abomination. The recent charging of Jose Padilla brings the issue back to the forefront, along with a whole host of other constitutional questions: limitations of executive power, separation of powers, civil rights, and many more. For those not familiar with the Padilla case, he is the American citizen who was picked up on "dirty bomb" charges, and who has been held in a military prison solely by order of the President for more than three years, due to his designation as an "enemy combatant," despite being a US citizen. Both the WaPo and the NYTimes are reporting there is no mention of the "dirty bomb" plot for which he was first arrested in his indictment. (ABC's World Today has more from Padilla's attorney as well.) Glenn Greenwald has an exceptional post up about this case, and it ought to be required reading for everyone. (Hat tip to reader "A" for bringing this to my attention. Many thanks!) The post is thought provoking, and well worth a full read -- especially given the magnitude of the issues. The NYTimes further explores the Administration's failure to come up with a coherent, consistent policy for detainees, enemy combatants and pretty much everything else dealing with the clash of civil rights and detention issues after 9/11. These are very important questions that go to the heart of what sort of country we are -- and what sort of country we want to be. And we all ought to be asking them of ourselves and our elected officials. 
We've been talking about these issues around the blogosphere for quite a while, certainly here at FDL. It's a good sign that this has hit the MSM in spades over the last few weeks (although perhaps more emblematic of the low Administration poll numbers and fretting among GOP members of Congress). Here's hoping this gets a lot more attention and serious thought in the weeks to come -- and some action that speaks a whole lot louder about our commitment to the human rights of everyone, not just people the GOP finds valuable in the moment. But the big question in my mind at this point is whether or not the Supreme Court will dismiss cert. in the Padilla case, on the grounds that it is now moot. Will Chief Justice John Roberts and his fellow justices stand up for human rights -- and rule on the issue of a citizen as "enemy combatant" -- or the right of the Administration to continue to stamp on our long-held principles of civil rights? Stay tuned. (Jeralyn has more at TalkLeft.) (Painting by Johannes Vermeer, entitled Woman Holding a Balance, c. 1664.) So She'd Still Call Him a Coward... Jean Schmidt, freshman Republican Congresswoman from the OH-2, doesn't understand why people are angry with her. She just doesn't get what all the fuss is about. And people have been mean to her, including: Saturday Night Live, liberals, her hometown newspaper the Cincinnati Enquirer (oops-- they are conservative, that's not good), her friend Danny Bubp (oops again -- didn't she use his fake words to malign Jack Murtha? That's not good...), and lots of folks in her district who think she went way too far in calling a decorated war veteran, who has served this nation in uniform and in Congress with honor, a "coward." What does Jean have to say for herself? Yesterday, Schmidt said she hoped the hubbub will have faded by the time Congress reconvenes next month. Asked if she would change anything if she could do it over again, she replied: "I wouldn't have used Congressman Murtha's name." 
Ahhh...but you still would have called him a coward? (Major graphics love to Mike Tidmas via Pandagon. Huge thanks to reader bkny for tracking down this pix for me.) FEMA Reconsiders FEMA has decided not to kick displaced Katrina victims out on the streets during the holiday season, thanks to an outpouring of disgust from...well...everyone. "We are not kicking people out into the streets," R. David Paulison, acting director of FEMA, said in announcing the revised deadlines at a news conference here. "We want families in decent housing." You know your proposed policy isn't going to work when protesting members of Congress seem like they have more of a heart than you do. Ahem. With thousands of people still missing in the devastation of Hurricane Katrina, more than 400 bodies stacked in morgues that have yet to be indentified, and homes that are filled with debris, mold, and heaven only knows what else, we can all do better in helping the least of those among us. Time magazine had a heart-rending article about reconstruction efforts that is worth a read. No matter how much I and my family have tried to do to help, it just doesn't seem enough in the face of so much devastation and sorrow. All my best to those who are trying to find their feet again. And I'm very thankful that FEMA seems to have found its heart...at least for now. UPDATE: Reader Marysz also points to this information regarding voter disenfrinchisement issues in NOLA because FEMA is refusing to cooperate with state government officials trying to contact voters prior to the February election. Lovely. I'll Take Dick Cheney in the Bathroom With the Defibrillator Bob Woodward's "Mr. X" may or may not be the first administration official who leaked Valerie Plame's identity to a member of the press -- it is still not known who the journalist was who was contemplating an article on Joe Wilson that made him decide to go public with his own story first. 
But by most accounts these four candidates are the front runners as Woody's leaker, so place your bets now, 'cos the lid could blow off this pig any day: George W. Bush: Argument For: Woodward's source was someone he interviewed at length for his book, Plan of Attack. He definitely interviewed Gee Dubya, though the book says his interview took place in December 2003. The possibility that he was doing "background" interviews earlier on, however, still exists. The WaPo claims that "Mr. X" has testified in the Plame matter but Reuters says he has not appeared before the grand jury -- Bush fits the bill on both counts. Undersecretary of State Marc Grossman gave a briefing on Wilson at the White House on June 11 or 12, Walter Pincus's article in the WaPo appeared on the 12th, and Woodward said he spoke to his source a few days after the Pincus article. So Bush definitely had access to the information by that time. It could also explain Patrick Fitzgerald's odd appearance at Bush's lawyer's office. Argument Against: As Atrios would say, God does not like me that much. Dick Cheney: Argument For: Knew by the time he told Scooter Libby on the 12th that Wilson's wife worked in counterproliferation. Spoke to Woodward for Plan of Attack, though he did it on background, so a confidentiality agreement would cover him. Definitely had both the motive and the malice. Has recently dispatched anonymous gremlins to issue peculiar denials in the press. Also testified but not before the grand jury. CIA hates him just enough to out Woodward prior to Libby's indictment and fuck him up. Argument Against: On Larry King, Woodward all but cleared him, saying he did not talk to meet with him during the time in question. Hard to imagine the word "casual" applied to Dick Cheney. Richard Armitage: Argument For: Possibly had access to INR memo during the time in question. Known to have been one of Woodward's sources for Plan of Attack. No record of having appeared before the grand jury. 
Hasn't issued a denial. Argument Against: Didn't have any axe to grind against Wilson, wouldn't have been party to any OVP/Rove conspiracy. According to this article, didn't have the INR memo until after Wilson's op-ed piece. If Michael Isikoff is right and Woodward's source is Novak's source it ain't Armitage, 'cos the idea of him shooting the shit with the Prince of Darkness just stretches all credibility. And God, at the moment, does not appear to like the GOP quite that much. Stephen Hadley Argument for: Coordinated disinformation related to Niger uranium for the White House, including Tenet's self-flagellation. Putative point man for the smear campaign, received Rove's email about Cooper conversation. No record of testifying before the grand jury. Told friends he thought he was going to be indicted. Weird non-denial in Korea. Recently promoted, a sure sign of guilt within the Bush administration. Argument against: Given how eagerly Bush distanced himself from Rover when it looked like he would be indicted, hard to imagine him embracing Hadley as furiously as he is at the moment if he thought he was guilty. And whatever involvement Hadley had doesn't seem to have been of the "lone wolf" variety. (special thanks to Valley Girl for the Clue graphic) What Did He Know and When Will He Know It? According to a new article by Murray Waas, both Preznit Gung Ho and Big Time received information in a classified Presidential Daily Brief (PDB) 10 days after the 9/11 attack on the World Trade Center that there were no ties between Saddam Hussein and Al Quaeda. Its existence was not disclosed to the Senate Intelligence Committee until the summer of 2004, and the administration has steadfastly refused to turn it over since then despite demands from both sides of the aisle. So why was any Bushbot with a pulse screaming about Al Quaeda's ties to Saddaam during the ramp up to war? 
One reason that Bush, Cheney, and Rumsfeld made statements that contradicted what they were told in CIA briefings might have been that they were receiving information from another source that purported to have evidence of Al Qaeda-Iraq ties. The information came from a covert intelligence unit set up shortly after the September 11 attacks by then-Undersecretary of Defense for Policy Douglas J. Feith. That's just ducky. One of the only bright moment in Bob Woodward's otherwise slavish Plan of Attack comes when Gen. Tommy Franks calls Feith "the fucking stupidest guy on the face of the earth." This would of course be the same Doug Feith whose prewar intelligence activities the Pentagon's inspector general is now investigating. I'm gonna go out on a limb here and say this might qualify as intelligence the President had that the Democrats didn't have when they voted to authorize the war. They're still a bunch of sheep in my book, but it doesn't make BushCo.'s attempts to re-write history (that's right, suck it up, Dick) any less egregious. How the Middle East Was Won According to London's Daily Mirror, Billy Sol Huroc George Bush once discussed blowing up the headquarters of Arabic-language TV al-Jazeera real good with Tony Blair. The White House calls the charge "outlandish," despite the fact that the US actually did bomb Al-Jazeera's Baghdad office in April 2003, killing journalist Tareq Ayyoub. At the time, State Department spokesmen said it was a "mistake," and called upon al-Jazeera "not to jump to conclusions." We know it pales next to big news like Oprah ending her 14 year feud with Letterman or Bob Novak trying to rewrite the Battle of Carville by punching a fellow passenger on a flight to Chicago. But all we want to know is -- does this mean Eason Jordan gets his job back? (hat tip to John Amato for the Eason Jordan reminder) Pressure Tactic? But Who Is Being Pressured? 
Scooter Libby's defense team has hired John Cline, an attorney at Jones Day and an expert in classified information cases, as an additional legal representative. The SF Chronicle has a short write-up on this today, and says: "This is about as subtle as a sledgehammer to the government," said Robert Weisberg, a criminal law expert at the Stanford Law School. "This suggests they are going to use a very concerted and aggressive strategy." Although Libby was investigated on suspicions that he or others in the Bush administration might have illegally leaked to journalists the identity of a covert CIA operative, Valerie Wilson, the actual crimes Libby was charged with have nothing to do with the misuse of government secrets. But Cline's involvement suggests that a defense strategy may be to try to bring large volumes of classified information into the trial to demonstrate the many things Libby was dealing with as a senior national security adviser when he spoke with journalists and later testified to the grand jury. If Libby's defense can show his thinking might have been obscured by the many sensitive issues he was dealing with, it could potentially weaken the case against him. I can buy that, to some extent. In a high profile, high stakes case like this, you want to signal to the US Attorney that you are willing to go to the mat, if necessary at the front end. Puts you in a better negotiating position -- you never want to begin negotiations from a point of weakness, especially with an indictment that is as specific as Libby's is. But here's my question: does a signal that Libby is going to aggressively pursue his case at trial put pressure on Fitz? Or is this a signal designed to perk up the ears of the folks at the White House? 
I think you can make a strong argument for the latter, given how badly the politicos allied with the Administration want the whole stench of this to go away as soon as possible, given the already bad poll numbers and horrible implications that a full-blown trial could have for the 2006 elections. Who is Libby really trying to pressure at this point? The law governing the use of the classified data is called the Classified Information Procedures Act, or CIPA. It sets up strict procedures for attorneys to review classified data with their clients in special rooms and for how the defense can request the disclosure of such secrets during a trial. The arguments can be critical. If a judge agrees to permit the use of the information in open court, then the prosecutors are faced with having either to allow the disclosure of sensitive government information or to consider dismissing the charges. "In this case the defense doesn't have to win on every element of their claims, they just may want to scare the daylights out of the government at this stage," said Weisberg. Yes, but which part of the government? My guess is that Fitzy doesn't scare easily. In fact, given some of the cases he has prosecuted -- Sheik Omar, Osama bin Laden, the Gambinos -- I'd say it is likely pretty tough to scare him at all at this point. Frankly, he's the premiere terrorism prosecutor in the country, given the magnitude of the cases he's tried. At this point, I doubt threatening him with a flurry of motions hearings on classified information means much of anything, other than another day and another late night at the office. He's clearly already got a pizza delivery service on speed dial, and some spare socks at the office, so what's the big deal for Fitz? But for Bushie and his cronies? Life is not looking so sweet these days. Their poll numbers are in the toilet and sinking. 
Republicans who aren't under investigation are outnumbered by those who are these days (or, at least, it sure as hell seems that way, doesn't it -- between Fitz and the Abramoff mess and Delay's indictment?). Oh, and the strategy to talk about all the classified information on Libby's plate as a means to show how important, distracted and busy he was? Well, that could backfire as a legal strategy in a big way if they open the door too wide for Fitz on cross-examination. Does Libby and his legal team really want to open the door to Libby's veracity -- and his bosses (both the VP and the P)-- on national security matters? Is that truly the way you'd want to go were Libby your client? Especially given the fact that close to 60% of the American public now thinks the Administration lied to them about...well, just about everything. By threatening to go to trial, what is Libby really angling for -- and from whom? Dunno, but let me join the "no pardon, no way" chorus. As if it wasn't clear all along that I felt that way, anyway, I just wanted to be on the record. UPDATE: Reader Mamayaga makes an excellent Occam's Razor point in the comments: "Actually, I'm thinking this might not be an aggressive signal at all, but rather just shoring up the defense for expected new charges of disclosing classified information." True -- why not grab the good attorneys, so Karl doesn't get them? Toto's Nemesis Bending Over Backward to Save Own Political Ass Oops. When you attribute a quote on the floor of the House to a fellow Republican, apparently you'd better get it in writing first. Danny Bubp, a freshman state representative who is a colonel in the Marine Corps Reserve, told The Enquirer that he never mentioned Rep. John Murtha, D-Pa., by name when talking with Schmidt, and he would never call a fellow Marine a coward.... "There was no discussion of him personally being a coward or about any person being a coward," Bubp said.... 
Hmmm, maybe Mean Jean should choose her political fall guys a little more carefully in the future. And maybe she should...um...be a little less deliberately...erm...obtuse. (Or stop lying. You pick.) It's unclear whether Schmidt, who will start her 79th day in the House today, knew at the time of her remarks that Murtha had served 37 years in the Marine Corps and Marine Corps Reserve. She immediately took back her remarks. It's against House rules to refer to a fellow lawmaker by name or to criticize them. Schmidt, a Republican from Clermont County's Miami Township, then wrote to Murtha to explain that she has a lot to learn and did not mean to disparage his service. Yes, I can certainly see how calling someone a coward in the well of the House of Representatives on national television isn't meant to disparage their character or that of their political party. Ahem. Murtha is taking the high road. Murtha, a lawmaker since 1974 and a Vietnam veteran who received a Bronze Star and two Purple Hearts, said Sunday on NBC's "Meet the Press" that he doesn't hold Schmidt responsible. "This is a new member, and sometimes they give her something to say that ... they get out of hand. I try not to take this stuff personal," he said. Hmmm...smarmy political tactics or decent guy trying to do what he feels is right for his country and the soldiers who serve it? Well, not really a choice for me. Let's hope the rest of America is waking the hell up. For more on Murtha's statements and the aftermath, the AP has a quick follow-up piece. And the NYTimes has even more. Howie Kurtz has more on the smarmy politician tactic of smearing via a straw man in today's Media Notes Extra. Josh Marshall cracks me up. As does Roger Ailes (the good one), who makes a very good point -- Toto's nemesis participated in an anti-Murtha event the night before and, unless she had a frontal lobotomy in between, had to have known that Murtha was a former Marine. Poor Mean Jean. 
All that contortion for nothing. (Graphics love to Artis Conception. Mwahahaha. Disturbing, yet hilarious.) UPDATE: Crooks and Liars has some hilarious video up of Mean Jean when she was running against Paul Hackett. Definitely worth a watch. UPDATE #2: Ooops -- as reader Steady Eddie points out, it was Josh who made the prior night anti-Murtha event point, not Roger. My bad. Never copyedit while watching an Elmo video with your toddler. The Scanlon Plea Hat Trick I spent a few hours last night researching the "speech and debate clause" to explain how significant Scanlon's plea is to the prosecution of members of Congress involved in the Abramoff mess. And I woke up to find that Bloomberg has beaten me to the punch. (Darn! Hate it when that happens.) But I'm going to outline the issue anyway, because it is worth the detail to understand how significant this plea deal is going to be for the DoJ in going after the whole of the conspiracy -- wherever it reaches into the halls of Congress. From Bloomberg's article: Scanlon's testimony may allow the government to overcome a defense based on the "speech and debate'' clause of the Constitution, which protects lawmakers from being prosecuted for legislation they introduce or speeches they make in Congress, Cole and other experts said. Scanlon may be able to testify about deals between lawmakers and lobbyists; such quid pro quos wouldn't be protected by the Constitution. "The speech and debate clause only prevents you from using a legislative act'' as evidence, Cole said. "The agreement is the crime.'' This is exactly right. The speech and debate clause is something that most folks never give a second thought, but it is a substantial protection for legislators -- and one that was a substantial block to the DoJ Public Integrity Unit until they got Scanlon (and perhaps Ney) to flip. 
Having direct testimony of a potential quid pro quo arrangement between monies paid and legislation being proposed is an essential step, and one that likely has a large number of elected officials dealing with acid reflux and then some today. "It is very tricky to prosecute a congressman,'' said Cole, now a Washington-based attorney for the law firm Bryan Cave LLP. "If a congressman gets on the floor of the Congress and says, "I'm introducing this bill, I think it stinks, but I'm getting paid $100,000 to do it,' that statement can't be used, even though it is in the Congressional Record.'' Scanlon's cooperation may signal that prosecutors have testimony that can overcome this obstacle. "If there is that explicit quid pro quo, that can be bribery,'' lawyer Reid Weingarten said last month. Weingarten, like Cole, served in the Justice Department's Public Integrity Section; he now is an attorney for the Washington-based firm Steptoe & Johnson LLP. U.S. District Judge Ellen Segal Huvelle accepted the plea agreement yesterday that calls for Scanlon to help prosecutors make their case against Abramoff and investigate his contacts in and around Capitol Hill. "They're using Scanlon to get everybody,'' said Melanie Sloan, a former federal prosecutor who now heads Citizens for Responsibility and Ethics in Washington, an advocacy group. "That's how it works. You keep rolling people.'' I like the sound of that. And that's how it works in most extensive conpiracy cases -- you roll one, and use that witness to roll the next guy...and on up the chain. The pressure mounts on everyone else involved to flip while there are still deals left to grab. The more people involved who cooperate, the bigger the incentive to cooperate for those remaining. But what exactly is the "speech and debate clause" anyway? The speech and debate clause is contained in the US Constitution, Article I, Section 6, Clause 1. Section 6. 
The Senators and Representatives shall receive a compensation for their services, to be ascertained by law, and paid out of the treasury of the United States. They shall in all cases, except treason, felony and breach of the peace, be privileged from arrest during their attendance at the session of their respective Houses, and in going to and returning from the same; and for any speech or debate in either House, they shall not be questioned in any other place. In other words, members of Congress cannot be arrested in the regular performance of their legislative duties -- but they can be prosecuted if it can be shown that a crime was committed, such as bribery, in order to obtain legislation. It is a very narrow exception, and one that can cause an enormous amount of difficulty for prosecutors. So much so, that an entire segment of the DoJ's Criminal Resource Manual is dedicated to the legal precedents on this issue. While the Speech and Debate Clause has been expressly held not to shield Senators or Representatives against bribery charges, Johnson v. United States, 383 U.S. 169 (1964), it does impose significant limits on the type of evidence that can be used to prove such an offense. The Clause broadly protects members of Congress "against inquiry into acts that occur in the regular course of the legislative process and into the motivation for those acts," United States v. Brewster, 408 U.S. 501, 525 (1972), and "precludes any showing of how [a member of Congress], acted, voted, or decided." Id. at 527. The Supreme Court has declared that "past legislative acts of a Member cannot be admitted without undermining the values protected by the Clause," including speeches in committee as well as those on the Floor of the Chamber, the Senator or Representative's votes, and his or her explanations for them. A somewhat wider latitude has been allowed insofar as the admissibility of activities that took place occurred prior to a legislative act. United States v. 
Helstoski, 442 U.S. 477, 489 (1979). However, the parameters of what constitutes a "legislative act" are quite broad, and can severely impair the ability of prosecutors to prove bribery and gratuity cases where the recipient is an elected Member of the Legislative Branch. In other words, you have to be able to show that the criminal act was the catalyst for the legislation. As in, there was a quid pro quo deal in place that a member would produce some legislation in exchange for money, favors, etc. -- and thus the deal is the criminal act at issue, and not the legislation itself. You can see how that sort of hairsplitting can be very difficult to show without someone on the inside providing the substantial details. Getting Scanlon to flip provides a hat trick for the Feds: 1. Scanlon gives the DoJ a connection to the inner workings of Abramoff's operation -- giving them Abramoff, Ralph Reed, and a lot of other money players, and a big window into the whole GOP money machine and College Republican Rethug cabal. 2. Scanlon also provides a window into Delay's operation, having worked for him for several years prior to leaving for a partnership with Abramoff. This gives the DoJ a better look inside Delay's KStreet operations and scheming from someone who would have seen it from the inside. 3. And Scanlon is the breach of the "speech and debate clause" firewall -- providing evidence of deals before legislation was introduced. This is vital for the DoJ, and has clearly rattled Ney enough that he's already talking with them. With the number of members of Congress potentially involved in this mess, if one of their own has started talking, along with Scanlon, then the DoJ can expect a parade of others. One thing you can count on is a politician's desire to save his or her own hide and political power base above everything else. (At least in most cases, anyway.) Quite a hat trick, I'd say. Pass the popcorn. 
For more on the speech and debate clause: -- Findlaw's Annotations provide a good case law reference. -- The DoJ's Criminal Resource Manual provides some great analysis on what the DoJ has to consider. UPDATE: More on the Scanlon plea details from TalkLeft. My Sister! My Daughter! In breaking news, Howard Kurtz in the Washington Post reviews Bob Woodward of the Washington Post talking to Larry King of CNN, the network which also hosts Howard Kurtz's show Reliable Sources. In other news, the New York Times take Howard Kurtz of the Washington Post to task for interviewing his boss Len Downie of the Washington Post about Bob Woodward of the Washington Post on his CNN show Reliable Sources. Remind me again -- how did we get into this mess? Bob Woodward: Not Exactly Hero Material Bob Woodward managed to jam his giant ego into Larry King's tiny studio tonight to answer a few simple questions. To say that he did not dazzle before the cameras would be too kind. He really gave Sulzberger a run for the title of Bang Bang the Idiot Boy. One can see why he is always being called out of the White House steno pool by the BushCo. brass -- he will most assuredly take down everything faithfully, and never be quite smart enough to know what it all means. Larry actually pressed him a couple of times, and Woody floundered. Larry showed the now-famous clip of Michael Isikoff pushing him about a "bombshell" story he was sitting on the night before the Libby indictment: KING: In retrospect, Bob, could you have said on the show that night, Well, to you and your viewers, I do have some information, I'm working on it, something was said to me, but I can't reveal it. That would have covered this whole thing. WOODWARD: But that's always the case. That's always the case. And that would be -- you know, well what is it -- you would have asked me, What are you working on, is it bigger than a bread box, is it a bombshell, is it a firecracker, is it a stick of dynamite and so forth. 
So Bob can't tell the truth because Larry might ask him about it. Journalistic integrity. Highly overrated. WOODWARD: Yes. I think I was a little hyper and a lot of pent-up frustrations, bad night. And as you have pointed out a number of times, I tend to be very neutral, overly neutral... I think the word you're hunting for is "dumb," Bob. WOODWARD: And so there is this moment when I realized I have a piece of something. I truly don't know what it means. But then I go in a mode where -- actually some people said, you know, "Why did you do this? Why not stay out of it? Why get involved?" And all of the juices -- my wife, Elsa, told me this, that she could almost hear it, the reporting news juices running. The necessity for talking about one's running juices on national TV escapes me, but be that as it may, Big Thick Bob has convinced me he didn't know he had something until Fitzgerald's press conference. But once he realized he had the first leak and was only a few phone calls away from being back in the spotlight again, I'm sure poor Elsa suddenly was knee deep in Woody's effluvium. KING: How did it even come up? WOODWARD: Came up because I asked about Joe Wilson, because a few days before, my colleague at the "Washington Post," Walter Pincus, had a front page, saying there was an unnamed envoy -- there was no name given -- who had gone to Niger the year before to investigate for the CIA if there was some Niger-Iraq uranium deal or yellow cake deal. This is key, because it's the first indication there has been about when exactly Woodward spoke to his source. Pincus's article on Niger uranium came out on June 12, 2003 but it did not disclose Joe Wilson's identity. Libby had first learned that Joe Wilson's wife worked for the CIA in late May or early June, and prior to the June 12 article being printed Pincus had contacted Cheney's office for comment. 
On June 11 or 12, however, Under Secretary of State for Political Affairs Marc Grossman held an oral brief at the White House for a larger group of people. I'll let my esteemed colleague emptywheel explain the significance: Now, there are two groups of people we know were privy to information on Wilson here (there may be more). The first is a group at OVP--the people who discussed how to respond to Pincus' inquiry about Wilson's trip and those who knew enough to write "Wilson" on their CIA dossier; for them, this information is almost certainly tied to the context of a malicious Get Wilson campaign. Then, there's the group that attended the White House meeting; for these people, information on Wilson might be less malicious, tied to the larger question of the problem with the Niger claim. Both groups include Libby, but the latter group would almost certainly include Hadley, who is reported to be Woodward's source, as well as people like Rove and Condi. So the list of suspects becomes much longer now that we know the date that Woodward first talked to his source, and stretches from Cheney's office to Rove, WHIG and the rest of the Bushbots. KING: What would you have done if the source had said, "Don't tell him," and you were subpoenaed to deposition? Would you refuse? WOODWARD: That is a situation I have not had to deal with in this case. But of course, when I went into my aggressive reporting mode, I didn't know exactly what was going to happen. Now, if I hadn't done that, and the source had said, "Keep quiet; it's confidential," then the special counsel in this case, Fitzgerald, wouldn't have known, I guess, and I would have stayed out of it. Right, Bob. Scooter Libby's being charged with perjury because he said he thought he told reporters he had heard it from other reporters, even though the reporter he says he thought he heard it from (Russert) says he didn't. 
So Woodward's notes say that he might have asked Libby about Plame, and this on June 23, possibly before he talked to any reporter and certainly any other than Judy Miller that we know of. What, exactly, are the chances that Libby's lawyers are not gonna subpoena the only journalist who potentially told him about Plame's identity? Bobby's secret would have come out one way or the other, if Scooter is planning any sort of defense short of insanity. And I'll bet that's how Bob twisted Mr. "X's" arm, which it sounds like he most assuredly did, journalistic privilege be damned. Whoever Mr. X is, Woody wasn't gonna do any jail time for him. KING: Why didn't you tell [Len Downie]? WOODWARD: Because I was focused on getting the book done. You know, the significance of this is yet to be determined. And what's the good news in all of this is, when it all comes out -- and hopefully it will come out -- people will see how casual and off-hand this was. I made the comment yesterday that the leakers had played Bob like a two dollar banjo, but I think it must have been an incredibly frustrating process for them. Because sadly, I don't think Bob did understand the significance of what they were telling him. Matt Cooper tipped to it instantly, but whoever told Woody was just a smidge too "casual" about it and dogged, simple Bob just didn't get what was going down. Still doesn't. Can you see Cheney picking up the paper every morning, riffling through it and muttering "fucking Woodward, where is the damn thing?" The rest of the interview was just pathetic. At one point Woodward was reduced to holding up old headlines from the Washington Post to show us how valuable his reporting had been. (As Bob Adams said in the comments, "he's gonna get 'rhoids if he keeps pulling Nixon out of his ass.") You know, I truly do believe this whole uproar has shocked Woodward. He doesn't get it. He thought he was going to be the hero. 
Even Larry asked him if he was being "used" by the administration -- Bob just looked befuddled. He seems to believe that whatever price he paid for access to an otherwise impenetrable administration has been worth it, with no notion that he has turned into a complete tool. We can assume his access to BushCo. for the purpose of finishing his new tome will continue unabated. Crooks & Liars has extensive clips. Clarification: When I said Bob didn't "know" he had anything until Fitzgerald's press conference, I don't think he necessarily knew he was the first (potentially) to be leaked to until then. I still think that the wheels were set in motion the week prior that lead to Bob's sudden willingness to come forward. But I think it didn't occur to him until he heard Fitzgerald speak that there was a chance for him to step back onto the stage with a big, dramatic flourist that would resurrect his Watergatian glory. We presume there was nobody around him to tell him in truth he would wind up the goat. Monday Evening Round-Up Larry Wilkerson says that Dick Cheney was the catalyst for torture in the Administration. "There's no question in my mind where the philosophical guidance and the flexibility in order to do so originated -- in the vice president of the United States' office," he said. "His implementer in this case was [Defense Secretary] Donald Rumsfeld and the Defense Department." Next up, Dick Cheney will be calling Larry Wilkerson a degenerate traitor and Toto's Nemesis will call him a coward while dressed another lovely red, white and blue jumpsuit ensemble. Speaking of Dick Cheney, today he accused critics of the Administration "corrupt and shameless" revisionists. Pot, this is kettle... And on the question of trouble in paradise, Elizabeth Bumiller weighs in: In short, Mr. Bush is said to be upset with Mr. 
Cheney because the vice president promised a fast, rosy finish to the war in Iraq, now a two-and-a-half-year-old conflict, and because of the indictment of the vice president's top aide in a case that has focused on alleged efforts to discredit a war critic. For now, the consensus among Republicans close to the White House is that Mr. Bush may well have been angry about the actions of Mr. Cheney's office, and that he has long been aware that the vice president oversold the case on Iraq's weapons of mass destruction. I'm having a moment of zen that I don't have to spend time with either of these people, frankly. If you aren't reading this, you are missing out. I'm starting to truly love William Arkin's "Early Warning" national and homeland security blog on the WaPo. Today, he tackles the issue of white phosphorus and stupid military statements about it (and about other things), and it is excellent reading. Porter Goss says that the CIA does not torture. Note the present tense on the verb. Ahem. And in case you want something more edifying, try this Digby piece. Excellent reading (as always). And in case you were wondering, the Iraqis want us out. There's an intriguing read up on TomDispatch, tying a lot of the BushCo crap together in one big post. Good for the schadenfreude-addicted among us. And just for sheer brilliance of prose and logic, read this from Mark Kleiman. For the record, Olbermann rocks. Great show this evening. Catch it if you can. (But thus far, Woodward sucks. Blergh. Nothing can ruin an evening more than forcing yourself to watch Booby and Larry Kink at the same time.) (This is one of my favorite Monet paintings. One of these days, I'm going to find this in a good canvas reproduction, and hang it in my bedroom where I can see it first thing in the morning. But for now, just enjoy along with me. Claude Monet, "Boats Leaving the Harbor," 1865.) 
Scariest Words in Washington: Cooperation and Plea Michael Scanlon pleaded guilty by information today in Federal Court to a charge of conspiracy, according to the NYTimes. Mr. Scanlon agreed to pay restitution totaling more than $19 million to the tribes, The Associated Press reported from the courtroom, and could face up to five years in prison. Mr. Scanlon, 35, was accused of conspiring to defraud Indian tribes out of millions of dollars as part of a lobbying and corruption scheme that involved wining and dining of some lawmakers, treating them to lavish trips and contributing to their campaigns. Until recently, Mr. Scanlon occupied a powerful - and lucrative - position at the intersection of political power and lobbying influence. For several years, he worked as a top aide to Representative Tom DeLay of Texas, the Republican majority leader. He left Mr. DeLay's office in 2000 to become an associate of Jack Abramoff, a Republican lobbyist. In that position "at the intersection of political power and lobbying influence," Mr. Scanlon had contact with a lot of highly placed politicos. Most of them Republicans feeding at the trough. And you can be sure that he knows where a few skeletens are buried -- and is willing to talk about them -- or he wouldn't be getting a deal. Scanlon faces 5 years in federal prison, and has agreed to repay $19 million in restitution out of his own pocket. That's a pretty stiff restitution amount -- although probably no more than a drop in the bucket compared to how much he and Abramoff and their political cronies are rumored to have scammed from their clients (more than $80 million, give or take). But still, definitely not a sweet deal -- it's hard time and serious restitution. The plea was negotiated by Scanlon's attorney, Plato Cacheris, who is an exceptional trial lawyer for white collar crime cases. One of the best practicing these days, frankly. 
That Scanlon is still facing jail time and a stiff level of restitution says how much the DoJ had on him -- and how much he is having to give them -- to even get the deal he has. In my mind, this is an enormous victory for the DoJ Public Integrity unit. We'll see as time goes on with this matter how much testimony and cooperation Scanlon will be giving, but I would say that it is broad and that it goes high up the chain, just based on his level of involvement and length of contact in all of this. And it looks like Scanlon isn't the only one in the mood to cut a deal. (Which is likely one of the big reasons that he is doing so, btw -- as I've said before, the first to deal gets the best offer. The last man standing in a conspiracy gets hammered.) Representative Bob Ney, an Ohio Republican, who heads the House Appropriations Committee, was alluded to in the indictment (although not by name) as a main beneficiary of largess, in return for helping Mr. Abramoff and Mr. Scanlon with their clients. Lawyers involved in the case have confirmed that Mr. Ney is the "Representative #1" cited in the indictment. The congressman - who has not been charged - has asserted that he was duped by the two and is cooperating with prosecutors, a spokesman for Mr. Ney says. Well, would you look at that? Rep. Ney is putting his self-interest ahead of that of Abramoff and Delay. Hmmm...what could that mean? So sad that the head of the Appropriations Committee in the House, a long time Congressman like Ney, could be duped so handily. *cough* Ahem. I say, it means that Delay and Abramoff had better lay in a good supply of antacids. You recall Mr. Delay and Mr. Abramoff, don't you? Mr. DeLay has been indicted in Texas on charges involving political fund-raising that are not related to the inquiry in which Mr. Scanlon pleaded guilty today. And Mr. Abramoff has been indicted in Florida on unrelated fraud-and-conspiracy charges involving an attempt to buy a fleet of casino boats. 
Power brokers in the Republican party all. What an ethics-filled group they've turned out to be, too. For shame just seems too subtle...think I'll just buy some more popcorn instead and sit back and enjoy the show. Oh, and Fitz? Still hard at work. UPDATE: The WaPo has much, much more, including this: The charge was in a criminal information filed Friday accusing Scanlon of conspiring with Abramoff to defraud Indian tribes and engage in a corrupt scheme that lavished trips, sports tickets and campaign donations on a member of Congress, Rep. Bob Ney, R-Ohio. DeLay is among those facing scrutiny for his associations with Abramoff, including a trip to Scotland and use of Abramoff's skybox at a Washington sports arena. Abramoff's lobbying network stretched far into the halls of Congress. Documents obtained by The Associated Press show nearly three dozen lawmakers helping to block an American Indian casino in Louisiana while collecting large donations from the lobbyist and his tribal clients. And the WaPo says Scanlon's restitution will be in excess of $19 million, along with cooperation with the ongoing investigation. Fasten your seatbelts, boys and girls, looks like some folks in D.C. may be in for a very bumpy ride. And did I mention that Fitz is still hard at work? Yeah, I thought I did. Having trouble accessing the new site? New Digs Moussaoui Case Stunner FDL Late Nite: Feingold Plays Rochambeau With Linc... Thanks For Being Patient Late Nite FDL: Incomprehensible Demoralization 3 Years of War Redd on CSPAN: No Prisoners Nancy Pelosi Should Act First, Criticize Later Let's Play Strategery
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
5,856
Le barrage de Teles Pires est un barrage dans le Mato Grosso en Brésil sur le Rio São Manuel. Il est associé à une centrale hydroélectrique de . Sa construction a commencé en 2011. Notes et références Voir aussi Articles connexes Hydroélectricité au Brésil Teles Pires Teles Pires Environnement au Mato Grosso Système hydrologique de l'Amazone
{ "redpajama_set_name": "RedPajamaWikipedia" }
2,301
{"url":"https:\/\/www.physicsforums.com\/threads\/mach-number-and-velocity.759247\/","text":"# Mach Number and Velocity\n\n1. Jun 23, 2014\n\n### LaReina\n\n1. The problem statement, all variables and given\/known data\nAn object is flying through the air at M=0.5. The free stream temperature is equal to 180 K. At what speed should the object fly when the temperature is 100 K in order to maintain the same Mach number? (therefore ensuring compressibility effects are the same). What was the speed of the first object.\n\n2. Relevant equations\n$M=\\frac{V}{a}$\n\n$a=\\sqrt{\u03b3RT}$\n\n3. The attempt at a solution\nI've worked out the speed for the first object which is as follows\n$a=\\sqrt{1.4\\times287\\times180}=268.931m\/s$\n$V=0.5\\times268.931=134.465m\/s$\n\nHowever when I work out the speed for the second temperature using the exact procedure, I get 100.225 as an answer. The answer that has been given is 88.52m\/s.\n\n2. Jun 23, 2014\n\n### Staff: Mentor\n\nPlease show us your work for the second temperature.\n\nChet\n\n3. Jun 25, 2014\n\n### LaReina\n\n$a=\\sqrt{1.4\\times287\\times100}=200.448$\n$V=200.448\\times0.5=100.224$\n\n4. Jun 25, 2014\n\n### Staff: Mentor\n\nThis calculation looks OK to me.\n\nChet\n\n5. Jun 25, 2014\n\n### dauto\n\nMay be the question has a typo and it meant to ask what happens if the temperature drops 100K (which means it drops to 80K). 
That brings the answer closer to the answer provided.","date":"2017-08-22 23:03:33","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.3678933084011078, \"perplexity\": 1313.9530381382317}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.3, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2017-34\/segments\/1502886116921.7\/warc\/CC-MAIN-20170822221214-20170823001214-00682.warc.gz\"}"}
null
null
{"url":"http:\/\/econ.tau.ac.il\/research\/abstract.asp?id=162005","text":"Working Paper #16-05 Interim Rationalizability Eddie Dekel , Drew Fudenberg and Stephen Morris This paper proposes the solution concept of interim rationalizability, and shows that all type spaces that have the same hierarchies of beliefs have the same set of interim rationalizable outcomes. This solution concept characterizes common knowledge of rationality in the universal type space. Jel Nos.: C70, C72 Keywords: Rationalizability, incomplete information, common knowledge, universal type space, strategic topology PAPER in PDF","date":"2013-05-21 18:54:47","metadata":"{\"extraction_info\": {\"found_math\": false, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.8749954700469971, \"perplexity\": 7303.058350957933}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2013-20\/segments\/1368700438490\/warc\/CC-MAIN-20130516103358-00047-ip-10-60-113-184.ec2.internal.warc.gz\"}"}
null
null
Q: Specifying the screen orientation of an ACTION_VIEW intent I want to play a video, so I'm using action_view like this. intent.setAction(android.content.Intent.ACTION_VIEW); Does anyone know of a way to specify that I want the video always played in a vertical screen orientation, or that determination always up to an activity itself? A: That determination is up to the activity itself. A: In the layout XML file you could add: android:orientation="vertical"
{ "redpajama_set_name": "RedPajamaStackExchange" }
7,812
Society for History Education, Inc. A non-profit organization and publisher of The History Teacher (ISSN: 0018-2745) is a peer-reviewed quarterly journal. THT publishes inspirational scholarship on traditional and unconventional techniques in history education. Volume 54 (2020-2021) is delivered internationally in print to members of the non-profit organization, the Society for History Education. The History Teacher Archives Contributing Materials Advertisement Placements Permissions and Copyrights Student and Teacher Awards Memberships/Subscriptions Society for History Education CSULB - 1250 Bellflower Blvd. Long Beach, CA 90840-1601 info@thehistoryteacher.org Volume 54, No. 1 November 2020 The History Teacher thehistoryteacher.org/N20 Front Cover: U.S. Mail Letterbox. Thurgood Marshall U.S. Courthouse in the Financial District of New York City. Photograph by Carol M. Highsmith, 2019. Library of Congress, Prints and Photographs Division, Carol M. Highsmith Archive, LC-DIG-highsm-55477. https://www.loc.gov/item/2019639050/. Back Cover: U.S. Courthouse and Post Office, Pittsburgh, Pennsylvania. Photograph by Carol M. Highsmith, 2006. Library of Congress, Prints and Photographs Division, Carol M. Highsmith Archive, LC-DIG-highsm-01599. https://www.loc.gov/item/2011647740/. In an electronic age when images and information can magically appear at our fingertips—and disappear just as quickly—historical artifacts are, indeed, things of the past. For historians, archivists, and adventure-seekers in general, little is more enticing than the opportunity to handle the actual, physical records of the past. Books, periodicals, personal letters—not only are these vital to human history itself, these are all made possible by the people of the postal service. The History Teacher appreciates the incredible dedication of postal service workers around the world, and acknowledges their extraordinary contributions to our lives and our history. 
We hope you and your students enjoy the possibilities presented in this edition of The History Teacher, including a special focus on The Student and our annual celebration of the prize-winning student authors for National History Day. Front Matter | Back Matter THE CRAFT OF TEACHING Evaluating the "Professionalizing History Majors" Course: Historical Knowledge and Co-Curricular Activities by Daniel S. Murphree (pp. 9-37) In Search of the Right Words: A History Teacher's Exploration of College Students' Epistemic Beliefs about History by Lucia Antonelli-Carter (pp. 39-67) History by the Numbers: A Quantitative Approach to Teaching the Importance of Conflicting Evidence by Peter Burkholder (pp. 69-106) NOTES AND COMMENTS How Do History Majors Fare in the Job Market? by H. Robert Baker and Gregory B. Lewis (pp. 107-128) SPECIAL FEATURE NATIONAL HISTORY DAY 2020 PRIZE ESSAYS by Jane Dabel, The History Teacher (pp. 129-130) Too Strong For a Woman: How Bernice Sandler Created Title IX to Break Barriers for Female Faculty in Higher Education by Natalie Miller, Senior Division (pp. 131-153) The Fifth Circuit Four: The Unheralded Judges Who Helped to Break Legal Barriers in the Deep South by Max Grinstein, Junior Division (pp. 155-179) Full Reviews Section (pp. 181-193) Austin, Brad and Pamela Grundy, eds. Teaching U.S. History through Sports by Ashley Brown Balto, Simon. Occupied Territory: Policing Black Chicago from Red Summer to Black Power by Sarah E. Doherty Getz, Trevor R. A Primer for Teaching African History: Ten Design Principles by Jonathan T. Reynolds Guelzo, Allen C. Reconstruction: A Concise History by Bradley R. Clampitt Levin, Kevin M. Searching for Black Confederates: The Civil War's Most Persistent Myth by Stanley Harrold Rembis, Michael, Catherine J. Kudlick, and Kim E. Nielsen, eds. The Oxford Handbook of Disability History by David Neumann Townsend, Robert B. 
History's Babel: Scholarship, Professionalization, and the Historical Enterprise in the United States, 1880-1940 by James P. Cousins Wineburg, Sam. Why Learn History (When It's Already on Your Phone) by Jeffery D. Nokes 7 Contributors to The History Teacher 194 The History of The History Teacher 197 Questionnaire for Potential Reviewers 198 Membership/Subscription Information 200 Submission Guidelines for The History Teacher ADVERTISERS IN THIS ISSUE 8 Society for History Education: The Richard and Louise Wilde Award 38 Sultan Qaboos Cultural Center: Indian Ocean in World History 68 Association for Asian Studies: Attend an AAS Conference 154 Society for History Education: Celebrating 50 Years 180 American Historical Association: New AHA Booklet Lucia Antonelli-Carter is originally from Bologna, Italy. She received an advanced degree in Modern European History from the University of Bologna in 1998. She is an Associate Professor of History at Mars Hill University, where she teaches European and world history courses. Her current research interest reflects on the ways in which historical consciousness and historical thinking prepare college graduates to face with confidence the challenges and opportunities of the current political and economic environment. H. Robert Baker holds a Ph.D. in History from the University of California, Los Angeles. He is an Associate Professor and Director of Undergraduate Studies in the Department of History at Georgia State University, and a Chancellor's Learning Scholar. Peter Burkholder is a Professor of History at Fairleigh Dickinson University. He is on the editorial board of The Teaching Professor, is a consulting editor for College Teaching, and serves on the advisory boards of the Society for History Education and the International Society for the Scholarship of Teaching and Learning in History. 
His article on content and critical thinking, published in The History Teacher in 2014, won the American Historical Association's 2015 Gilbert Award for the best article on teaching history. Max Grinstein is a student of Belmont Home School in Houston, Texas. He won first place in the Junior Paper division at the 2020 National History Day contest for his essay, "The Fifth Circuit Four: The Unheralded Judges Who Helped to Break Legal Barriers in the Deep South." Gregory B. Lewis holds a Ph.D. in Public Administration from Syracuse University. He is a Professor and Chair of Public Policy in the Department of Public Management and Policy in the Andrew Young School of Policy Studies at Georgia State University. Natalie Miller is a student at East High School in Duluth, Minnesota. Her paper, "Too Strong For a Woman: How Bernice Sandler Created Title IX to Break Barriers for Female Faculty in Higher Education," won first place in the Senior Paper division at the 2020 National History Day contest. Daniel S. Murphree received his Ph.D. from Florida State University and currently is an Associate Professor of History at the University of Central Florida. A proponent of the Scholarship of Teaching and Learning (SoTL), his research focuses on how to improve undergraduate history instruction and has been published in venues such as Perspectives on History, Teaching History, and The Journal of American History. Subscribe to THT via the American Historical Association (AHA) Organization of American Historians (OAH) U.S. Mail with a Printable Form Online Reading Room: Gaming in the from The History Teacher Comics, Cartoons, and Graphic Novels Wikipedia, Twitter, and "Instant Historying" Ⓒ Society for History Education, Inc.
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
1,512
Q: 'Insufficient Privileges' error while creating Force.com Site While creating force.com site for an org, we are getting an error of 'Insufficient Privileges'. Seems one of an issue, any clue here? Ps : I'm trying to create Site from Sys Admin login. A: "Profiles" wasn't enabled in the trial org, hence I was receiving this one of kind error. Raised case with salesforce and got "Profiles" setup option enabled which automatically fixed Site creation issue.
{ "redpajama_set_name": "RedPajamaStackExchange" }
5,931
Our Student Services Europe Education India Review®: A new monthly magazine on India Tadworth, Surrey, United Kingdom, 01 August 2018 – After successful launch of Scientific European® (SCIEU)® (a popular science magazine) and two peer-reviewed research journals European Journal of Sciences (EJS)® & European Journal of Social Sciences (EJSS)®, UK Education Consultancy Services Ltd announced the release of the premiere issue of India Review® (a monthly magazine on India) India Review® [ISSN 2631-3227 (Online)|ISSN 2631-3219 (Print)] focusses on matters pertaining to India and is geared towards people who wish to be better informed and expand their views about India. It is the first magazine that aims to present a fresh outlander's perspective bringing interesting and relevant ideas and aspects from diverse fields of history, politics, economy, trade, literature, philosophy and culture of India to the audience worldwide, to the people who are ever interested in knowing and experiencing India and its culture and be part of the same as a happening part of the world. Umesh Prasad, the founder editor said: ''The thought to have a magazine exclusively on India came to my mind around the time when Brexit referendum took place. Post Brexit, there may be a renewed emphasis on India and the relationship, especially the trade relation, between Britain and India and the European Union would probably see some changes. Consequently, there may be a need for a suitable platform in the form of magazine where interested people in the UK, EU and India including the diaspora may converge and refer to. And, ''India Review'' was born, published initially in English, French and German.'' The digital copy of India Review® is available free of cost to our readers across the world in line with our organisational policy applicable for research journals published by us. It is published on monthly basis in both online and print formats. 
Please visit www.IndiaReview.co.uk in order to download a free copy and for more information.
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
1,688
\section{Introduction} Idempotent completions for higher categories has seen tremendous recent progress. For 2-categories (which we always assume are locally idempotent complete), completing with respect to the two notions of condensation monads \cite{1905.09566} and separable monads \cite{1812.11933} produces equivalent 2-categories by \cite[Thm.~3.3.3]{1905.09566}. The major difference is that condensation monads are \emph{non-unital} and include the \emph{data} of the separating structure, while separable monads are \emph{unital} and include only the \emph{existence} of separating structure, the choice of which is contractible. In the setting of $\rm C^*/W^*$ 2-categories (which we always assume are locally orthogonal projection complete), the analogous notion of separable monad is Longo's \emph{Q-system} \cite{MR1257245,MR1444286}, which was originally studied for its role in subfactor theory. In our recent joint article \cite{2105.12010}, we introduced the notion of \emph{Q-system completion} ${\sf QSys}(\cC)$ for a $\rm C^*/W^*$ 2-category $\cC$, which comes equipped with a canonical $\dag$ 2-functor $\iota_\cC : \cC \hookrightarrow {\sf QSys}(\cC)$. While we analyzed some of the general theory of Q-system completion in that article, we focused more on applications to $\rm C^*$-algebra theory, showing the $\rm C^*$ 2-category of $\rm C^*$-algebras is Q-system complete. As an application, we used Q-system completion to induce actions of unitary fusion categories on $\rm C^*$-algebras, similar to the spirit of \cite{2010.01072}. In this article, we study some basic formal properties of Q-system completion, and our proofs can easily be adapted to the separable monad setting. Our main results extend the treatment of idempotent completion for 2-categories in \cite[Appendix A]{1812.11933}. Here is our first main theorem: \begin{thmalpha} \label{thm:QSys3Functor} Q-system completion is a $\dag$ 3-endofunctor on the $\dag$ 3-category of $\rm C^*/W^*$ 2-categories. 
\end{thmalpha} In \cite[Prop.~A.6.3]{1812.11933}, Douglas and Reutter provided strong evidence towards this theorem, and they mentioned they expect such a result to be true. To prove this theorem, we introduce an \emph{overlay} compatibility between the 2D graphical calculi for a $\rm C^*/W^*$ 2-category $\cC$ and the $\rm C^*/W^*$ 2-category ${\sf Fun}^\dag(\cC\to \cD)$ for another $\cD$. (We show in Proposition \ref{prop:FunDagC*W*} below that ${\sf Fun}^\dag(\cC\to \cD)$ is $\rm C^*/W^*$ whenever $\cC,\cD$ are.) See \S\ref{sec:Fun(A,B)} below for more details. Our second main theorem regards the universal property for idempotent completion for 2-categories discussed in \cite[\S1.2]{2012.15774}, proving the best possible uniqueness statement. Given 2-categories $\cC,\cD,\cE$ and 2-functors $F: \cC \to \cD$ and $G: \cC\to \cE$, the 2-category of \emph{lifts} of $F$ to $\cE$ along $G$ is the homotopy fiber at $F$ of the functor $$ -\circ G : {\sf Fun}(\cE \to \cD) \to {\sf Fun}(\cC\to \cD). $$ Objects in this lift 2-category are pairs $(\widetilde{F}, \theta)$ where $\widetilde{F}:\cE\to \cD$ is a 2-functor and $\theta: F \Rightarrow \widetilde{F}\circ G$ is an invertible 2-transformation. We refer the reader to \S\ref{sec:UniversalProperty} for the rest of the unpacked definition. \begin{thmalpha} \label{thm:UniqueLift} Suppose $\cC$ is a $\rm C^*/W^*$ 2-category. The Q-system completion ${\sf QSys}(\cC)$ satisfies the following universal property. For any $\dag$ 2-functor $F:\cC\to \cD$ where $\cD$ is Q-system complete, the 2-category of lifts of $F$ along $\iota_\cC$ is \emph{$(-2)$-truncated}, i.e., equivalent to a point. That is, $-\circ \iota_\cC: {\sf Fun}^\dag({\sf QSys}(\cC) \to \cD) \to {\sf Fun}^\dag(\cC\to \cD)$ is a $\dag$ 2-equivalence. \end{thmalpha} The main idea of the proof of this theorem comes from \cite[\S3.1]{1910.03178}. 
By a version of Grothendieck's \emph{Homotopy Hypothesis} for 2-categories \cite{MR1239560}, the homotopy category of strict 2-groupoids and strict 2-functors localized at the strict equivalences is equivalent to the 1-category of homotopy 2-types. Hence the homotopy fiber of $-\circ G$ restricted to the \emph{core} 2-groupoids $$ -\circ G : \core({\sf Fun}(\cE \to \cD)) \to \core({\sf Fun}(\cC \to \cD)) $$ is $k$-truncated for $-2\leq k\leq 1$ if and only if various (essential) surjectivity properties hold for $-\circ G$. In turn, these surjectivity properties for $-\circ G$ are ensured by various levels of \emph{dominance} for the 2-functor $G$. We make these notions precise in \S\ref{sec:UniversalProperty}. While we work in the $\rm C^*/W^*$ setting both for novelty and for applications to the world of operator algebras, we re-emphasize that these results do not depend on the dagger structure. \subsection*{Acknowledgements} The authors would like to thank Thibault D\'ecoppet, Brent Nelson, and David Reutter for helpful discussions. The authors were supported by NSF grants DMS 1654159, 1927098, and 2051170. \section{Preliminaries} In this article, \emph{2-category} will always mean a weak 2-category/bicategory which is locally idempotent complete, and a $\rm C^*/W^*$ 2-category will always mean a weak $\rm C^*/W^*$ 2-category which is locally orthogonal projection complete. We refer the reader to \cite{2002.06055} for background on 2-categories and to \cite{2105.12010} for background on $\rm C^*/W^*$ 2-categories. We refer the reader to \cite{MR3971584} or \cite{2105.12010} for a detailed discussion of the graphical calculus of string diagrams for 2-categories. The only 3-categories in this article are the 3-category $2{\mathsf{Cat}}$ of 2-categories \cite[\S5.1]{MR3076451} and its 3-subcategories $\rm C^*2{\mathsf{Cat}}$ and $\rm W^*2{\mathsf{Cat}}$ of $\rm C^*/W^*$ 2-categories respectively. 
\begin{nota} In a 2-category $\cC$, we refer to its objects, 1-morphisms, and 2-morphisms as \emph{0-cells}, \emph{1-cells}, and \emph{2-cells} respectively. We denote 0-cells in a 2-category $\cC$ by lowercase Roman letters $a,b,c$, 1-cells by uppercase Roman letters ${}_aX_b,{}_bY_c$ using bimodule notation for source (left) and target (right), and 2-cells by lowercase Roman letters later in the alphabet $f,m,n,t$. We write 1-composition as $\otimes$ read \emph{left to right}, and we write 2-composition as $\star$, which is read \emph{right to left}. In the graphical calculus of string diagrams in 2-categories, which is formally dual to the manipulation of pasting diagrams, we read 1-composition \emph{left to right} and 2-composition \emph{bottom to top}. $$ f: {}_aX\otimes_b Y_c \Rightarrow {}_aZ_c \qquad\rightsquigarrow\qquad \begin{tikzcd}[row sep=0] &\mbox{} \\ a \arrow[rr, bend left = 30, "Z"] \arrow[dr, swap, "X"] & \mbox{} & c \\ &b \arrow[uu, Rightarrow, swap, "f"] \arrow[ur, swap, "Y"] \end{tikzcd} \qquad\rightsquigarrow\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,-.7) rectangle (.7,.7); \filldraw[gray!30] (-.2,-.7) -- (-.2,0) -- (0,0) -- (0,.7) -- (-.7,.7) -- (-.7,-.7); \filldraw[gray!55] (-.2,-.7) rectangle (.2,0); \filldraw[gray!75] (.2,-.7) -- (.2,0) -- (0,0) -- (0,.7) -- (.7,.7) -- (.7,-.7); \end{scope} \node at (0,-.9) {$\to$}; \node at (-.9,0) {$\Uparrow$}; \draw[violet,thick] (0,.3) -- node[left]{$\scriptstyle Z$}(0,.7); \draw[\XColor,thick] (-.2,-.3) -- node[left]{$\scriptstyle X$} (-.2,.-.7); \draw[orange,thick] (.2,-.3) -- node[right]{$\scriptstyle Y$} (.2,-.7); \roundNbox{unshaded}{(0,0)}{.3}{.1}{.1}{\scriptsize{$f$}}; } $$ In the 3-category $2{\mathsf{Cat}}$ of 2-categories, the object 2-categories are denoted by math calligraphic letters $\cC,\cD,\cE$, the 2-functor 1-morphisms are denoted by capital Roman letters $F,G,H$, the 2-transformation 2-morphisms are denoted by lowercase Greek letters $\varphi,\psi$, and 
2-modification 3-morphisms are denoted by lowercase Roman letters $m,n$. We write 1-composition of 2-functors as $\circ$, which we read \emph{right to left}, i.e., if $F: \cA \to \cB$ and $G: \cB \to \cC$, then $G\circ F : \cA \to \cC$. We write 2-composition of 2-transformations as $\otimes$, and we write 3-composition of 2-modification as $\star$. \end{nota} \begin{rem} While we will not rely on any 3D string diagram graphical calculus in this article, its use for weak 3-categories can be justified using the article \cite{1903.05777}. In several locations, we provide 3D diagrams for conceptual clarity. Our conventions for 1-, 2-, and 3-composition in these 3D diagrams are indicated in the figure below. \[ \tikzmath{ \begin{scope} \filldraw[primedregion=white] (-.3,.4) rectangle (.7,2.4); \filldraw[boxregion=white] (.7,.4) rectangle (1.7,2.4); \end{scope} \draw[black,thick] (.7,.4) -- (.7,1.4); \draw[snake,thick] (.7,1.4) -- (.7,2.4); \roundNbox{unshaded}{(.7,1.4)}{.3}{0}{0}{\scriptsize{$n$}}; \draw[dotted] (-.3,.4) rectangle (1.7,2.4); \node at (-.6,.2) [rotate=-53] {$\to$}; \node at (1,-.3) {$\Rightarrow$}; \node at (2.3,1) [rotate=90] {$\Rrightarrow$}; \draw[gray!30] (0,0) rectangle (2,2); \draw[gray!30] (0,0) -- (-.6,.8) -- (-.6,2.8) -- (1.4,2.8) -- (2,2); \draw[gray!30] (-.6,2.8) -- (0,2); \draw[gray!30,dashed] (-.6,.8) -- (1.4,.8) -- (1.4,2.8); \draw[gray!30,dashed] (1.4,.8) -- (2,0); \node at (-.45,.95) {\scriptsize{$\cA$}}; \node at (.15,.15) {\scriptsize{$\cB$}}; } \] \end{rem} \subsection{The 2-category \texorpdfstring{${\sf Fun}(\cA\to \cB)$}{Fun(C->D)} of 2-functors, 2-transformations, and 2-modifications} \label{sec:Fun(A,B)} In this section, we first describe our graphical conventions for working with 2-functors, 2-transformations, and 2-modifications. We then use our graphical notation to unpack their definitions. 
\begin{nota} To define 2-transformations between 2-functors and 2-modifications between 2-transformations in a diagrammatic language, we overlay the 2D diagrammatic calculus for the hom 2-category ${\sf Fun}(\cA\to \cB)$ between 2-categories $\cA,\cB$ with the 2D diagrammatic calculus for $\cB$. For our 2D diagrammatic calculus for the hom 2-category ${\sf Fun}(\cA\to \cB)$, we represent the object functors by \emph{unshaded} regions with \emph{textured} decorations, e.g., $$ \tikzmath{ \filldraw[primedregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } = F \qquad\qquad \tikzmath{ \filldraw[boxregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } = F' \qquad\qquad \tikzmath{ \filldraw[plusregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } = F'' \qquad\qquad \tikzmath{ \filldraw[starregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } = F''' $$ We represent 2-transformations (see Definition \ref{defn:2Transformation} below) by \emph{textured} strings between these textured regions, e.g., $$ \tikzmath{ \begin{scope} \filldraw[primedregion=white, rounded corners = 5pt] (0,0) rectangle (.3,.6); \filldraw[boxregion=white, rounded corners = 5pt] (.3,0) rectangle (.6,.6); \end{scope} \draw[black,thick] (.3,0) -- (.3,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } = \varphi: F\Rightarrow F' \qquad\qquad \tikzmath{ \begin{scope} \filldraw[boxregion=white, rounded corners = 5pt] (0,0) rectangle (.3,.6); \filldraw[plusregion=white, rounded corners = 5pt] (.3,0) rectangle (.6,.6); \end{scope} \draw[snake,thick] (.3,0) -- (.3,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } = \psi: F'\Rightarrow F'' \qquad\qquad 
\tikzmath{ \begin{scope} \filldraw[plusregion=white, rounded corners = 5pt] (0,0) rectangle (.3,.6); \filldraw[starregion=white, rounded corners = 5pt] (.3,0) rectangle (.6,.6); \end{scope} \draw[saw,thick] (.3,0) -- (.3,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } = \gamma: F''\Rightarrow F''' $$ We represent 2-modifications (see Definition \ref{Defn:2Modification} below) by coupons as usual. To depict a 2-morphism in $\cB$ in the image of $F$, we \emph{overlay} the 2D string diagrammatic calculus for ${\sf Fun}(\cA\to \cB)$ on top of the 2D string diagrammatic calculus for $\cA$. For example, given $F,F': \cA \to \cB$, $\varphi,\varphi' : F\Rightarrow F'$, and $m: \varphi\Rrightarrow \varphi'$, we can `overlay' the coupon for $m$ over the shaded region for $a\in \cA$ to obtain the 2-cell $m_a:\varphi_a\Rightarrow \varphi'_a$: $$ \left(\, \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.7,-.7) rectangle (.7,.7); \filldraw[primedregion=white] (-.8,-.9) rectangle (0,.9); \filldraw[boxregion=white] (0,-.9) rectangle (.8,.9); \end{scope} \draw[black,thick] (0,-.7) -- (0,0); \draw[black,thick] (0,.7) -- (0,0); \roundNbox{unshaded}{(0,0)}{.3}{0}{0}{\scriptsize{$m$}}; \node at (0,-.9) {\scriptsize{$\varphi$}}; \node at (0,.9) {\scriptsize{$\varphi'$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,-.7) rectangle (.7,.7); } \,\right) \left( \tikzmath{ \fill[gray!30,rounded corners=5pt] (0,0) rectangle (.6,.6); \node at (.3,.8) {\phantom{\scriptsize{$a$}}}; \node at (.3,-.2) {\scriptsize{$a$}}; } \right) = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.7,-.7) rectangle (.7,.7); \filldraw[primedregion=gray!30] (-.8,-.9) rectangle (0,.9); \filldraw[boxregion=gray!30] (0,-.9) rectangle (.8,.9); \end{scope} \draw[black,thick] (0,-.7) -- (0,0); \draw[black,thick] (0,.7) -- (0,0); \roundNbox{unshaded}{(0,0)}{.3}{0}{0}{\scriptsize{$m_a$}}; \node at (0,-.9) 
{\scriptsize{$\varphi_{a}$}}; \node at (0,.9) {\scriptsize{$\varphi'_a$}}; } \qquad\qquad\qquad \tikzmath{ \filldraw[primedregion=gray!30, rounded corners = 5pt] (0,0) rectangle (.6,.6); } =F(a), \quad \tikzmath{ \filldraw[boxregion=gray!30, rounded corners = 5pt] (0,0) rectangle (.6,.6); } =F'(a) \,. $$ We do not attempt to formalize this `overlay' operation, as all string diagrams can be interpreted uniquely as 2-cells in $\cB$; see Remark \ref{rem:Overlay} below for further discussion. \end{nota} \begin{defn} \label{defn:2Functor} Suppose $\cA,\cB$ are 2-categories. We use the following conventions for the coheretors of a 2-functor $F=(F,F^2,F^1):\cA \to \cB$: $$ F^2_{X,Y}\in \cB( F(X)\otimes_{F(b)} F(Y) \Rightarrow F(X\otimes_b Y)) \qquad\text{and}\qquad F^1_a \in \cB(1_{F(a)} \Rightarrow F(1_a)), $$ which satisfy the hexagon associativity equation and triangle unit equations. We depict these axioms below in the graphical calculus for $\cB$. Denoting objects in $\cB$ by the shaded regions $$ \tikzmath{ \filldraw[primedregion=gray!30, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[rounded corners=5, thin, dotted] (0,0) rectangle (.6,.6); } =F(a) \qquad\qquad \tikzmath{ \filldraw[primedregion=gray!55, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[rounded corners=5, thin, dotted] (0,0) rectangle (.6,.6); } =F(b) \qquad\qquad \tikzmath{ \filldraw[primedregion=gray!75, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[rounded corners=5, thin, dotted] (0,0) rectangle (.6,.6); } =F(c) \qquad\qquad \tikzmath{ \filldraw[primedregion=gray!95, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[rounded corners=5, thin, dotted] (0,0) rectangle (.6,.6); } =F(d), $$ and 1-cells in $\cB$ by shaded strands, e.g. 
\[ \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \filldraw[gray!30] (0,0) rectangle (-.3,.6); \filldraw[gray!55] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, \XColor] (0,0) -- (0,.6); }={}_aX_b \qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \filldraw[gray!55] (0,0) rectangle (-.3,.6); \filldraw[gray!75] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, orange] (0,0) -- (0,.6); }={}_bY_c \qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \filldraw[gray!75] (0,0) rectangle (-.3,.6); \filldraw[gray!95] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, violet] (0,0) -- (0,.6); }={}_cZ_d \qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.5,-.3) rectangle (.5,.3); \filldraw[primedregion=gray!30] (-.5,-.3) rectangle (-.2,.3); \filldraw[primedregion=gray!55] (-.2,-.3) rectangle (.2,.3); \filldraw[primedregion=gray!75] (.5,-.3) rectangle (.2,.3); \end{scope} \draw[\XColor,thick] (-.2,-.3) -- (-.2,.3); \draw[orange,thick] (.2,-.3) -- (.2,.3); } = F(X)\otimes_{F(b)} F(Y) \qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.35,-.3) rectangle (.35,.3); \filldraw[primedregion=gray!30] (-.05,-.3) rectangle (-.35,.3); \filldraw[primedregion=gray!55] (-.05,-.3) rectangle (.05,.3); \filldraw[primedregion=gray!75] (.05,-.3) rectangle (.35,.3); \end{scope} \draw[\XColor,thick] (-.05,-.3) -- (-.05,.3); \draw[orange,thick] (.05,-.3) -- (.05,.3); } = F(X\otimes_b Y), \] the hexagon and triangle equations are given by \[ \underset{ \tiny (F(X)\otimes_{F(b)} F(Y))\otimes_{F(c)}F(Z)\Rightarrow F(X\otimes_b(Y\otimes_c Z)) } { \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.8,.3) rectangle (.8,3.7); \filldraw[primedregion=gray!30] (-.4,0) -- (-.4,1) -- (-.25,1) -- (-.25,2) -- (-.1,2) -- (-.1,4) -- (-.8,4) -- (-.8,0); \filldraw[primedregion=gray!55] (0,0) -- (0,1) -- (-.15,1) -- (-.15,2) -- (0,2) -- (0,4) -- (-.1,4) -- (-.1,2) -- (-.25,2) -- (-.25,1) -- 
(-.4,1) -- (-.4,0); \filldraw[primedregion=gray!75] (.4,0) -- (.4,2) -- (.1,2) -- (.1,4) -- (0,4) -- (0,2) -- (-.15,2) -- (-.15,1) -- (0,1) -- (0,0); \filldraw[primedregion=gray!95] (.4,0) -- (.4,2) -- (.1,2) -- (.1,4) -- (.8,4) -- (.8,0); \end{scope} \draw[\XColor,thick] (-.4,.3) -- (-.4,1); \draw[orange,thick] (0,.3) -- (0,1); \draw[violet,thick] (.4,.3) -- (.4,2); \draw[\XColor,thick] (-.25,1) -- (-.25,2); \draw[orange,thick] (-.15,1) -- (-.15,2); \draw[\XColor,thick] (-.1,2) -- (-.1,3.7); \draw[orange,thick] (0,2) -- (0,3.7); \draw[violet,thick] (.1,2) -- (.1,3.7); \roundNbox{unshaded}{(-.2,1)}{.3}{.05}{.05}{\scriptsize{$F^2_{X,Y}$}}; \roundNbox{unshaded}{(0,2)}{.3}{.25}{.25}{\scriptsize{$F^2_{X\otimes Y,Z}$}}; \roundNbox{unshaded}{(0,3)}{.3}{.25}{.25}{\scriptsize{$F(\alpha^{\cC})$}}; } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.8,.3) rectangle (.8,3.7); \filldraw[primedregion=gray!30] (-.4,0) -- (-.4,3) -- (-.1,3) -- (-.1,4) -- (-.8,4) -- (-.8,0); \filldraw[primedregion=gray!55] (-.4,0) -- (-.4,3) -- (-.1,3) -- (-.1,4) -- (0,4) -- (0,3) -- (.15,3) -- (.15,2) -- (0,2) -- (0,0); \filldraw[primedregion=gray!75] (.4,0) -- (.4,2) -- (.25,2) -- (.25,3) -- (.1,3) -- (.1,4) -- (0,4) -- (0,3) -- (.15,3) -- (.15,2) -- (0,2) -- (0,0); \filldraw[primedregion=gray!95] (.4,0) -- (.4,2) -- (.25,2) -- (.25,3) -- (.1,3) -- (.1,4) -- (.8,4) -- (.8,0); \end{scope} \draw[\XColor,thick] (-.4,.3) -- (-.4,3); \draw[orange,thick] (0,.3) -- (0,2); \draw[violet,thick] (.4,.3) -- (.4,2); \draw[orange,thick] (.15,2) -- (.15,3); \draw[violet,thick] (.25,2) -- (.25,3); \draw[\XColor,thick] (-.1,3) -- (-.1,3.7); \draw[orange,thick] (0,3) -- (0,3.7); \draw[violet,thick] (.1,3) -- (.1,3.7); \roundNbox{unshaded}{(0,1)}{.3}{.25}{.25}{\scriptsize{$\alpha^\cB$}}; \roundNbox{unshaded}{(.2,2)}{.3}{.05}{.05}{\scriptsize{$F^2_{Y,Z}$}}; \roundNbox{unshaded}{(-.1,3)}{.3}{.15}{.35}{\scriptsize{$F^2_{X,Y\otimes Z}$}}; } } \qquad\qquad \underset{ \tiny F(X)\otimes_{F(b)}1_{F(b)}\Rightarrow 
F(X) }{ \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,.3) rectangle (.7,3.7); \filldraw[primedregion=gray!30] (-.2,0) -- (-.2,2) -- (-.05,2) -- (-.05,3) -- (0,3) -- (0,4) -- (-.7,4) -- (-.7,0); \filldraw[primedregion=gray!55] (-.2,0) -- (-.2,2) -- (-.05,2) -- (-.05,3) -- (0,3) -- (0,4) -- (.7,4) -- (.7,0); \end{scope} \draw[\XColor,thick] (-.2,.3) -- (-.2,2); \draw[dotted,thick] (.2,.3) -- (.2,2); \draw[\XColor,thick] (-.05,2) -- (-.05,3); \draw[dotted,thick] (.05,2) -- (.05,3); \draw[\XColor,thick] (0,3) -- (0,3.7); \roundNbox{unshaded}{(.2,1)}{.25}{0}{0}{\scriptsize{$F^1_b$}}; \roundNbox{unshaded}{(0,2)}{.3}{.15}{.15}{\scriptsize{$F^2_{X,1_b}$}}; \roundNbox{unshaded}{(0,3)}{.3}{.15}{.15}{\scriptsize{$F(\rho_X^b)$}}; } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,0) rectangle (.7,2); \filldraw[primedregion=gray!30] (-.2,0) -- (-.2,1) -- (0,1) -- (0,2) -- (-.7,2) -- (-.7,0); \filldraw[primedregion=gray!55] (-.2,0) -- (-.2,1) -- (0,1) -- (0,2) -- (.7,2) -- (.7,0); \end{scope} \draw[\XColor,thick] (-.2,0) -- (-.2,1); \draw[dotted,thick] (.2,0) -- (.2,1); \draw[\XColor,thick] (0,1) -- (0,2); \roundNbox{unshaded}{(0,1)}{.3}{.15}{.15}{\scriptsize{$\rho_{F(X)}^{F(b)}$}}; } } \qquad\qquad \underset{ \tiny 1_{F(a)}\otimes_{F(a)}F(X)\Rightarrow F(X) }{ \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,.3) rectangle (.7,3.7); \filldraw[primedregion=gray!30] (.2,0) -- (.2,2) -- (.05,2) -- (.05,3) -- (0,3) -- (0,4) -- (-.7,4) -- (-.7,0); \filldraw[primedregion=gray!55] (.2,0) -- (.2,2) -- (.05,2) -- (.05,3) -- (0,3) -- (0,4) -- (.7,4) -- (.7,0); \end{scope} \draw[\XColor,thick] (.2,.3) -- (.2,2); \draw[dotted,thick] (-.2,.3) -- (-.2,2); \draw[\XColor,thick] (.05,2) -- (.05,3); \draw[dotted,thick] (-.05,2) -- (-.05,3); \draw[\XColor,thick] (0,3) -- (0,3.7); \roundNbox{unshaded}{(-.2,1)}{.25}{0}{0}{\scriptsize{$F^1_a$}}; \roundNbox{unshaded}{(0,2)}{.3}{.15}{.15}{\scriptsize{$F^2_{1_a,X}$}}; 
\roundNbox{unshaded}{(0,3)}{.3}{.15}{.15}{\scriptsize{$F(\lambda_X^a)$}}; } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,0) rectangle (.7,2); \filldraw[primedregion=gray!30] (.2,0) -- (.2,1) -- (0,1) -- (0,2) -- (-.7,2) -- (-.7,0); \filldraw[primedregion=gray!55] (.2,0) -- (.2,1) -- (0,1) -- (0,2) -- (.7,2) -- (.7,0); \end{scope} \draw[\XColor,thick] (.2,0) -- (.2,1); \draw[dotted,thick] (-.2,0) -- (-.2,1); \draw[\XColor,thick] (0,1) -- (0,2); \roundNbox{unshaded}{(0,1)}{.3}{.15}{.15}{\scriptsize{$\lambda_{F(X)}^{F(a)}$}}; } }\,. \] Whenever possible, we will suppress the associator and unitor coheretors in our 2-categories. \end{defn} \begin{defn} \label{defn:2Transformation} Suppose $\cA,\cB$ are 2-categories, $F,F': \cA \to \cB$ are 2-functors. A \emph{2-transformation} $\varphi:F\Rightarrow F'$ consists of: \begin{itemize} \item for every 0-cell $c\in \cA$, a 1-cell $\varphi_c \in \cB(F(c)\to F'(c))$, and \item for every 1-cell ${}_aX_b\in \cA(a\to b)$, an invertible $F(a)-F'(b)$ bimodular 2-cell $$ \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,2.4); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(1.2,1.8) -- (1.2,2.4); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.8) -- (0,2.4); \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,2.6) {\scriptsize{$F'(X)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_b$}}; \node at (0,2.6) {\scriptsize{$\varphi_a$}}; } =\varphi_X \in \cB(F(X)\otimes_{F(b)} \varphi_b \Rightarrow \varphi_a \otimes_{F'(a)} F'(X)). $$ \end{itemize} This data satisfies the following coherence properties: $$ \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (3,4.2); \filldraw[primedregion=gray!30] (0,0) -- (0,2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,2.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,3.2) -- (0,4.2) -- (-.6,4.2) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.8,1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,2.6) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,2) -- (0,0); \filldraw[primedregion=gray!75] (2.4,0) -- (2.4,.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,1) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (1.2,.4) -- (1.2,0); \filldraw[boxregion=gray!30] (0,4.2) -- (0,3.2) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,2.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.2) -- (1.75,3.6) -- (1.75,4.2); \filldraw[boxregion=gray!55] (1.85,4.2) -- (1.85,3.6) -- (2.4,3.2) -- (2.4,1.6) .. controls ++(270:.4cm) and ++(45:.2cm) .. (1.8,1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,2.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.2) -- (1.75,3.6) -- (1.75,4.2); \filldraw[boxregion=gray!75] (2.4,0) -- (2.4,.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,1) .. controls ++(45:.2cm) and ++(270:.4cm) .. 
(2.4,1.6) -- (2.4,3.2) -- (1.85,3.6) -- (1.85,4.2) -- (3,4.2) -- (3,0); \filldraw[gray!55] (1,1.8) circle (.2cm); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.4) -- (0,2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,3.2) -- (1.2,3.6); \draw[orange,thick] (1.2,0) -- (1.2,.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (2.4,1.6) -- (2.4,3.6); \draw[black,thick] (2.4,0) -- (2.4,.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,3.2) -- (0,4.2); \draw[\XColor,thick] (1.733,3.6) -- (1.733,4.2); \draw[orange,thick] (1.867,3.6) -- (1.867,4.2); \roundNbox{unshaded}{(1.8,3.4)}{.3}{.6}{.6}{\scriptsize{${F'}^2_{X,Y}$}}; \filldraw[white] (1.8,1) circle (.1cm); \draw[thick] (1.8,1) circle (.1cm); \filldraw[white] (.6,2.6) circle (.1cm); \draw[thick] (.6,2.6) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,-.2) {\scriptsize{$F(Y)$}}; \node at (1.8,4.4) {\scriptsize{$F'(X\otimes_b Y)$}}; \node at (2.4,-.2) {\scriptsize{$\varphi_c$}}; \node at (1,1.8) {\scriptsize{$\varphi_b$}}; \node at (0,4.4) {\scriptsize{$\varphi_a$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (2.4,3); \filldraw[primedregion=gray!30] (0,0) -- (0,.8) -- (.533,.8) -- (.533,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.2,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (.6,2.4) -- (.6,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!75] (1.8,0) -- (1.8,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (.65,1.2) -- (.65,.8) -- (1.2,.8) -- (1.2,0); \filldraw[primedregion=gray!55] (0,0) -- (0,.8) -- (.533,.8) -- (.533,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.133,1.8) -- (1.267,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (.667,1.2) -- (.667,.8) -- (1.2,.8) -- (1.2,0); \filldraw[boxregion=gray!30] (.6,3) -- (.6,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (1.2,1.8) .. 
controls ++(45:.2cm) and ++(270:.4cm) .. (1.75,2.4) -- (1.75,3); \filldraw[boxregion=gray!75] (1.8,0) -- (1.8,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.85,2.4) -- (1.85,3) -- (2.4,3) -- (2.4,0); \filldraw[boxregion=gray!55] (1.733,3) -- (1.733,2.4) .. controls ++(270:.4cm) and ++(45:.2cm) .. (1.133,1.8) -- (1.267,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.867,2.4) -- (1.867,3); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6); \draw[orange,thick] (1.2,0) -- (1.2,.6); \draw[\XColor,thick] (.533,.6) -- (.533,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.733,2.4) -- (1.733,3); \draw[orange,thick] (.667,.6) -- (.667,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.867,2.4) -- (1.867,3); \draw[black,thick] (1.8,0) -- (1.8,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (.6,2.4) -- (.6,3); \roundNbox{unshaded}{(.6,.8)}{.3}{.6}{.6}{\scriptsize{$F^2_{X,Y}$}}; \filldraw[white] (1.2,1.8) circle (.1cm); \draw[thick] (1.2,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,-.2) {\scriptsize{$F(Y)$}}; \node at (1.8,3.2) {\scriptsize{$F'(X\otimes_b Y)$}}; \node at (1.8,-.2) {\scriptsize{$\varphi_c$}}; \node at (.6,3.2) {\scriptsize{$\varphi_a$}}; } \qquad \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.8) -- (0,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick,dotted] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,2.2); \draw[\XColor,thick] (1.2,2.2) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,1.8) -- (0,3); \roundNbox{unshaded}{(1.2,2.2)}{.3}{0}{0}{\scriptsize{${F'}^1_b$}}; \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$1_{F(b)}$}}; \node at (1.2,3.2) {\scriptsize{$F'(1_b)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_b$}}; \node at (0,3.2) {\scriptsize{$\varphi_b$}}; \node at (.2,1.2) {\scriptsize{$\id$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,2.4) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,2.4) -- (0,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick,dotted] (0,0) -- (0,.8); \draw[\XColor,thick] (0,.8) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,2.4) -- (0,3); \roundNbox{unshaded}{(0,.8)}{.3}{0}{0}{\scriptsize{$F^1_b$}}; \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$1_{F(b)}$}}; \node at (1.2,3.2) {\scriptsize{$F'(1_b)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_b$}}; \node at (0,3.2) {\scriptsize{$\varphi_b$}}; } \qquad \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. 
(1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,2.2); \draw[violet,thick] (1.2,2.2) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.8) -- (0,3); \roundNbox{unshaded}{(1.2,2.2)}{.3}{.1}{.1}{\scriptsize{$F'(f)$}}; \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,3.2) {\scriptsize{$F'(Z)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_b$}}; \node at (0,3.2) {\scriptsize{$\varphi_a$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,2.4) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,1.2) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.8); \draw[violet,thick] (0,.8) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,2.4) -- (0,3); \roundNbox{unshaded}{(0,.8)}{.3}{.1}{.1}{\scriptsize{$F(f)$}}; \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,3.2) {\scriptsize{$F'(Z)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_b$}}; \node at (0,3.2) {\scriptsize{$\varphi_a$}}; }\,. $$ \end{defn} \begin{defn} \label{Defn:2Modification} Suppose $\cA,\cB$ are 2-categories, $F,F': \cA \to \cB$ are 2-functors, and $\varphi, \psi : F\Rightarrow F'$ are 2-transformations. A \emph{2-modification} $n: \varphi\Rrightarrow \psi$ consists of a 2-cell $n_a\in \cB(\varphi_a \Rightarrow \psi_a)$ for all $a\in \cA$ such that $$ \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,1.8) -- (0,2.2); \draw[snake,thick] (0,2.2) -- (0,3); \roundNbox{unshaded}{(0,2.2)}{.3}{0}{0}{\scriptsize{$n_a$}}; \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,3.2) {\scriptsize{$F'(X)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_b$}}; \node at (0,3.2) {\scriptsize{$\psi_a$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,2.4) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,1.2) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.8); \draw[snake,thick] (1.2,.8) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,2.4) -- (0,3); \roundNbox{unshaded}{(1.2,.8)}{.3}{0}{0}{\scriptsize{$n_b$}}; \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,3.2) {\scriptsize{$F'(X)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_b$}}; \node at (0,3.2) {\scriptsize{$\psi_a$}}; } \qquad\qquad \forall\,X\in\cA(a\to b) \qquad\qquad \begin{aligned} \tikzmath{ \filldraw[primedregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= F \\ \tikzmath{ \filldraw[boxregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= F' \end{aligned} $$ The 2-composition of 2-modifications in ${\sf Fun}(\cA\to \cB)$ is defined as follows. Suppose $F,F'\in {\sf Fun}(\cA\to \cB)$ and $\varphi,\varphi',\varphi''$ are 2-transformations $F\Rightarrow F'$. Let $n:\varphi\Rrightarrow\varphi'$ and $n':\varphi'\Rrightarrow\varphi''$ be 2-modifications. The 2-composition in ${\sf Fun}(\cA\to \cB)$, denoted by $n'\star n:\varphi\Rrightarrow\varphi''$, is defined by $(n'\star n)_a:=n'_a\star n_a$ for $a\in \cA$ as composition of 2-cells in $\cB$. \end{defn} \begin{defn}[1-composition in ${\sf Fun}(\cA\to \cB)$] Suppose $F,F',F''\in {\sf Fun}(\cA\to \cB)$ are 2-functors, and let $\varphi:F\Rightarrow F'$ and $\psi:F'\Rightarrow F''$ be 2-transformations. The 1-composite $\varphi\otimes\psi :F\Rightarrow F''$ of 2-transformations is defined as follows. For $X\in\cA(a\to b)$, we define $(\varphi\otimes\psi)_a:=\varphi_a\otimes\psi_a$ as 1-composition of 1-cells in $\cB$, and $(\varphi\otimes\psi)_X$ by \[ (\varphi\otimes\psi)_X := \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,2.4); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. 
(0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[plusregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[plusregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,2.4); \draw[black,thick] (1.15,0) -- (1.15,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (-.05,1.8) -- (-.05,2.4); \draw[snake,thick] (1.25,0) -- (1.25,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (.05,1.8) -- (.05,2.4); \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,2.6) {\scriptsize{$F''(X)$}}; \node at (1.2,-.2) {\scriptsize{$(\varphi\otimes\psi)_b$}}; \node at (0,2.6) {\scriptsize{$(\varphi\otimes\psi)_a$}}; } := \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-1.8,-1.8) rectangle (1.8,1.8); \filldraw[primedregion=gray!30] (-1.2,-1.8) -- (-1.2,-1.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-.6,-.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.2,-.2) -- (-1.2,1.8) -- (-.55,1.8) -- (-.55,2.7) -- (-1.8,2.7) -- (-1.8,-1.8); \filldraw[primedregion=gray!55](0,-1.8) -- (0,-1.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (-.6,-.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (-1.2,-1.4) -- (-1.2,-1.8); \filldraw[boxregion=gray!30] (-1.2,1.8) -- (-1.2,-.2) .. controls ++(270:.4cm) and ++(135:.2cm) .. (-.6,-.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (0,-.2) -- (0,.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. 
(0,1.4) -- (0,1.8); \filldraw[boxregion=gray!55] (0,-1.8) -- (0,-1.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (-.6,-.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (0,-.2) -- (0,.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,.8) .. controls ++(-45:.2cm) and ++(90:.4cm) .. (1.2,.2) -- (1.2,-1.8); \filldraw[plusregion=gray!30] (-.55,2.7) -- (-.55,1.8) -- (0,1.8) -- (0,1.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.4) -- (1.2,2.7); \filldraw[plusregion=gray!55] (1.2,-1.8) -- (1.2,.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.4) -- (1.2,2.7) -- (1.8,2.7) -- (1.8,-1.8); \end{scope} \draw[\XColor,thick] (-1.2,-1.8) -- (-1.2,-1.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,-.2) -- (0,.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.4) -- (1.2,1.8); \draw[black,thick] (0,-1.8) -- (0,-1.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (-1.2,-.2) -- (-1.2,1.8); \draw[snake,thick] (1.2,-1.8) -- (1.2,.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,1.4) -- (0,1.8); \filldraw[white] (-.6,-.8) circle (.1cm); \draw[thick] (-.6,-.8) circle (.1cm); \filldraw[white] (.6,.8) circle (.1cm); \draw[thick] (.6,.8) circle (.1cm); \node at (-1.2,-2) {\scriptsize{$F(X)$}}; \node at (0,-2) {\scriptsize{$\varphi_b$}}; \node at (1.2,-2) {\scriptsize{$\psi_b$}}; \node at (-1.2,2) {\scriptsize{$\varphi_a$}}; \node at (0,2) {\scriptsize{$\psi_a$}}; \node at (1.2,2) {\scriptsize{$F''(X)$}}; } \qquad\qquad \forall\,X\in\cA(a\to b) \qquad\qquad \begin{aligned} \tikzmath{ \filldraw[primedregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= F \\ \tikzmath{ \filldraw[boxregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= F' \\ \tikzmath{ \filldraw[plusregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= F'' \end{aligned} \] Suppose $\varphi,\varphi': F\Rightarrow F'$ and $\psi,\psi':F'\Rightarrow F''$ are 2-transformations, and let $n:\varphi\Rrightarrow\varphi'$ and $t:\psi\Rrightarrow\psi'$ be 2-modifications. The 1-composite $n\otimes t:\varphi\otimes\psi \Rrightarrow \varphi'\otimes\psi'$ of 2-modifications is defined component-wise as 1-composition of 2-cells in $\cB$ by $(n\otimes t)_a:=n_a\otimes t_a$ for $a\in\cA$. Finally, we define the associator for 1-composition in ${\sf Fun}(\cA\to \cB)$ as follows. Suppose $\varphi:F\Rightarrow F'$, $\psi:F'\Rightarrow F''$, and $\gamma:F''\Rightarrow F'''$ are 2-transformations. 
The associator $\alpha^\otimes_{\varphi,\psi,\gamma}$ is an invertible 2-modification $(\varphi\otimes\psi)\otimes\gamma\Rrightarrow \varphi\otimes(\psi\otimes\gamma)$ which is given component-wise by \begin{equation} \label{eq:ComponentwiseAssociator} \left(\alpha^\otimes_{\varphi,\psi,\gamma}\right)_a:= \alpha^\cB_{\varphi(a),\psi(a),\gamma(a)}, \end{equation} which is the associator in $\cB$ between 1-cells $\varphi(a),\psi(a),\gamma(a)$. One checks that $\alpha^\otimes_{\varphi,\psi,\gamma}$ is a modification, and that $\alpha^\otimes$ satisfies the pentagon axiom. The left/right unitors $\lambda_\varphi^F:1_F\otimes\varphi\Rrightarrow \varphi$ and $\rho_\varphi^{F'}:\varphi\otimes 1_{F'}\Rrightarrow \varphi$ are invertible 2-modifications which are given component-wise by \begin{equation} \label{eq:ComponentwiseUnitor} \left(\lambda_\varphi^F\right)_a:=\lambda_{\varphi(a)}^{{F(a)}} \qquad\qquad \left(\rho_\varphi^{F'}\right)_a:=\rho_{\varphi(a)}^{{F'(a)}}, \end{equation} which are the unitors in $\cB$ for 1-cell $\varphi(a)$. \end{defn} \begin{rem} \label{rem:Overlay} We do not attempt to formalize this overlay operation in this article, as all such string diagrams can be interpreted uniquely as a 2-cell in $\cB$ without confusion. However, we sketch the following strategy to formalize this graphical calculus, which was communicated to us by David Reutter. First, by \cite{1903.05777}, the 3D graphical calculus for ${\mathsf{Gray}}$-categories \cite{1211.0529,1409.2148} may be applied in any 3-category, in particular, to $2{\mathsf{Cat}}$. Second, given a 2-category $\cA\in 2{\mathsf{Cat}}$, we may identify $\cA = {\sf Fun}(* \to \cA)$ where $*$ is the trivial 2-category. This identification allows us to identify the \emph{internal} 2D string diagrammatic calculus for $\cA$ with the \emph{external} 2D string diagrammatic calculus for ${\sf Fun}(* \to \cA)$ as a hom 2-category of $2{\mathsf{Cat}}$. 
Finally, identifying a 2-functor $F: \cA\to \cB$ with the 2-functor ${\sf Fun}(*\to \cA) \to {\sf Fun}(*\to \cB)$ given by post-composition with $F$, and similarly for transformations and modifications, we see that our overlay graphical calculus is exactly stacking of 2D sheets in the 3D graphical calculus for $2{\mathsf{Cat}}$. \[ \left(\, \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,-.8) rectangle (.6,.8); \filldraw[primedregion=white] (-.6,-.9) rectangle (0,.9); \filldraw[boxregion=white] (0,-.9) rectangle (.6,.9); \end{scope} \draw[black,thick] (0,-.8) -- (0,0); \draw[snake,thick] (0,.8) -- (0,0); \roundNbox{unshaded}{(0,0)}{.3}{0}{0}{\scriptsize{$m$}}; \draw[thin, dotted, rounded corners = 5pt] (-.6,-.8) rectangle (.6,.8); } \,\right) \left(\, \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,-.7) rectangle (.7,.7); \filldraw[gray!30] (-.2,-.7) -- (-.2,0) -- (0,0) -- (0,.7) -- (-.7,.7) -- (-.7,-.7); \filldraw[gray!55] (-.2,-.7) rectangle (.2,0); \filldraw[gray!75] (.2,-.7) -- (.2,0) -- (0,0) -- (0,.7) -- (.7,.7) -- (.7,-.7); \end{scope} \draw[violet,thick] (0,.3) -- (0,.7); \draw[\XColor,thick] (-.2,-.3) -- (-.2,.-.7); \draw[orange,thick] (.2,-.3) -- (.2,-.7); \roundNbox{unshaded}{(0,0)}{.3}{.1}{.1}{\scriptsize{$f$}}; } \,\right) = \tikzmath{ \begin{scope} \filldraw[gray!30] (-.45,.6) -- (-.45,2.6) -- (.55,2.6) -- (.55,2.2) -- (-.15,2.2) -- (-.15,.6); \filldraw[gray!75] (.55,2.2) rectangle (1.55,2.6); \filldraw[primedregion=gray!30] (-.15,.6) -- (-.15,2.2) -- (.55,2.2) -- (.55,1.8) -- (.35,1.8) -- (.35,.6); \filldraw[primedregion=gray!75] (.55,2.2) rectangle (.85,1.2); \filldraw[primedregion=gray!55] (.35,.6) rectangle (.75,1.8); \filldraw[primedregion=gray!75] (.75,.6) rectangle (.85,1.2); \filldraw[boxregion=gray!75] (.85,.6) rectangle (1.55,2.2); \filldraw[primedregion=white] (-.15,.2) rectangle (.85,.6); \filldraw[boxregion=white] (.85,.2) -- (.85,.6) -- (1.55,.6) -- (1.55,2.2) -- (1.85,2.2) -- (1.85,.2); 
\end{scope} \draw[\XColor,thick] (.35,1.8) -- (.35,.6); \draw[orange,thick] (.75,1.8) -- (.75,.6); \draw[violet,thick] (.55,1.8) -- (.55,2.6); \roundNbox{unshaded}{(.55,1.8)}{.22}{.18}{.18}{\scriptsize{$f$}}; \filldraw[primedregion=white,rounded corners = 5] (.18,1.6) rectangle (.92,2.0); \node at (.55,1.8) {\scriptsize{$f$}}; \draw[snake,thick] (.85,1.2) -- (.85,2.2); \draw[black,thick] (.85,1.2) -- (.85,.2); \roundNbox{unshaded}{(.85,1.1)}{.22}{0}{0}{\scriptsize{$m$}}; \draw[dotted] (-.15,.2) rectangle (1.85,2.2); \draw[dotted] (-.45,.6) rectangle (1.55,2.6); \node at (-.6,.2) [rotate=-53] {$\to$}; \node at (1,-.3) {$\Rightarrow$}; \node at (2.3,1) [rotate=90] {$\Rrightarrow$}; \draw[gray!30] (0,0) rectangle (2,2); \draw[gray!30] (0,0) -- (-.6,.8) -- (-.6,2.8) -- (1.4,2.8) -- (2,2); \draw[gray!30] (-.6,2.8) -- (0,2); \draw[gray!30,dashed] (-.6,.8) -- (1.4,.8) -- (1.4,2.8); \draw[gray!30,dashed] (1.4,.8) -- (2,0); \node at (-.65,.75) {$*$}; \node at (-.35,.4) {\scriptsize{$\cA$}}; \node at (-.05,0) {\scriptsize{$\cB$}}; } \] Now in order to interpret each diagram as a unique 2-morphism in $\cB$, one should require the strings and coupons of our $\cA$-diagram and our ${\sf Fun}(\cA\to \cB)$ diagram do not overlap, except at finitely many points where strings can cross transversely. The axioms of 2-functor, 2-transformation, and 2-modification will then ensure that any two ways of resolving non-generic intersections agree. For example, we may overlay the 2-transformation $\varphi: F\Rightarrow F'$ on the identity 2-morphism $\id_X \otimes_b \id_Y$ in $\cA$ in several ways. 
The equality of two such ways below produces the monoidal coherence axiom: \[ \left( \underset{\varphi: F\Rightarrow F'}{ \tikzmath{ \begin{scope} \filldraw[primedregion=white, rounded corners = 5pt] (0,0) rectangle (.3,.6); \filldraw[boxregion=white, rounded corners = 5pt] (.3,0) rectangle (.6,.6); \end{scope} \draw[black,thick] (.3,0) -- (.3,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } } \right) \left( \tikzmath{ \begin{scope} \clip[rounded corners = 5] (0,0) rectangle (.9,.6); \fill[gray!30] (0,0) rectangle (.3,.6); \fill[gray!55] (.3,0) rectangle (.6,.6); \fill[gray!75] (.6,0) rectangle (.9,.6); \end{scope} \draw[\XColor,thick] (.3,0) -- (.3,.6); \draw[orange,thick] (.6,0) -- (.6,.6); \node at (.3,-.2) {\scriptsize{$X$}}; \node at (.6,-.2) {\scriptsize{$Y$}}; } \right) = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (3,4.2); \filldraw[primedregion=gray!30] (0,0) -- (0,2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,2.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,3.2) -- (0,4.2) -- (-.6,4.2) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.8,1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,2.6) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,2) -- (0,0); \filldraw[primedregion=gray!75] (2.4,0) -- (2.4,.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,1) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (1.2,.4) -- (1.2,0); \filldraw[boxregion=gray!30] (0,4.2) -- (0,3.2) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,2.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.2) -- (1.75,3.6) -- (1.75,4.2); \filldraw[boxregion=gray!55] (1.85,4.2) -- (1.85,3.6) -- (2.4,3.2) -- (2.4,1.6) .. controls ++(270:.4cm) and ++(45:.2cm) .. (1.8,1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(.6,2.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.2) -- (1.75,3.6) -- (1.75,4.2); \filldraw[boxregion=gray!75] (2.4,0) -- (2.4,.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,1) .. controls ++(45:.2cm) and ++(270:.4cm) .. (2.4,1.6) -- (2.4,3.2) -- (1.85,3.6) -- (1.85,4.2) -- (3,4.2) -- (3,0); \filldraw[gray!55] (1,1.8) circle (.2cm); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.4) -- (0,2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,3.2) -- (1.2,3.6); \draw[orange,thick] (1.2,0) -- (1.2,.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (2.4,1.6) -- (2.4,3.6); \draw[black,thick] (2.4,0) -- (2.4,.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,3.2) -- (0,4.2); \draw[\XColor,thick] (1.733,3.6) -- (1.733,4.2); \draw[orange,thick] (1.867,3.6) -- (1.867,4.2); \roundNbox{unshaded}{(1.8,3.4)}{.3}{.6}{.6}{\scriptsize{${F'}^2_{X,Y}$}}; \filldraw[white] (1.8,1) circle (.1cm); \draw[thick] (1.8,1) circle (.1cm); \filldraw[white] (.6,2.6) circle (.1cm); \draw[thick] (.6,2.6) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,-.2) {\scriptsize{$F(Y)$}}; \node at (1.8,4.4) {\scriptsize{$F'(X\otimes_b Y)$}}; \node at (2.4,-.2) {\scriptsize{$\varphi_c$}}; \node at (1,1.8) {\scriptsize{$\varphi_b$}}; \node at (0,4.4) {\scriptsize{$\varphi_a$}}; } \quad \text{ or } \quad \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (2.4,3); \filldraw[primedregion=gray!30] (0,0) -- (0,.8) -- (.533,.8) -- (.533,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.2,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (.6,2.4) -- (.6,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!75] (1.8,0) -- (1.8,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (.65,1.2) -- (.65,.8) -- (1.2,.8) -- (1.2,0); \filldraw[primedregion=gray!55] (0,0) -- (0,.8) -- (.533,.8) -- (.533,1.2) .. 
controls ++(90:.4cm) and ++(-135:.2cm) .. (1.133,1.8) -- (1.267,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (.667,1.2) -- (.667,.8) -- (1.2,.8) -- (1.2,0); \filldraw[boxregion=gray!30] (.6,3) -- (.6,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (1.2,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.75,2.4) -- (1.75,3); \filldraw[boxregion=gray!75] (1.8,0) -- (1.8,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.85,2.4) -- (1.85,3) -- (2.4,3) -- (2.4,0); \filldraw[boxregion=gray!55] (1.733,3) -- (1.733,2.4) .. controls ++(270:.4cm) and ++(45:.2cm) .. (1.133,1.8) -- (1.267,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.867,2.4) -- (1.867,3); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6); \draw[orange,thick] (1.2,0) -- (1.2,.6); \draw[\XColor,thick] (.533,.6) -- (.533,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.733,2.4) -- (1.733,3); \draw[orange,thick] (.667,.6) -- (.667,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.867,2.4) -- (1.867,3); \draw[black,thick] (1.8,0) -- (1.8,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (.6,2.4) -- (.6,3); \roundNbox{unshaded}{(.6,.8)}{.3}{.6}{.6}{\scriptsize{$F^2_{X,Y}$}}; \filldraw[white] (1.2,1.8) circle (.1cm); \draw[thick] (1.2,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,-.2) {\scriptsize{$F(Y)$}}; \node at (1.8,3.2) {\scriptsize{$F'(X\otimes_b Y)$}}; \node at (1.8,-.2) {\scriptsize{$\varphi_c$}}; \node at (.6,3.2) {\scriptsize{$\varphi_a$}}; }\,. \] For another example, when we have a 2-modification between 2-transformations, we may overlay it on an identity 2-morphism $\id_X$ in many ways. 
The equality of two such ways below produces the modification coherence axiom: \[ \left( \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,-.8) rectangle (.6,.8); \filldraw[primedregion=white] (-.6,-.9) rectangle (0,.9); \filldraw[boxregion=white] (0,-.9) rectangle (.6,.9); \end{scope} \draw[black,thick] (0,-.8) -- (0,0); \draw[snake,thick] (0,.8) -- (0,0); \roundNbox{unshaded}{(0,0)}{.3}{0}{0}{\scriptsize{$n$}}; \node at (0,1) {\scriptsize{$\varphi'$}}; \node at (0,-1) {\scriptsize{$\varphi$}}; \draw[thin, dotted, rounded corners = 5pt] (-.6,-.8) rectangle (.6,.8); } \right) \left( \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \fill[gray!30] (0,0) rectangle (-.3,.6); \fill[gray!55] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, \XColor] (0,0) -- (0,.6); \node at (0,-.2) {\scriptsize{$X$}}; } \right) = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,1.8) -- (0,2.2); \draw[snake,thick] (0,2.2) -- (0,3); \roundNbox{unshaded}{(0,2.2)}{.3}{0}{0}{\scriptsize{$n_a$}}; \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,3.2) {\scriptsize{$F'(X)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_b$}}; \node at (0,3.2) {\scriptsize{$\psi_a$}}; } \quad \text{ or } \quad \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,2.4) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,1.2) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.8); \draw[snake,thick] (1.2,.8) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,2.4) -- (0,3); \roundNbox{unshaded}{(1.2,.8)}{.3}{0}{0}{\scriptsize{$n_b$}}; \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,3.2) {\scriptsize{$F'(X)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_b$}}; \node at (0,3.2) {\scriptsize{$\psi_a$}}; }\,. 
\] Here, the white dots which appear may be interpreted as interchangers in $2{\mathsf{Cat}}$ (see Construction \ref{const:1CompositionIn2Cat} below) which arise from resolving the two stacked 2D diagrams in $2{\mathsf{Cat}}$. (Recall that ${}_aX_b\in \cA$ is a transformation when viewed as a 1-morphism in ${\sf Fun}(*\to \cA)$.) We leave a rigorous proof of our formalization strategy of this `overlay' graphical calculus to the interested reader. \end{rem} \subsection{The \texorpdfstring{$\rm C^*/W^*$}{C*/W*} 2-category \texorpdfstring{${\sf Fun}^\dag(\cA\to \cB)$}{Fun(A->B)} between \texorpdfstring{$\rm C^*/W^*$}{C*/W*} 2-categories} To the best of our knowledge, the notion of $\rm C^*$ 2-category first appeared in \cite{MR1444286}, and the notion of $\rm W^*$ 2-category first appeared in \cite{MR2325696}. The notion of $\rm W^*$-category was studied in detail in \cite{MR808930}. We refer the reader to \cite[\S2.1]{2105.12010} for an introduction to $\rm C^*/W^*$ 2-categories. \begin{defn} Suppose $\cA,\cB$ are $\rm C^*/W^*$ 2-categories. A $\dag$ 2-functor $F:\cA\to\cB$ is a 2-functor $F=(F,F^2,F^1):\cA \to \cB$ such that $F^2_{X,Y}$ and $F^1_a$ are unitary for all composable 1-cells $X,Y$ in $\cA$ and all objects $a\in \cA$. When $\cA,\cB$ are $\rm W^*$, we call a $\dag$ 2-functor \emph{normal} when each hom functor $F_{a\to b}: \cA(a\to b) \to \cB(F(a)\to F(b))$ is a normal $\dag$ functor. Suppose now $F,G: \cA \to \cB$ are $\dag$-2-functors. A $\dag$-2-transformation $\varphi:F\Rightarrow G$ consists of a 2-transformation $\varphi=(\varphi_c, \varphi_X): F\Rightarrow G$ such that every invertible 2-cell $\varphi_X \in \cB(F(X)\otimes_{F(b)} \varphi_b \Rightarrow \varphi_a \otimes_{G(a)} G(X))$ is unitary. Given two $\dag$-2-transformations $\varphi, \psi : F\Rightarrow G$, a 2-modification $n: \varphi\Rrightarrow \psi$ is \emph{(uniformly) bounded} if the 2-cells $n_a\in \cB(\varphi_a \Rightarrow \psi_a)$ for all $a\in \cA$ are uniformly bounded. 
Now consider the 2-subcategory ${\sf Fun}^\dag(\cA\to \cB)$ of ${\sf Fun}(\cA\to \cB)$ consisting of $\dag$ 2-functors, $\dag$ 2-transformations, and uniformly bounded modifications. When $\cA,\cB$ are $\rm W^*$, we further require all $\dag$ 2-functors to be normal. \end{defn} \begin{rem} \label{rem:Underlying2FunctorEquivalence} It is well known (e.g., see \cite[Thm.~7.4.1]{2002.06055}) that a 2-functor is an equivalence if and only if it is an equivalence on hom 1-categories (fully faithful on 2-morphisms and essentially surjective on 1-morphisms) and essentially surjective on objects. Similarly, a $\dag$ 2-functor is an equivalence if and only if it is a $\dag$-equivalence on hom categories (fully faithful on 2-morphisms and unitarily essentially surjective on 1-morphisms) and unitarily essentially surjective on objects. When $F: \cC\to \cD$ is a $\dag$ 2-functor between $\rm C^*$ 2-categories, observe that $F$ is a dagger equivalence if and only if the underlying 2-functor is an equivalence. Indeed, $F$ is unitarily essentially surjective on 1-morphisms and objects if and only if it is essentially surjective on 1-morphisms and objects by the existence of polar decomposition for invertible 2-morphisms in $\cD$. Finally, observe that when $\cC,\cD$ are $\rm W^*$, any inverse $\dag$ 2-functor will automatically be normal. This is an immediate consequence of the fact that every unital $*$-isomorphism between von Neumann algebras is automatically normal using Roberts' $2\times 2$ trick \cite[Lem.~2.6]{MR808930} on linking algebras of hom 1-categories. \end{rem} In Proposition \ref{prop:FunDagC*W*} below, we prove that whenever $\cA,\cB$ are $\rm C^*/W^*$, then so is ${\sf Fun}^\dag(\cA\to \cB)$ respectively. In order to prove this result, we prove Lemma \ref{Lem:ProdvNa} on weak* convergence in a product von Neumann algebra, which is certainly known to experts. 
Suppose that $(M_i)_{i\in I}$ is a family of von Neumann algebras, and consider the product von Neumann algebra $\prod_{i\in I} M_i$, which is defined as the double commutant of the unital $*$-algebra of uniformly bounded elements $(m_i)$ in the algebraic product of the $M_i$ acting on the Hilbert space $\prod_{i\in I} H_i$, which consists of $L^2$-summable sequences of vectors. For $j\in I$, there are mutually orthogonal projections $p_j : \prod_i H_i \to H_j$ such that $\sum p_j = 1$ SOT, so every element $m\in \prod_i M_i$ is diagonal, i.e., $m$ can be written as $m=(m_i:= p_imp_i)_{i\in I}$. \begin{lem} \label{Lem:ProdvNa} A norm-bounded net $(m_i)^j \to (m_i)$ in the weak* topology on $\prod M_i$ if and only if every component net $m_i^j \to m_i$ in the weak* topology on $M_i$. \end{lem} \begin{proof} On norm-bounded sets in a von Neumann algebra, the weak* topology agrees with the weak operator topology. Suppose $\eta, \xi \in \prod_i H_i$. It is clear that $\langle (m_i)^j\eta, \xi \rangle \to \langle (m_i)\eta, \xi\rangle$ for all $\eta, \xi$ implies $\langle m_i^j\eta_i, \xi_i \rangle \to \langle m_i\eta_i, \xi_i\rangle$ for all $i$. For the converse, let $\varepsilon>0$. Suppose $M$ is the norm bound for $(m_i)^j$ and $(m_i)$. It suffices to show $\langle (m_i)^j\eta,\xi\rangle\to \langle (m_i)\eta,\xi\rangle$ for all given $\eta,\xi\in \prod_i H_i$ with $\|\eta\|,\|\xi\|<1$. Now choose $\eta',\xi'$ in a finite product with $\|\eta'\|<1$ and $\|\xi'\|<1$ such that $$ \|\eta-\eta'\|<\frac{\varepsilon}{5M} \qquad \text{and} \qquad \|\xi-\xi'\|<\frac{\varepsilon}{5M}. $$ Since $\eta',\xi'$ are finitely supported and $m_i^j\to m_i$ weak* for all components $i\in I$ by assumption, we can choose $j_0$ such that for all $j\geq j_0$, $$ |\langle [(m_i)^j-(m_i)]\eta',\xi'\rangle|<\frac{\varepsilon}{5}. 
$$ Then for all $j\geq j_0$, we have \begin{align*} |\langle [(m_i)^j - (m_i)]\eta, \xi \rangle | &\leq |\langle (m_i)^j(\eta-\eta'), \xi \rangle | + |\langle (m_i)^j\eta', (\xi-\xi') \rangle | \\ &\qquad+ |\langle [(m_i)^j - (m_i)]\eta', \xi' \rangle | + |\langle (m_i)(\eta-\eta'), \xi \rangle | + |\langle (m_i)\eta', (\xi-\xi') \rangle | \\ & \le \|(m_i)^j\| \|\eta-\eta'\| \|\xi \| + \|(m_i)^j\| \|\eta'\| \|(\xi-\xi')\| \\ &\qquad+ |\langle [(m_i)^j - (m_i)]\eta', \xi' \rangle | + \|(m_i)\| \|\eta-\eta'\| \|\xi\| + \|(m_i)\| \|\eta'\| \|\xi-\xi'\| \\ & < \varepsilon. \qedhere \end{align*} \end{proof} \begin{construction} \label{construction:DaggerStructureOnFunAB} We construct a $\dag$-structure on ${\sf Fun}^\dag(\cA\to \cB)$ (c.f.~\cite{2004.12760}). Suppose $F,F'\in {\sf Fun}^\dag(\cA\to\cB)$, $\varphi,\psi: F\Rightarrow F'$, and $n:\varphi\Rrightarrow \psi$ is a uniformly bounded modification. For each 0-cell $b\in\cB$, we define $(n^\dag)_b:=(n_b)^\dag$, where $(n_b)^\dag$ is the dagger in $\cB$. We now verify that $n^\dag$ is a modification $\psi\Rightarrow\varphi$ with $\|n^\dag\|=\|n\|$. First, note that $\varphi_X,\psi_X$ are unitaries for all $X\in \cA(a\to b)$. We compose $\psi_X^\dag$ on the top and $\varphi_X^\dag$ on the bottom, and apply the dagger in $\cB$, to obtain \[ \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,2.4) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[boxregion=gray!30] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,1.2) -- (0,0); \filldraw[primedregion=gray!55] (0,3) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,1.2) .. 
controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3) -- (1.8,3) -- (1.8,0); \filldraw[gray!30] (0,1.8) circle (.3cm); \end{scope} \draw[black,thick] (0,0) -- (0,.8); \draw[snake,thick] (0,.8) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3); \draw[\XColor,thick] (1.2,0) -- (1.2,.8); \draw[\XColor,thick] (1.2,.8) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,2.4) -- (0,3); \roundNbox{unshaded}{(0,.8)}{.3}{0}{0}{\scriptsize{$n_a$}}; \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$\varphi_a$}}; \node at (1.2,3.2) {\scriptsize{$\psi_b$}}; \node at (1.2,-.2) {\scriptsize{$F'(X)$}}; \node at (0,3.2) {\scriptsize{$F(X)$}}; \node at (0,1.8) {\scriptsize{$\psi_X^\dag$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[boxregion=gray!30] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[primedregion=gray!55] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \filldraw[gray!30] (0,1.2) circle (.3cm); \end{scope} \draw[black,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,2.2); \draw[snake,thick] (1.2,2.2) -- (1.2,3); \draw[\XColor,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,1.8) -- (0,2.2); \draw[\XColor,thick] (0,2.2) -- (0,3); \roundNbox{unshaded}{(1.2,2.2)}{.3}{0}{0}{\scriptsize{$n_b$}}; \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$\varphi_a$}}; \node at (1.2,3.2) {\scriptsize{$\psi_b$}}; \node at (1.2,-.2) {\scriptsize{$F'(X)$}}; \node at (0,3.2) {\scriptsize{$F(X)$}}; \node at (0,1.2) {\scriptsize{$\varphi_{X}^\dag$}}; } \qquad\overset{\dag}{\Longrightarrow}\qquad \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \filldraw[gray!30] (0,1.2) circle (.3cm); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3); \draw[snake,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,1.8) -- (0,2.2); \draw[black,thick] (0,2.2) -- (0,3); \roundNbox{unshaded}{(0,2.2)}{.3}{0}{0}{\scriptsize{$n_a^\dag$}}; \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,3.2) {\scriptsize{$F'(X)$}}; \node at (1.2,-.2) {\scriptsize{$\psi_b$}}; \node at (0,3.2) {\scriptsize{$\varphi_a$}}; \node at (0,1.2) {\scriptsize{$\psi_{X}$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,2.4) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,1.2) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3) -- (1.8,3) -- (1.8,0); \filldraw[gray!30] (0,1.8) circle (.3cm); \end{scope} \draw[\XColor,thick] (0,0) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3); \draw[snake,thick] (1.2,0) -- (1.2,.8); \draw[black,thick] (1.2,.8) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,2.4) -- (0,3); \roundNbox{unshaded}{(1.2,.8)}{.3}{0}{0}{\scriptsize{$n_b^\dag$}}; \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,3.2) {\scriptsize{$F'(X)$}}; \node at (1.2,-.2) {\scriptsize{$\psi_b$}}; \node at (0,3.2) {\scriptsize{$\varphi_a$}}; \node at (0,1.8) {\scriptsize{$\varphi_X$}}; } \] Thus, $n^\dag$ is a 2-modification $\psi\Rightarrow\varphi$. 
Since $\dag$ preserves the norm on all 2-cells of $\cB$, we have $\|n_b\|=\|n_b^\dag\|$ for all $b\in \cB$, and thus $n^\dag$ is uniformly bounded with $\|n^\dag\|=\|n\|$. We show $(n\otimes k)^\dag = n^\dag\otimes k^\dag$ and $(n\star t)^\dag = t^\dag\star n^\dag$, and clearly $n^{\dag\dag}=n$ by construction. For $a\in\cA$, \begin{align*} (n\otimes k)^\dag_a &= ((n\otimes k)_a)^\dag = (n_a \otimes k_a)^\dag = n_a^\dag\otimes k_a^\dag = (n^\dag)_a \otimes(k^\dag)_a = (n^\dag\otimes k^\dag)_a \\ (n\star t)^\dag_a &= ((n\star t)_a)^\dag = (n_a \star t_a)^\dag = t_a^\dag \star n_a^\dag = (t^\dag)_a \star (n^\dag)_a = (t^\dag\star n^\dag)_a. \end{align*} Finally, we observe that since all associators and unitors in $\cB$ are unitary, so are the associators and unitors in ${\sf Fun}^\dag(\cA\to \cB)$, as all their components are unitary by (\ref{eq:ComponentwiseAssociator},\ref{eq:ComponentwiseUnitor}). \end{construction} \begin{prop} \label{prop:FunDagC*W*} When $\cA,\cB$ are $\rm C^*/W^*$ 2-categories, so is ${\sf Fun}^\dag(\cA\to \cB)$. \end{prop} \begin{proof} By Construction \ref{construction:DaggerStructureOnFunAB}, ${\sf Fun}^\dag(\cA\to \cB)$ is a $\dag$ 2-category. Since ${\sf Fun}^\dag(\cA\to\cB)$ admits direct sums of 1-morphisms, to show ${\sf Fun}^\dag(\cA\to \cB)$ is $\rm C^*$, by Roberts' $2\times 2$ trick \cite[Lem.~2.6]{MR808930}, it suffices to show that for each 1-morphism/2-transformation $\varphi : F\Rightarrow G$, $\End_{{\sf Fun}^\dag(\cA\to\cB)}(\varphi)$ is a $\rm C^*$ algebra. Indeed, the uniformly bounded modifications $n: \varphi\Rrightarrow \varphi$ do form a $\rm C^*$-algebra under the supreme norm: $$ \|n^\dag\cdot n\| = \sup_{a\in\cA}\|(n^\dag\cdot n)_a\| = \sup_{a\in\cA}\|(n^\dag)_a\star n_a\| = \sup_{a\in\cA}\|(n_a)^\dag\star n_a\| = \sup_{a\in\cA}\|n_a\|^2 = \|n\|^2. $$ Now suppose $\cA,\cB$ are $\rm W^*$ 2-categories. 
It remains to prove $\End_{{\sf Fun}^\dag(\cA\to \cB)}(\varphi)$ is a $\rm W^*$-algebra and that 1-compositions with identity 2-transformations is a normal $\dag$ functor on hom categories. Note that $$ n = (n_a)_{a\in\cA} \in \End(\varphi: F\to G) \subset \prod_{a\in\cA} \End(\varphi_a), $$ where $n$ satisfies $\varphi_X\star(1_{F(X)}\otimes_{F(b)} n_b)=(n_a\otimes_{G(a)}1_{G(X)})\star\varphi_X$, for all $X\in\cA(a\to b)$. By either the Krein-Smulian or Kaplansky Density Theorems, to prove $\End_{{\sf Fun}^\dag(\cA\to \cB)}(\varphi)$ is a $\rm W^*$-algebra, it suffices to show the unit ball in $\End(\varphi)$ is weak* closed. Let $(n_j=(n_a^j))$ be a weak* convergent net in the unit ball of $\End(\varphi)\subset\prod_a \End(\varphi_a)$, a $\rm W^*$-algebra. By Lemma \ref{Lem:ProdvNa}, each component net $(n_a^j)$ converges weak* to an element $n_a$ in the unit ball of $\prod_a \End(\varphi_a)$. We verify that $n:=(n_a)$ is a 2-modification in $\End(\varphi)$. By the axioms of a $\rm W^*$ 2-category (see ($\rm W^*$2') in \cite[Prop.~2.4]{2105.12010}), $1_{F(X)}\otimes_{F(b)}-$, $-\otimes_{G(a)}1_{G(X)}$, $\varphi_X\star -$, and $-\star\varphi_X$ are normal operations on 2-cells in $\cB$. We thus have $$ \varphi_X\star(1_{F(X)}\otimes_{F(b)} n_b) = \lim_k\varphi_X\star(1_{F(X)}\otimes_{F(b)} (n_k)_b) = \lim_k((n_k)_a\otimes_{G(a)}1_{G(X)})\star\varphi_X= (n_a\otimes_{G(a)}1_{G(X)})\star\varphi_X, $$ which implies that $n$ is a 2-modification $\varphi\Rrightarrow\varphi$. We now show that 1-composition with an identity 2-transformation is normal. Let $\varphi:F\Rightarrow G$; we show $1_\varphi\otimes-$ is normal. Suppose $n^j,n:\psi\Rrightarrow\gamma$ are modifications with $n^j \to n$ weak*. Again by Lemma \ref{Lem:ProdvNa}, $n^j_a \to n_a$ weak* for all $a\in \cA$. 
Since $1_{\varphi(a)}\otimes-$ is normal, $$ (1_\varphi\otimes n^j)_a=1_{\varphi(a)}\otimes n^j_a\to 1_{\varphi(a)}\otimes n_a=(1_\varphi\otimes n)_a, $$ for each $a\in\cA$, which implies $1_\varphi\otimes n_i\to 1_\varphi\otimes n$ weak* as desired. Similarly, $-\otimes 1_\varphi$ is normal. This completes the proof. \end{proof} \subsection{The 3-category of 2-categories} It is well-known that 2-categories form a 3-category $2{\mathsf{Cat}}$, whose hom 2-categories $2{\mathsf{Cat}}(\cA\to \cB)$ are given by ${\sf Fun}(\cA\to \cB)$. We now explain 1-composition in this 3-category following \cite[\S5.1]{MR3076451}. We will then discuss the 3-subcategories $\rm C^*2{\mathsf{Cat}}$ and $\rm W^*2{\mathsf{Cat}}$. \begin{construction} \label{const:1CompositionIn2Cat} By \cite[Prop.~5.1]{MR3076451}, given 2-categories $\cA,\cB,\cC$, there is a 2-functor $$ \circ: 2{\mathsf{Cat}}(\cB\to \cC) \times 2{\mathsf{Cat}}(\cA\to \cB) \to 2{\mathsf{Cat}}(\cA\to \cC). $$ The 2-functor $\circ$ is the 1-composition in $2{\mathsf{Cat}}$. We now describe its definition on 1-morphisms, 2-morphism, and 3-morphisms in $2{\mathsf{Cat}}$. \item[\underline{1-composition of 1-morphisms}:] For $F\in 2{\mathsf{Cat}}(\cA\to\cB)$ and $G\in 2{\mathsf{Cat}}(\cB\to\cC)$ the 1-composite 2-functor $G\circ F\in 2{\mathsf{Cat}}(\cA\to \cC)$ is given by: \begin{itemize} \item $(G\circ F)(a)=G(F(a))$ for $a\in\cA$, $(G\circ F)(X)=G(F(X))$ for $X\in\cA(a\to b)$, and $(G\circ F)(f)=G(F(f))$ for $f\in\cA(X\Rightarrow Y)$. \item $(G\circ F)^1_a:=G(F^1_a)\star G^1_{F(a)}\in \cC(1_{G(F(a))}\Rightarrow G(F(1_a)))$ for $a\in\cA$. \item $(G\circ F)^2_{X,Y}:=G(F^2_{X,Y})\star G^2_{F(X),F(Y)}\in\cC(G(F(X))\otimes G(F(Y))\Rightarrow G(F(X\otimes Y)))$ for $X\in\cA(a\to b)$ and $Y\in\cA(b\to c)$. \end{itemize} \item[\underline{1-composition of 2-morphisms}:] Suppose $F,F'\in 2{\mathsf{Cat}}(\cA\to\cB)$ and $G,G'\in 2{\mathsf{Cat}}(\cB\to\cC)$. 
In the remainder of this definition, we use the following texture decorations to denote the following composite 2-functors: $$ \tikzmath{ \filldraw[primedregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } = GF \qquad\qquad \tikzmath{ \filldraw[boxregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } = GF' \qquad\qquad \tikzmath{ \filldraw[plusregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } = G'F \qquad\qquad \tikzmath{ \filldraw[starregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } = G'F'. $$ Given 2-transformations $\varphi \in 2{\mathsf{Cat}}(F\Rightarrow F')$ and $\gamma\in2{\mathsf{Cat}}(G\Rightarrow G')$, we define $\gamma\circ F\in 2{\mathsf{Cat}}(G\circ F\Rightarrow G'\circ F)$ component-wise by \begin{itemize} \item For $a\in \cA$, we define $(\gamma \circ F)_a := \gamma_{F(a)}$, and \item for $X\in \cA(a\to b)$, we define $$ (\gamma\circ F)_X:= \gamma_{F(X)} = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,2.4); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[plusregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[plusregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. 
(1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,2.4); \draw[saw,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.8) -- (0,2.4); \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$G(F(X))$}}; \node at (1.2,2.6) {\scriptsize{$G'(F(X))$}}; \node at (1.2,-.2) {\scriptsize{$\gamma_{F(b)}$}}; \node at (0,2.6) {\scriptsize{$\gamma_{F(a)}$}}; } \qquad\qquad \forall\, X\in \cA(a\to b). $$ \end{itemize} Similarly, we define $G\circ\varphi\in 2{\mathsf{Cat}}(G\circ F\Rightarrow G\circ F')$ by \begin{itemize} \item For $a\in \cA$, we define $(G\circ\varphi)_a := G(\varphi(a))$, and \item for $X\in \cA(a\to b)$, we define $$ (G\circ\varphi)_X:= \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,2.4); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,2.4); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,1.8) -- (0,2.4); \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$G(F(X))$}}; \node at (1.2,2.6) {\scriptsize{$G(F'(X))$}}; \node at (1.2,-.2) {\scriptsize{$G(\varphi_b)$}}; \node at (0,2.6) {\scriptsize{$G(\varphi_a)$}}; } := \left( \resizebox{.35\hsize}{!}{$ \begin{aligned} G(F(X))\otimes G(\varphi_b) &\xrightarrow{G^2} G(F(X)\otimes\varphi_b) \\&\xrightarrow{G(\varphi_X)} G(\varphi_a\otimes F'(X)) \\&\xrightarrow{(G^2)^\dag} G(\varphi_a)\otimes G(F(X)) \end{aligned} $} \right) \qquad\qquad \forall\,X\in \cA(a\to b). $$ \end{itemize} We then use the \emph{cubical convention} to define the 1-composite $\gamma\circ\varphi:=(G\circ\varphi)\otimes(\gamma\circ F') \in2{\mathsf{Cat}}(G\circ F\Rightarrow G'\circ F')$, whose components are then given by \[ \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,2.4); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[starregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[starregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,2.4); \draw[black,thick] (1.15,0) -- (1.15,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (-.05,1.8) -- (-.05,2.4); \draw[saw,thick] (1.25,0) -- (1.25,.65) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(.05,1.85) -- (.05,2.4); \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (-.1,-.2) {\scriptsize{$G(F(X))$}}; \node at (1.3,2.6) {\scriptsize{$G'(F'(X))$}}; \node at (1.3,-.2) {\scriptsize{$(\gamma\circ\varphi)_b$}}; \node at (-.1,2.6) {\scriptsize{$(\gamma\circ\varphi)_a$}}; } \ :=\ \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-1.8,-1.8) rectangle (1.8,1.8); \filldraw[primedregion=gray!30] (-1.2,-1.8) -- (-1.2,-1.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-.6,-.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.2,-.2) -- (-1.2,1.8) -- (-.55,1.8) -- (-.55,2.7) -- (-1.8,2.7) -- (-1.8,-1.8); \filldraw[primedregion=gray!55](0,-1.8) -- (0,-1.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (-.6,-.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (-1.2,-1.4) -- (-1.2,-1.8); \filldraw[boxregion=gray!30] (-1.2,1.8) -- (-1.2,-.2) .. controls ++(270:.4cm) and ++(135:.2cm) .. (-.6,-.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (0,-.2) -- (0,.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.4) -- (0,1.8); \filldraw[boxregion=gray!55] (0,-1.8) -- (0,-1.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (-.6,-.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (0,-.2) -- (0,.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,.8) .. controls ++(-45:.2cm) and ++(90:.4cm) .. (1.2,.2) -- (1.2,-1.8); \filldraw[starregion=gray!30] (-.55,2.7) -- (-.55,1.8) -- (0,1.8) -- (0,1.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.4) -- (1.2,2.7); \filldraw[starregion=gray!55] (1.2,-1.8) -- (1.2,.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.4) -- (1.2,2.7) -- (1.8,2.7) -- (1.8,-1.8); \end{scope} \draw[\XColor,thick] (-1.2,-1.8) -- (-1.2,-1.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,-.2) -- (0,.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(1.2,1.4) -- (1.2,1.8); \draw[black,thick] (0,-1.8) -- (0,-1.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (-1.2,-.2) -- (-1.2,1.8); \draw[saw,thick] (1.2,-1.8) -- (1.2,.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.4) -- (0,1.8); \filldraw[white] (-.6,-.8) circle (.1cm); \draw[thick] (-.6,-.8) circle (.1cm); \filldraw[white] (.6,.8) circle (.1cm); \draw[thick] (.6,.8) circle (.1cm); \node at (-1.2,-2) {\scriptsize{$G(F(X))$}}; \node at (0,-2) {\scriptsize{$G(\varphi_b)$}}; \node at (1.2,-2) {\scriptsize{$\gamma_{F'(b)}$}}; \node at (-1.2,2) {\scriptsize{$G(\varphi_a)$}}; \node at (0,2) {\scriptsize{$\gamma_{F'(a)}$}}; \node at (1.3,2) {\scriptsize{$G'(F'(X))$}}; } \qquad\qquad \begin{aligned} \tikzmath{ \filldraw[primedregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= GF \\ \tikzmath{ \filldraw[boxregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= GF' \\ \tikzmath{ \filldraw[starregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= G'F'. \end{aligned} \] \item[\underline{1-composition of 3-morphisms}:] Suppose $F,F'\in 2{\mathsf{Cat}}(\cA\to\cB)$ and $G,G'\in 2{\mathsf{Cat}}(\cB\to\cC)$ are 2-functors, $\varphi,\varphi'\in 2{\mathsf{Cat}}(F\Rightarrow F')$ and $\gamma,\gamma'\in 2{\mathsf{Cat}}(G\Rightarrow G')$ are 2-transformations, and let $n\in 2{\mathsf{Cat}}(\varphi\Rrightarrow\varphi')$ and $k\in 2{\mathsf{Cat}}(\gamma\Rrightarrow\gamma')$ be 2-modifications. We define $k\circ n\in 2{\mathsf{Cat}}(\gamma\circ\varphi\Rrightarrow\gamma'\circ\varphi')$ component-wise at $a\in \cA$ by $(k\circ n)_a:= G(n_a)\otimes k_{F(a)}$ as 1-composition of 2-cells in $\cC$. 
\item[\underline{Interchanger:}] For each pair of 1-composable 2-transformations $\varphi,\gamma$, there is a distinguished invertible modification $\chi^{\varphi,\gamma}:(G\circ \varphi)\otimes(\gamma\circ F')\Rrightarrow (\gamma\circ F)\otimes(G'\circ \varphi)$ between the \emph{cubical} and \emph{opcubical} 1-composition conventions for 2-morphisms called the \emph{interchanger}, which is defined component-wise by \[ \chi^{\varphi,\gamma}_a:= \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,2.4); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[boxregion=gray!30] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[plusregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[starregion=gray!30] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[black,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,2.4); \draw[saw,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,1.8) -- (0,2.4); \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$G(\varphi_a)$}}; \node at (1.2,2.6) {\scriptsize{$G'(\varphi_a)$}}; \node at (1.2,-.2) {\scriptsize{$\gamma_{F'(a)}$}}; \node at (0,2.6) {\scriptsize{$\gamma_{F(a)}$}}; } =\gamma_{\varphi_a} \qquad\qquad \forall\,a\in \cA \qquad\qquad \begin{aligned} \tikzmath{ \filldraw[primedregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= GF & \tikzmath{ \filldraw[boxregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= GF' \\ \tikzmath{ \filldraw[plusregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= G'F & \tikzmath{ \filldraw[starregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= G'F'. \end{aligned} \] (Recall here that $\varphi_a\in\cB(F(a)\to F'(a))$.) The interchanger modification is used to prove the \emph{interchange relation} between $\circ, \otimes$. In more detail, given $\varphi\in 2{\mathsf{Cat}}(F\Rightarrow F')$, $\varphi'\in 2{\mathsf{Cat}}(F'\Rightarrow F'')$, $\psi\in 2{\mathsf{Cat}}(G\Rightarrow G')$, and $\psi'\in 2{\mathsf{Cat}}(G'\Rightarrow G'')$, the interchanger provides an invertible modification $$ (\psi\circ\varphi)\otimes(\psi'\circ\varphi') \Rrightarrow (\psi\otimes\psi')\circ(\varphi\otimes\varphi'). $$ We refer the reader to \cite[p.88]{MR3076451} for more details. \end{construction} By \cite[p.115]{2002.06055}, $\circ$ is strictly associative. That is, for $F\in 2{\mathsf{Cat}}(\cA\to\cB)$, $G\in2{\mathsf{Cat}}(\cB\to \cC)$ and $H\in2{\mathsf{Cat}}(\cC\to\cD)$, then $(H\circ G)\circ F=H\circ (G\circ F):\cA\to\cD$. 
By \cite[Props.~5.3 and 5.5]{MR3076451}, we may choose our adjoint equivalences $a:\circ(\circ\times \mathbf{1})\Rightarrow \circ(\mathbf{1}\times \circ)$, $\ell:\circ(I_\cA\times \mathbf{1})\Rightarrow \mathbf{1}$, and $r:\circ(\mathbf{1}\times I_\cA)\Rightarrow \mathbf{1}$ to be identity transformations, whose inverses are also identity transformations. Thus by \cite[Thm.~5.7]{MR3076451}, $2{\mathsf{Cat}}$ is a 3-category. \begin{defn} The 3-category $\rm C^*2{\mathsf{Cat}}$ of $\rm C^*$ 2-categories is the 3-subcategory of $2{\mathsf{Cat}}$ whose: \begin{itemize} \item objects are $\rm C^*$ 2-categories, \item 1-morphisms are $\dag$ 2-functors, \item 2-morphisms are $\dag$ 2-transformations \item 3-morphisms are bounded 2-modifications \end{itemize} Observe that all higher coherence data in this 3-category is unitary. The 3-category $\rm W^*2{\mathsf{Cat}}$ of $\rm W^*$ 2-categories is the full 3-subcategory of $\rm C^*2{\mathsf{Cat}}$ whose objects are $\rm W^*$ 2-categories and whose 1-morphisms are normal $\dag$ 2-functors. Observe that $\rm C^*2{\mathsf{Cat}}$ and $\rm W^*2{\mathsf{Cat}}$ may be equipped with $\dag$-structures making them into $\dag$ 3-categories. Indeed, all hom 2-categories are $\rm C^*/W^*$ by Proposition \ref{prop:FunDagC*W*}, 1-composition 2-functors are clearly compatible with the $\dag$-structure, and strictness of associativity of $\circ$ means all coheretors are inherently unitary. \end{defn} \subsection{3-endofunctors on \texorpdfstring{$2{\mathsf{Cat}}$}{2Cat}} In this section, we give a graphical definition of a (weak) 3-endofunctor $\Phi$ on $2{\mathsf{Cat}}$. The definition is considerably easier due to strictness of 1-composition $\circ$. Our treatment is adapted from \cite[\S4.3]{MR3076451}. 
Beyond an assignment of a $k$-morphism in $2{\mathsf{Cat}}$ for every $k$-morphism in $2{\mathsf{Cat}}$, $\Phi$ satisfies the following properties: \begin{itemize} \item $\Phi$ is a 2-functor on all hom 2-categories $2{\mathsf{Cat}}(\cA\to \cB)={\sf Fun}(\cA\to\cB)$ in $2{\mathsf{Cat}}$. That is, for all transformations $\varphi\in 2{\mathsf{Cat}}(F\Rightarrow F')$ and $\psi\in 2{\mathsf{Cat}}(F'\Rightarrow F'')$ for $F,F',F'': \cA \to \cB$, there exist invertible modifications, $\Phi^\otimes_{\varphi,\psi}:\Phi(\varphi)\otimes\Phi(\psi)\Rrightarrow\Phi(\varphi\otimes\psi)$ and $\Phi^\otimes_{F}:1_{\Phi(F)}\Rrightarrow \Phi(1_{F})$, which we represent graphically by \[ \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,.3) rectangle (.7,1.7); \filldraw[primedregion=white] (-.7,0) rectangle (.7,2); \filldraw[boxregion=white] (.2,0) -- (.2,1) -- (.05,1) -- (.05,2) -- (-.05,2) -- (-.05,1) -- (-.2,1) -- (-.2,0); \filldraw[plusregion=white] (.2,0) -- (.2,1) -- (.05,1) -- (.05,2) -- (.7,2) -- (.7,0); \end{scope} \draw[black,thick] (-.2,.3) -- (-.2,1); \draw[snake,thick] (.2,.3) -- (.2,1); \draw[black,thick] (-.05,1) -- (-.05,1.7); \draw[snake,thick] (.05,1) -- (.05,1.7); \roundNbox{unshaded}{(0,1)}{.3}{.15}{.15}{\scriptsize{$\Phi^\otimes_{\varphi,\psi}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,.3) rectangle (.7,1.7); } \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.6,.3) rectangle (.6,1.7); \filldraw[primedregion=white] (-.6,0) rectangle (.6,2); \end{scope} \draw[thick,dotted] (0,.3) -- (0,1.7); \roundNbox{unshaded}{(0,1)}{.3}{0}{0}{\scriptsize{$\Phi^\otimes_F$}}; \draw[thin, dotted, rounded corners = 5pt] (-.6,.3) rectangle (.6,1.7); } \qquad\qquad \begin{aligned} \tikzmath{ \filldraw[primedregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= \Phi(F) & \tikzmath{ \begin{scope} \filldraw[primedregion=white, rounded corners = 5pt] (0,0) rectangle (.3,.6); 
\filldraw[boxregion=white, rounded corners = 5pt] (.3,0) rectangle (.6,.6); \end{scope} \draw[black,thick] (.3,0) -- (.3,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= \Phi(\varphi) \\ \tikzmath{ \filldraw[boxregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= \Phi(F') & \tikzmath{ \begin{scope} \filldraw[boxregion=white, rounded corners = 5pt] (0,0) rectangle (.3,.6); \filldraw[plusregion=white, rounded corners = 5pt] (.3,0) rectangle (.6,.6); \end{scope} \draw[snake,thick] (.3,0) -- (.3,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= \Phi(\psi) \\ \tikzmath{ \filldraw[plusregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= \Phi(F'') & \tikzmath{ \begin{scope} \filldraw[primedregion=white, rounded corners = 5pt] (-.35,0) rectangle (-.05,.6); \filldraw[boxregion=white, rounded corners = 5pt] (-.05,0) rectangle (.05,.6); \filldraw[plusregion=white, rounded corners = 5pt] (.05,0) rectangle (.35,.6); \end{scope} \draw[black,thick] (-.05,0) -- (-.05,.6); \draw[snake,thick] (.05,0) -- (.05,.6); \draw[thin, dotted, rounded corners = 5pt] (-.35,0) rectangle (.35,.6); } &= \Phi(\varphi\otimes\psi) \end{aligned} \] These modifications are subject to the usual associativity and unitality coherence axioms: \[ \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.8,.3) rectangle (.8,3.7); \filldraw[primedregion=white] (-.4,0) -- (-.4,1) -- (-.25,1) -- (-.25,2) -- (-.1,2) -- (-.1,4) -- (-.8,4) -- (-.8,0); \filldraw[boxregion=white] (0,0) -- (0,1) -- (-.15,1) -- (-.15,2) -- (0,2) -- (0,4) -- (-.1,4) -- (-.1,2) -- (-.25,2) -- (-.25,1) -- (-.4,1) -- (-.4,0); \filldraw[plusregion=white] (.4,0) -- (.4,2) -- (.1,2) -- (.1,4) -- (0,4) -- (0,2) -- (-.15,2) -- (-.15,1) -- (0,1) -- (0,0); \filldraw[starregion=white] (.4,0) -- (.4,2) -- (.1,2) -- (.1,4) -- (.8,4) 
-- (.8,0); \end{scope} \draw[black,thick] (-.4,.3) -- (-.4,1); \draw[snake,thick] (0,.3) -- (0,1); \draw[saw,thick] (.4,.3) -- (.4,2); \draw[black,thick] (-.25,1) -- (-.25,2); \draw[snake,thick] (-.15,1) -- (-.15,2); \draw[black,thick] (-.1,2) -- (-.1,3.7); \draw[snake,thick] (0,2) -- (0,3.7); \draw[saw,thick] (.1,2) -- (.1,3.7); \roundNbox{unshaded}{(-.2,1)}{.3}{.05}{.05}{\scriptsize{$\Phi^\otimes_{\varphi,\psi}$}}; \roundNbox{unshaded}{(0,2)}{.3}{.25}{.25}{\scriptsize{$\Phi^\otimes_{\varphi\otimes \psi,\gamma}$}}; \roundNbox{unshaded}{(0,3)}{.3}{.25}{.25}{\scriptsize{$\Phi(\alpha^{\otimes})$}}; \draw[thin, dotted, rounded corners = 5pt] (-.8,.3) rectangle (.8,3.7); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.8,.3) rectangle (.8,3.7); \filldraw[primedregion=white] (-.4,0) -- (-.4,3) -- (-.1,3) -- (-.1,4) -- (-.8,4) -- (-.8,0); \filldraw[boxregion=white] (-.4,0) -- (-.4,3) -- (-.1,3) -- (-.1,4) -- (0,4) -- (0,3) -- (.15,3) -- (.15,2) -- (0,2) -- (0,0); \filldraw[plusregion=white] (.4,0) -- (.4,2) -- (.25,2) -- (.25,3) -- (.1,3) -- (.1,4) -- (0,4) -- (0,3) -- (.15,3) -- (.15,2) -- (0,2) -- (0,0); \filldraw[starregion=white] (.4,0) -- (.4,2) -- (.25,2) -- (.25,3) -- (.1,3) -- (.1,4) -- (.8,4) -- (.8,0); \end{scope} \draw[black,thick] (-.4,.3) -- (-.4,3); \draw[snake,thick] (0,.3) -- (0,2); \draw[saw,thick] (.4,.3) -- (.4,2); \draw[snake,thick] (.15,2) -- (.15,3); \draw[saw,thick] (.25,2) -- (.25,3); \draw[black,thick] (-.1,3) -- (-.1,3.7); \draw[snake,thick] (0,3) -- (0,3.7); \draw[saw,thick] (.1,3) -- (.1,3.7); \roundNbox{unshaded}{(0,1)}{.3}{.25}{.25}{\scriptsize{$\alpha^\otimes$}}; \roundNbox{unshaded}{(.2,2)}{.3}{.05}{.05}{\scriptsize{$\Phi^\otimes_{\psi,\gamma}$}}; \roundNbox{unshaded}{(-.1,3)}{.3}{.15}{.35}{\scriptsize{$\Phi^\otimes_{\varphi,\psi\otimes \gamma}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.8,.3) rectangle (.8,3.7); } \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,.3) rectangle (.7,3.7); 
\filldraw[primedregion=white] (-.2,0) -- (-.2,2) -- (-.05,2) -- (-.05,3) -- (0,3) -- (0,4) -- (-.7,4) -- (-.7,0); \filldraw[boxregion=white] (-.2,0) -- (-.2,2) -- (-.05,2) -- (-.05,3) -- (0,3) -- (0,4) -- (.7,4) -- (.7,0); \end{scope} \draw[black,thick] (-.2,.3) -- (-.2,2); \draw[thick,dotted] (.2,.3) -- (.2,2); \draw[black,thick] (-.05,2) -- (-.05,3); \draw[thick,dotted] (.05,2) -- (.05,3); \draw[black,thick] (0,3) -- (0,3.7); \roundNbox{unshaded}{(.2,1)}{.3}{0}{0}{\scriptsize{$\Phi^\otimes_{F'}$}}; \roundNbox{unshaded}{(0,2)}{.3}{.17}{.17}{\scriptsize{$\Phi^\otimes_{\varphi,1_{F'}}$}}; \roundNbox{unshaded}{(0,3)}{.3}{.17}{.17}{\scriptsize{$\Phi(\rho_\varphi^{F'})$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,.3) rectangle (.7,3.7); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,0) rectangle (.7,2); \filldraw[primedregion=white] (-.2,0) -- (-.2,1) -- (0,1) -- (0,2) -- (-.7,2) -- (-.7,0); \filldraw[boxregion=white] (-.2,0) -- (-.2,1) -- (0,1) -- (0,2) -- (.7,2) -- (.7,0); \end{scope} \draw[black,thick] (-.2,0) -- (-.2,1); \draw[thick,dotted] (.2,0) -- (.2,1); \draw[black,thick] (0,1) -- (0,2); \roundNbox{unshaded}{(0,1)}{.3}{.15}{.15}{\scriptsize{$\rho_{\Phi(\varphi)}^{\Phi(F')}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,0) rectangle (.7,2); } \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,.3) rectangle (.7,3.7); \filldraw[primedregion=white] (.2,0) -- (.2,2) -- (.05,2) -- (.05,3) -- (0,3) -- (0,4) -- (-.7,4) -- (-.7,0); \filldraw[boxregion=white] (.2,0) -- (.2,2) -- (.05,2) -- (.05,3) -- (0,3) -- (0,4) -- (.7,4) -- (.7,0); \end{scope} \draw[black,thick] (.2,.3) -- (.2,2); \draw[thick,dotted] (-.2,.3) -- (-.2,2); \draw[black,thick] (.05,2) -- (.05,3); \draw[thick,dotted] (-.05,2) -- (-.05,3); \draw[black,thick] (0,3) -- (0,3.7); \roundNbox{unshaded}{(-.2,1)}{.3}{0}{0}{\scriptsize{$\Phi^\otimes_F$}}; \roundNbox{unshaded}{(0,2)}{.3}{.15}{.15}{\scriptsize{$\Phi^\otimes_{1_F,\varphi}$}}; 
\roundNbox{unshaded}{(0,3)}{.3}{.15}{.15}{\scriptsize{$\Phi(\lambda_\varphi^F)$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,.3) rectangle (.7,3.7); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,0) rectangle (.7,2); \filldraw[primedregion=white] (.2,0) -- (.2,1) -- (0,1) -- (0,2) -- (-.7,2) -- (-.7,0); \filldraw[boxregion=white] (.2,0) -- (.2,1) -- (0,1) -- (0,2) -- (.7,2) -- (.7,0); \end{scope} \draw[black,thick] (.2,0) -- (.2,1); \draw[thick,dotted] (-.2,0) -- (-.2,1); \draw[black,thick] (0,1) -- (0,2); \roundNbox{unshaded}{(0,1)}{.3}{.15}{.15}{\scriptsize{$\lambda_{\Phi(\varphi)}^{\Phi(F)}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,0) rectangle (.7,2); } \] \item We have 1-compositor adjoint equivalence transformations $\Phi^\circ_{G,F}:\Phi(G)\circ \Phi(F)\Rightarrow \Phi(G\circ F)$ for all $F\in 2{\mathsf{Cat}}(\cA\to\cB)$ and $G\in 2{\mathsf{Cat}}(\cB\to\cC)$ and $\Phi^\circ_{\cA}:1_{\Phi(\cA)}\Rightarrow \Phi(1_{\cA})$ for all $\cA\in 2{\mathsf{Cat}}$. 
These transformations come equipped with an invertible associator modification $\omega^{\circ}_{H,G,F}$: \[ \hspace*{.5cm} \tikzmath{ \draw[gray!75,thick] (-.12,.16) -- (-.12,2.16); \draw[gray!75,thick] (-.3,.4) -- (-.3,2.4); \draw[gray!75,thick] (-.48,.64) -- (-.48,2.64); \draw[gray!75,thick] (1.7,.4) -- (1.7,2.4); \draw[gray!75,thick] (-.48,.64) to[bend left=15] (.28,.52); \draw[gray!75,thick] (-.3,.4) to[bend right=15] (.28,.52); \draw[gray!75,thick] (.28,.52) to[bend left=14] (1.04,.4); \draw[gray!75,thick] (-.12,.16) to[bend right=12] (1.04,.4); \draw[gray!75,thick] (1.04,.4) -- (1.7,.4); \draw[gray!75,thick] (-.12,2.16) to[bend right=15] (.46,2.26); \draw[gray!75,thick] (-.3,2.4) to[bend left=15] (.46,2.26); \draw[gray!75,thick] (.46,2.26) to[bend right=15] (1.04,2.4); \draw[gray!75,thick] (-.48,2.6) to[bend left=12] (1.04,2.4); \draw[gray!75,thick] (1.04,2.4) -- (1.7,2.4); \draw[thick] (.28,.52) to[bend left=15] (.7,1.4); \draw[thick] (1.04,.4) to[bend right=15] (.7,1.4); \draw[thick] (.46,2.26) to[bend right=15] (.7,1.4); \draw[thick] (1.04,2.4) to[bend left=15] (.7,1.4); \filldraw[white] (.7,1.4) circle (.07cm); \draw[thick] (.7,1.4) circle (.07cm); \node at (1,1.4) {\scriptsize{$\omega^\circ$}}; \filldraw[gray!75] (.28,.52) circle (.05cm); \filldraw[gray!75] (1.04,.4) circle (.05cm); \filldraw[gray!75] (.46,2.26) circle (.05cm); \filldraw[gray!75] (1.04,2.4) circle (.05cm); \node at (-.6,.2) [rotate=-53] {$\to$}; \node at (1,-.3) {$\Rightarrow$}; \node at (2.3,1) [rotate=90] {$\Rrightarrow$}; \draw[gray!30] (0,0) rectangle (2,2); \draw[gray!30] (0,0) -- (-.6,.8) -- (-.6,2.8) -- (1.4,2.8) -- (2,2); \draw[gray!30] (-.6,2.8) -- (0,2); \draw[gray!30,dashed] (-.6,.8) -- (1.4,.8) -- (1.4,2.8); \draw[gray!30,dashed] (1.4,.8) -- (2,0); } = \begin{tikzcd}[row sep=1em, column sep=1em] & (HG,F) \arrow[dr,Rightarrow,"\scriptstyle\Phi^\circ_{HG,F}"] \\ (H,G,F) \arrow[ur,Rightarrow,"\scriptstyle\Phi^\circ_{H,G}\circ 1_{\Phi(F)}"] 
\arrow[dr,Rightarrow,swap,"\scriptstyle1_{\Phi(H)}\circ \Phi^\circ_{G,F}"] && (HGF) \\ & (H,GF) \arrow[ur,Rightarrow,swap,"\scriptstyle\Phi^\circ_{H,GF}"] \arrow[uu,triplecd,shorten <= 1em, shorten >= 1em,"\scriptstyle\omega^\circ_{H,G,F}"] \end{tikzcd} = \tikzmath[scale=.75, transform shape]{ \draw[thick,black] (-.8,-1.2) -- (-.8,1.2); \draw[thick,black] (.8,-1.2) -- (.8,1.2); \roundNbox{unshaded}{(0,0)}{.4}{.6}{.6}{\normalsize{$\omega^\circ_{H,G,F}$}}; \node at (-1.8,0) {\scriptsize{$(H,G,F)$}}; \node at (1.8,0) {\scriptsize{$(HGF)$}}; \node at (0,-.9) {\scriptsize{$(H,GF)$}}; \node at (0,.9) {\scriptsize{$(HG,F)$}}; \node at (-.8,-1.4) {\scriptsize{$1_{\Phi(H)}\circ \Phi^\circ_{G,F}$}}; \node at (-.8,1.4) {\scriptsize{$\Phi^\circ_{H,G}\circ 1_{\Phi(F)}$}}; \node at (.8,-1.4) {\scriptsize{$\Phi^\circ_{H,GF}$}}; \node at (.8,1.4) {\scriptsize{$\Phi^\circ_{HG,F}$}}; \draw[thin, dotted, rounded corners = 5pt] (-2.6,-1.2) rectangle (2.5,1.2); }\,. \] Here, we use the abbreviated notation $(GF):=\Phi(G\circ F)$ and $(G,F):=\Phi(G)\circ\Phi(F)$, so that $(K,HG,F):=\Phi(K)\circ\Phi(H\circ G)\circ\Phi(F)$ and $\Phi^\circ_{H,GF}:=\Phi^\circ_{H,G\circ F}:(H,GF)\Rightarrow (HGF)$. 
The associator $\omega^\circ$ satisfies the coherence axiom \[ \tikzmath[scale=.75, transform shape]{ \draw[thick,black] (-2,-2.4) -- (-2,2.4); \draw[thick,black] (0,-2.4) -- (0,2.4); \draw[thick,black] (2,-2.4) -- (2,2.4); \roundNbox{unshaded}{(-1,-1.2)}{.4}{.85}{.85}{\normalsize{$1_{\Phi(K)}\circ\omega^\circ_{H,G,F}$}}; \roundNbox{unshaded}{(-1,1.2)}{.4}{.85}{.85}{\normalsize{$\omega^\circ_{K,H,G}\circ 1_{\Phi(F)}$}}; \roundNbox{unshaded}{(1,0)}{.4}{.8}{.8}{\normalsize{$\omega^\circ_{K,HG,F}$}}; \node at (-3,0) {\scriptsize{$(K,H,G,F)$}}; \node at (-1.1,0) {\scriptsize{$(K,HG,F)$}}; \node at (-1,-2.1) {\scriptsize{$(K,H,GF)$}}; \node at (-1,2.1) {\scriptsize{$(KH,G,F)$}}; \node at (1,-2.1) {\scriptsize{$(K,HGF)$}}; \node at (1,2.1) {\scriptsize{$(KHG,F)$}}; \node at (3,0) {\scriptsize{$(KHGF)$}}; \draw[thin, dotted, rounded corners = 5pt] (-4,-2.4) rectangle (4,2.4); } = \tikzmath[scale=.75, transform shape]{ \draw[thick,black] (-2,-2.4) -- (-2,2.4); \draw[thick,black] (0,-2.4) -- (0,2.4); \draw[thick,black] (2,-2.4) -- (2,2.4); \roundNbox{unshaded}{(1,-1.2)}{.4}{.8}{.8}{\normalsize{$\omega^\circ_{K,H,GF}$}}; \roundNbox{unshaded}{(1,1.2)}{.4}{.8}{.8}{\normalsize{$\omega^\circ_{KH,G,F}$}}; \roundNbox{unshaded}{(-1,0)}{.4}{.8}{.8}{\normalsize{$\cong$}}; \node at (-3.2,0) {\scriptsize{$(K,H,G,F)$}}; \node at (1.1,0) {\scriptsize{$(KH,GF)$}}; \node at (-1,-2.1) {\scriptsize{$(K,H,GF)$}}; \node at (-1,2.1) {\scriptsize{$(KH,G,F)$}}; \node at (1,-2.1) {\scriptsize{$(K,HGF)$}}; \node at (1,2.1) {\scriptsize{$(KHG,F)$}}; \node at (2.8,0) {\scriptsize{$(KHGF)$}}; \draw[thin, dotted, rounded corners = 5pt] (-4.2,-2.4) rectangle (3.8,2.4); }\,, \] where the isomorphism on the left of the right hand side is the interchanger from Construction \ref{const:1CompositionIn2Cat}. 
Finally, we have invertible unitor modifications $\ell^\circ_F$ and $r^\circ_F$: \[ \tikzmath{ \draw[gray!75,thick] (-.4,.533) rectangle (1.6,2.533); \draw[gray!75,thick,dashed] (-.2,2.267) to[bend right=20] (.6,2.533); \draw[gray!75,thick,dashed] (-.2,2.267) -- (-.2,.267); \draw[gray!75,thick,dashed] (-.2,.267) -- (.4,.267); \draw[gray!75,thick,dashed] (.6,2.533) to[bend right=10] (.4,.267); \filldraw[gray!75] (.6,2.533) circle (.05cm); \node at (-.6,.2) [rotate=-53] {$\to$}; \node at (1,-.3) {$\Rightarrow$}; \node at (2.3,1) [rotate=90] {$\Rrightarrow$}; \draw[gray!30] (0,0) rectangle (2,2); \draw[gray!30] (0,0) -- (-.6,.8) -- (-.6,2.8) -- (1.4,2.8) -- (2,2); \draw[gray!30] (-.6,2.8) -- (0,2); \draw[gray!30,dashed] (-.6,.8) -- (1.4,.8) -- (1.4,2.8); \draw[gray!30,dashed] (1.4,.8) -- (2,0); } = \begin{tikzcd}[row sep=1em, column sep=1em] & {} \\ (1_\cB,F) \arrow[rr,bend left=60,Rightarrow,"\scriptstyle\Phi^\circ_{1_\cB,F}"] \arrow[rr,bend right=60,Rightarrow,swap,"\scriptstyle\Phi^\circ_\cB\circ 1_{\Phi(F)}"] && (F) \\ & {} \arrow[uu,triplecd,shorten <= 1em, shorten >= 1em,"\scriptstyle\ell^\circ_F"] \end{tikzcd} = \tikzmath{ \draw[thick] (0,-.8) -- (0,.8); \roundNbox{fill=white}{(0,0)}{.3}{.1}{.1}{\scriptsize{$\ell^\circ_F$}}; \node at (-1,0) {\scriptsize{$(1_\cB,F)$}}; \node at (1,0) {\scriptsize{$(F)$}}; \node at (0,1) {\scriptsize{$\Phi^\circ_{1_\cB,F}$}}; \node at (0,-1) {\scriptsize{$\Phi^\circ_\cB\circ 1_{\Phi(F)}$}}; \draw[thin, dotted, rounded corners = 5pt] (-1.6,-.8) rectangle (1.4,.8); } \] \[ \tikzmath{ \draw[gray!75,thick] (-.2,.267) rectangle (1.8,2.267); \draw[gray!75,thick,dashed] (-.4,2.533) to[bend left=15] (.8,2.267); \draw[gray!75,thick,dashed] (-.4,.533) -- (-.4,2.533); \draw[gray!75,thick,dashed] (-.4,.533) -- (.2,.533); \draw[gray!75,thick,dashed] (.8,2.267) to[bend right=10] (.2,.533); \filldraw[gray!75] (.8,2.267) circle (.05cm); \node at (-.6,.2) [rotate=-53] {$\to$}; \node at (1,-.3) {$\Rightarrow$}; \node at (2.3,1) [rotate=90] 
{$\Rrightarrow$}; \draw[gray!30] (0,0) rectangle (2,2); \draw[gray!30] (0,0) -- (-.6,.8) -- (-.6,2.8) -- (1.4,2.8) -- (2,2); \draw[gray!30] (-.6,2.8) -- (0,2); \draw[gray!30,dashed] (-.6,.8) -- (1.4,.8) -- (1.4,2.8); \draw[gray!30,dashed] (1.4,.8) -- (2,0); } = \begin{tikzcd}[row sep=1em, column sep=1em] & {} \\ (G,1_\cB) \arrow[rr,bend left=60,Rightarrow,"\scriptstyle\Phi^\circ_{G,1_\cB}"] \arrow[rr,bend right=60,Rightarrow,swap,"\scriptstyle1_{\Phi(G)}\circ\Phi^\circ_\cB"] && (G) \\ & {} \arrow[uu,triplecd,shorten <= 1em, shorten >= 1em,"\scriptstyle r^\circ_G"] \end{tikzcd} = \tikzmath{ \draw[thick] (0,-.8) -- (0,.8); \roundNbox{fill=white}{(0,0)}{.3}{.1}{.1}{\scriptsize{$r^\circ_G$}}; \node at (-1,0) {\scriptsize{$(G,1_\cB)$}}; \node at (1,0) {\scriptsize{$(G)$}}; \node at (0,1) {\scriptsize{$\Phi^\circ_{G,1_\cB}$}}; \node at (0,-1) {\scriptsize{$1_{\Phi(G)}\circ \Phi^\circ_\cB$}}; \draw[thin, dotted, rounded corners = 5pt] (-1.6,-.8) rectangle (1.4,.8); }\,. \] These unitors satisfy the coherence axiom \[ \hspace*{1.3cm} \tikzmath{ \draw[gray!75,thick] (1.04,.4) -- (1.7,.4) -- (1.7,2.4) -- (1.04,2.4); \draw[gray!75,thick] (-.48,2.6) -- (1.04,2.4); \draw[gray!75,thick] (-.48,.6) -- (-.48,2.6); \draw[gray!75,thick] (-.48,.6) -- (1.04,.4); \draw[gray!75,thick] (-.12,2.2) -- (1.04,2.4); \draw[gray!75,thick] (-.12,.2) -- (-.12,2.2); \draw[gray!75,thick] (-.12,.2) -- (1.04,.4); \draw[gray!75,dashed,thick] (-.3,.4) -- (-.3,2.4); \draw[gray!75,dashed,thick] (-.3,2.4) to[bend left=12] (.46,2.3); \draw[gray!75,dashed,thick] (-.3,.4) to[bend right=17] (.28,.5); \draw[thick] (.28,.5) to[bend left=15] (.7,1.4); \draw[thick] (1.04,.4) to[bend right=15] (.7,1.4); \draw[thick] (.46,2.3) to[bend right=15] (.7,1.4); \draw[thick] (1.04,2.4) to[bend left=15] (.7,1.4); \filldraw[white] (.7,1.4) circle (.07cm); \draw[thick] (.7,1.4) circle (.07cm); \filldraw[gray!75] (.28,.5) circle (.05cm); \filldraw[gray!75] (1.04,.4) circle (.05cm); \filldraw[gray!75] (.46,2.3) circle (.05cm); 
\filldraw[gray!75] (1.04,2.4) circle (.05cm); \draw[gray!30] (0,0) rectangle (2,2); \draw[gray!30] (0,0) -- (-.6,.8) -- (-.6,2.8) -- (1.4,2.8) -- (2,2); \draw[gray!30] (-.6,2.8) -- (0,2); \draw[gray!30,dashed] (-.6,.8) -- (1.4,.8) -- (1.4,2.8); \draw[gray!30,dashed] (1.4,.8) -- (2,0); } = \tikzmath{ \draw[gray!75,thick] (1.04,.4) rectangle (1.7,2.4); \draw[gray!75,thick] (-.48,2.6) -- (1.04,2.4); \draw[gray!75,thick] (-.48,.6) -- (-.48,2.6); \draw[gray!75,thick] (-.48,.6) -- (1.04,.4); \draw[gray!75,thick] (-.12,2.2) -- (1.04,2.4); \draw[gray!75,thick] (-.12,.2) -- (-.12,2.2); \draw[gray!75,thick] (-.12,.2) -- (1.04,.4); \draw[gray!75,dashed,thick] (-.3,.4) -- (-.3,2.4); \draw[gray!75,dashed,thick] (-.3,2.4) to[bend left=12] (.46,2.3); \draw[gray!75,dashed,thick] (-.3,.4) to[bend right=17] (.28,.5); \draw[gray!75,dashed,thick] (.46,2.3) to[bend right=10] (.28,.5); \draw[gray!75,dashed,thick] (-.3,1.4) -- (.3,1.4); \filldraw[gray!75] (.28,.5) circle (.05cm); \filldraw[gray!75] (1.04,.4) circle (.05cm); \filldraw[gray!75] (.46,2.3) circle (.05cm); \filldraw[gray!75] (1.04,2.4) circle (.05cm); \draw[gray!30] (0,0) rectangle (2,2); \draw[gray!30] (0,0) -- (-.6,.8) -- (-.6,2.8) -- (1.4,2.8) -- (2,2); \draw[gray!30] (-.6,2.8) -- (0,2); \draw[gray!30,dashed] (-.6,.8) -- (1.4,.8) -- (1.4,2.8); \draw[gray!30,dashed] (1.4,.8) -- (2,0); } \quad \leftrightarrow\quad \tikzmath[scale=.75, transform shape]{ \draw[thick,black] (-.8,-1.2) -- (-.8,1.2); \draw[thick,black] (.8,-1.2) -- (.8,1.2); \roundNbox{unshaded}{(0,0)}{.4}{.6}{.6}{\normalsize{$\omega^\circ_{G,1_\cB,F}$}}; \node at (-1.8,0) {\scriptsize{$(G,1_{\cB},F)$}}; \node at (1.6,0) {\scriptsize{$(GF)$}}; \node at (0,.9) {\scriptsize{$(G1_{\cB},F)$}}; \node at (0,-.9) {\scriptsize{$(G,1_{\cB}F)$}}; \node at (-.8,1.4) {\scriptsize{$\Phi^\circ_{G,1_\cB}\circ 1_{\Phi(F)}$}}; \node at (-.8,-1.4) {\scriptsize{$1_{\Phi(G)}\circ\Phi^\circ_{1_\cB,F}$}}; \node at (.8,-1.4) {\scriptsize{$\Phi^\circ_{G,F}$}}; \node at (.8,1.4) 
{\scriptsize{$\Phi^\circ_{G,F}$}}; \draw[thin, dotted, rounded corners = 5pt] (-2.7,-1.2) rectangle (2.2,1.2); } = \tikzmath[scale=.75, transform shape]{ \draw[thick,black] (-.8,-1.9) -- (-.8,1.5); \draw[thick,black] (.8,-1.9) -- (.8,1.5); \roundNbox{unshaded}{(-.8,.7)}{.4}{.6}{.6}{\normalsize{$r^\circ_G \circ 1_{\Phi(F)}$}}; \roundNbox{unshaded}{(-.8,-.7)}{.4}{.6}{.6}{\scriptsize{$1_{\Phi(G)}\circ (\ell^\circ_F)^{-1}$}}; \node at (-1.7,-1.5) {\scriptsize{$(G,1_{\cB},F)$}}; \node at (0,-1.5) {\scriptsize{$(G,F)$}}; \node at (1.5,-1.5) {\scriptsize{$(GF)$}}; \node at (-1.3,0) {\scriptsize{$1_{\Phi(G)}\circ \Phi^{\circ}_{\cB}\circ 1_{\Phi(F)}$}}; \node at (-.8,-2.1) {\scriptsize{$1_{\Phi(G)}\circ\Phi^\circ_{1_\cB,F}$}}; \node at (-.8,1.7) {\scriptsize{$\Phi^\circ_{G,1_\cB}\circ 1_{\Phi(F)}$}}; \node at (.8,-2.1) {\scriptsize{$\Phi^\circ_{G,F}$}}; \draw[thin, dotted, rounded corners = 5pt] (-2.7,-1.9) rectangle (2.2,1.5); }\,. \] Here, we note that $F\circ 1_{\cA}=F=1_{\cB}\circ F$, so $(G1_{\cB}F)=(GF)$, $(G1_{\cB},F)=(G,F) =(G,1_{\cB}F)$ and $(G,F)=\Phi(G)\circ 1_{\Phi(\cB)}\circ \Phi(F)$. \end{itemize} Given a weak 3-functor $\Phi$ on $2{\mathsf{Cat}}$ which preserves the 3-subcategories $\rm C^*2{\mathsf{Cat}}$ and $\rm W^*2{\mathsf{Cat}}$, we can ask whether $\Phi$ restricts to a $\dag$ 3-functor. This consists of the following conditions: \begin{itemize} \item $\Phi(n^\dag)=\Phi(n)^\dag$ for all bounded 2-modifications $n$, \item the coheretors $\Phi^\otimes_{\varphi,\psi}$ and $\Phi^\otimes_{F}$ are unitary, \item $\Phi^\circ_{G,F}$ and $\Phi^\circ_{\cA}$ are unitary adjoint equivalences, and \item the associators $\omega^\circ_{H,G,F}$ and unitors $\ell^\circ_{F},r^\circ_F$ are unitary. \end{itemize} \section{Q-system completion is a 3-functor} In this section, we rapidly recall the definition of Q-system completion for a $\rm C^*/W^*$ 2-category from \cite[\S3]{2105.12010}, and we prove Theorem \ref{thm:QSys3Functor} that Q-system completion is a 3-functor. 
\subsection{Graphical calculus for Q-systems and their bimodules} Q-systems were first defined in \cite{MR1257245}, and were subsequently studied in \cite{MR1444286,MR2298822,MR3308880}. For this section, we fix a $\rm C^*/W^*$ 2-category $\cC$ which we assume is locally unitarily Cauchy complete, i.e., every hom 1-category has orthogonal direct sums and all orthogonal projections split orthogonally. \begin{defn} A \emph{Q-system} in $\cC$ consists of a triple $(Q,m,i)$ where $Q\in \cC(b\to b)$, $m\in \cC(Q\otimes Q \Rightarrow Q)$, and $i\in \cC(1_b \Rightarrow Q)$, which satisfy certain axioms. We represent $b,Q,m,i$ and the adjoints $m^\dag,i^\dag$ graphically as follows: $$ \tikzmath{\filldraw[gray!55, rounded corners=5, very thin, baseline=1cm] (0,0) rectangle (.6,.6);}=b \qquad \tikzmath{ \fill[gray!55, rounded corners=5pt ] (0,0) rectangle (.6,.6); \draw[DarkGreen,thick] (.3,0) -- (.3,.6); }={}_bQ_b. \qquad \tikzmath{ \fill[gray!55, rounded corners=5pt] (-.3,0) rectangle (.9,.6); \draw[DarkGreen,thick] (0,0) arc (180:0:.3cm); \draw[DarkGreen,thick] (.3,.3) -- (.3,.6); \filldraw[DarkGreen] (.3,.3) circle (.05cm); }=m \qquad \tikzmath{ \fill[gray!55, rounded corners=5pt] (-.3,0) rectangle (.9,-.6); \draw[DarkGreen,thick] (0,0) arc (-180:0:.3cm); \draw[DarkGreen,thick] (.3,-.3) -- (.3,-.6); \filldraw[DarkGreen] (.3,-.3) circle (.05cm); }=m^\dag \qquad \tikzmath{ \fill[gray!55, rounded corners=5pt] (0,0) rectangle (.6,.6); \draw[DarkGreen,thick] (.3,.3) -- (.3,.6); \filldraw[DarkGreen] (.3,.3) circle (.05cm); }=i \qquad \tikzmath{ \fill[gray!55, rounded corners=5pt] (0,0) rectangle (.6,-.6); \draw[DarkGreen,thick] (.3,-.3) -- (.3,-.6); \filldraw[DarkGreen] (.3,-.3) circle (.05cm); }=i^\dag. 
$$ The Q-system axioms are as follows: \begin{enumerate}[label=(Q\arabic*)] \item \label{Q:associativity} (associativity) $\tikzmath{ \fill[gray!55, rounded corners=5pt] (-.3,-.3) rectangle (1.2,.6); \draw[DarkGreen,thick] (0,-.3) -- (0,0) arc (180:0:.3cm); \draw[DarkGreen,thick] (.3,-.3) arc (180:0:.3cm); \draw[DarkGreen,thick] (.3,.3) -- (.3,.6); \filldraw[DarkGreen] (.3,.3) circle (.05cm); \filldraw[DarkGreen] (.6,0) circle (.05cm); } = \tikzmath{ \fill[gray!55, rounded corners=5pt] (-.6,-.3) rectangle (.9,.6); \draw[DarkGreen,thick] (0,0) arc (180:0:.3cm) -- (.6,-.3); \draw[DarkGreen,thick] (-.3,-.3) arc (180:0:.3cm); \draw[DarkGreen,thick] (.3,.3) -- (.3,.6); \filldraw[DarkGreen] (.3,.3) circle (.05cm); \filldraw[DarkGreen] (0,0) circle (.05cm); }$ \item \label{Q:unitality} (unitality) $\tikzmath{ \fill[gray!55, rounded corners=5pt] (-.3,-.3) rectangle (.9,.6); \draw[DarkGreen,thick] (0,-.1) -- (0,0) arc (180:0:.3cm) -- (.6,-.3); \draw[DarkGreen,thick] (.3,.3) -- (.3,.6); \filldraw[DarkGreen] (.3,.3) circle (.05cm); \filldraw[DarkGreen] (0,-.1) circle (.05cm); } = \tikzmath{ \fill[gray!55, rounded corners=5pt ] (0,-.3) rectangle (.6,.6); \draw[DarkGreen,thick] (.3,-.3) -- (.3,.6); } = \tikzmath{ \fill[gray!55, rounded corners=5pt] (-.3,-.3) rectangle (.9,.6); \draw[DarkGreen,thick] (0,-.3) -- (0,0) arc (180:0:.3cm) -- (.6,-.1); \draw[DarkGreen,thick] (.3,.3) -- (.3,.6); \filldraw[DarkGreen] (.3,.3) circle (.05cm); \filldraw[DarkGreen] (.6,-.1) circle (.05cm); }$ \item \label{Q:Frobenius} (Frobenius) $ \tikzmath{ \fill[gray!55, rounded corners=5pt] (-.3,-.6) rectangle (1.5,.6); \draw[DarkGreen,thick] (0,-.6) -- (0,0) arc (180:0:.3cm) arc (-180:0:.3cm) -- (1.2,.6); \draw[DarkGreen,thick] (.3,.3) -- (.3,.6); \draw[DarkGreen,thick] (.9,-.3) -- (.9,-.6); \filldraw[DarkGreen] (.3,.3) circle (.05cm); \filldraw[DarkGreen] (.9,-.3) circle (.05cm); } = \tikzmath{ \fill[gray!55, rounded corners=5pt] (-.3,0) rectangle (.9,1.2); \draw[DarkGreen,thick] (0,0) arc 
(180:0:.3cm); \draw[DarkGreen,thick] (0,1.2) arc (-180:0:.3cm); \draw[DarkGreen,thick] (.3,.3) -- (.3,.9); \filldraw[DarkGreen] (.3,.3) circle (.05cm); \filldraw[DarkGreen] (.3,.9) circle (.05cm); } = \tikzmath{ \fill[gray!55, rounded corners=5pt] (-.3,.6) rectangle (1.5,-.6); \draw[DarkGreen,thick] (0,.6) -- (0,0) arc (-180:0:.3cm) arc (180:0:.3cm) -- (1.2,-.6); \draw[DarkGreen,thick] (.3,-.3) -- (.3,-.6); \draw[DarkGreen,thick] (.9,.3) -- (.9,.6); \filldraw[DarkGreen] (.3,-.3) circle (.05cm); \filldraw[DarkGreen] (.9,.3) circle (.05cm); } $ \item \label{Q:separable} (separable) $ \tikzmath{ \fill[gray!55, rounded corners=5pt] (-.3,0) rectangle (.9,1.2); \draw[DarkGreen,thick] (0,.6) arc (180:-180:.3cm); \draw[DarkGreen,thick] (.3,1.2) -- (.3,.9); \draw[DarkGreen,thick] (.3,0) -- (.3,.3); \filldraw[DarkGreen] (.3,.3) circle (.05cm); \filldraw[DarkGreen] (.3,.9) circle (.05cm); } = \tikzmath{ \fill[gray!55, rounded corners=5pt ] (0,0) rectangle (.6,1.2); \draw[DarkGreen,thick] (.3,0) -- (.3,1.2); } $ \end{enumerate} We refer the reader to \cite[Prop.~5.17]{MR2298822} or \cite[Facts~3.4]{2105.12010} for various dependencies amongst these axioms. \end{defn} \begin{defn} Suppose $P\in \cC(a\to a)$ and $Q\in \cC(b\to b)$ are Q-systems. A $P-Q$ bimodule is a triple $(X,\lambda_X, \rho_X)$ consisting of $X\in \cC(a\to b)$, $\lambda_X \in \cC( P\otimes X \Rightarrow X)$, and $\rho_X\in \cC(X\otimes Q\Rightarrow X)$, again satisfying certain properties. 
We represent $a,b,X,P,Q$ graphically by $$ \tikzmath{\filldraw[gray!30, rounded corners=5, very thin, baseline=1cm] (0,0) rectangle (.6,.6);}=a \qquad\qquad \tikzmath{\filldraw[gray!55, rounded corners=5, very thin, baseline=1cm] (0,0) rectangle (.6,.6);}=b \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \fill[gray!30] (0,0) rectangle (-.3,.6); \fill[gray!55] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, \XColor] (0,0) -- (0,.6); }={}_aX_b \qquad\qquad \tikzmath{ \fill[gray!30, rounded corners=5pt ] (0,0) rectangle (.6,.6); \draw[\PsColor,thick] (.3,0) -- (.3,.6); }={}_aP_a \qquad\qquad \tikzmath{ \fill[gray!55, rounded corners=5pt ] (0,0) rectangle (.6,.6); \draw[DarkGreen,thick] (.3,0) -- (.3,.6); }={}_bQ_b. $$ We denote $\lambda_X,\rho_X$ and $\lambda_X^\dag,\rho_X^\dag$ by trivalent vertices: $$ \lambda_X = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.7,-.2) rectangle (.3,.5); \filldraw[gray!30] (-.7,-.2) rectangle (0,.5); \filldraw[gray!55] (0,-.2) rectangle (.3,.5); \end{scope} \draw[\XColor,thick] (0,-.2) -- (0,.5); \draw[\PsColor,thick] (-.4,-.2) arc (180:90:.4cm); \filldraw[\XColor] (0,.2) circle (.05cm); } \qquad\qquad \rho_X = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.3,-.2) rectangle (.7,.5); \filldraw[gray!30] (-.3,-.2) rectangle (0,.5); \filldraw[gray!55] (0,-.2) rectangle (.7,.5); \end{scope} \draw[\XColor,thick] (0,-.2) -- (0,.5); \draw[DarkGreen,thick] (.4,-.2) arc (0:90:.4cm); \filldraw[\XColor] (0,.2) circle (.05cm); } \qquad\qquad \lambda_X^\dag = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.7,-.5) rectangle (.3,.2); \filldraw[gray!30] (-.7,-.5) rectangle (0,.2); \filldraw[gray!55] (0,-.5) rectangle (.3,.2); \end{scope} \draw[\XColor,thick] (0,-.5) -- (0,.2); \draw[\PsColor,thick] (-.4,.2) arc (180:270:.4cm); \filldraw[\XColor] (0,-.2) circle (.05cm); } \qquad\qquad \rho_X^\dag = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.3,-.5) rectangle 
(.7,.2); \filldraw[gray!30] (-.3,-.5) rectangle (0,.2); \filldraw[gray!55] (0,-.5) rectangle (.7,.2); \end{scope} \draw[\XColor,thick] (0,-.5) -- (0,.2); \draw[DarkGreen,thick] (.4,.2) arc (0:-90:.4cm); \filldraw[\XColor] (0,-.2) circle (.05cm); } $$ The bimodule axioms are as follows: \begin{enumerate}[label=(B\arabic*)] \item \label{M:associativity} (associativity) $ \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.9,-.6) rectangle (.3,.5); \filldraw[gray!30] (-.9,-.6) rectangle (0,.5); \filldraw[gray!55] (0,-.6) rectangle (.3,.5); \end{scope} \draw[\XColor,thick] (0,-.6) -- (0,.5); \draw[\PsColor,thick] (-.6,-.6) -- (-.6,-.4) arc (180:90:.6cm); \draw[\PsColor,thick] (-.3,-.6) -- (-.3,-.4) arc (180:90:.3cm); \filldraw[\XColor] (0,.2) circle (.05cm); \filldraw[\XColor] (0,-.1) circle (.05cm); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.9,-.6) rectangle (.3,.5); \filldraw[gray!30] (-.9,-.6) rectangle (0,.5); \filldraw[gray!55] (0,-.6) rectangle (.3,.5); \end{scope} \draw[\XColor,thick] (0,-.6) -- (0,.5); \draw[\PsColor,thick] (-.4,-.2) arc (180:90:.4cm); \draw[\PsColor,thick] (-.6,-.6) -- (-.6,-.4) arc (180:0:.2cm) -- (-.2,-.6); \filldraw[\XColor] (0,.2) circle (.05cm); \filldraw[\PsColor] (-.4,-.2) circle (.05cm); } $, $ \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.3,-.6) rectangle (.9,.5); \filldraw[gray!30] (-.3,-.6) rectangle (0,.5); \filldraw[gray!55] (0,-.6) rectangle (.9,.5); \end{scope} \draw[\XColor,thick] (0,-.6) -- (0,.5); \draw[DarkGreen,thick] (.6,-.6) -- (.6,-.4) arc (0:90:.6cm); \draw[DarkGreen,thick] (.3,-.6) -- (.3,-.4) arc (0:90:.3cm); \filldraw[\XColor] (0,.2) circle (.05cm); \filldraw[\XColor] (0,-.1) circle (.05cm); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.3,-.6) rectangle (.9,.5); \filldraw[gray!30] (-.3,-.6) rectangle (0,.5); \filldraw[gray!55] (0,-.6) rectangle (.9,.5); \end{scope} \draw[\XColor,thick] (0,-.6) -- (0,.5); \draw[DarkGreen,thick] (.4,-.2) arc (0:90:.4cm); 
\draw[DarkGreen,thick] (.6,-.6) -- (.6,-.4) arc (0:180:.2cm) -- (.2,-.6); \filldraw[\XColor] (0,.2) circle (.05cm); \filldraw[DarkGreen] (.4,-.2) circle (.05cm); } $, and $ \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.7,-.5) rectangle (.7,.5); \filldraw[gray!30] (-.7,-.5) rectangle (0,.5); \filldraw[gray!55] (0,-.5) rectangle (.7,.5); \end{scope} \draw[\XColor,thick] (0,-.5) -- (0,.5); \draw[\PsColor,thick] (-.4,-.5) -- (-.4,-.2) arc (180:90:.4cm); \draw[DarkGreen,thick] (.4,-.5) arc (0:90:.4cm); \filldraw[\XColor] (0,.2) circle (.05cm); \filldraw[\XColor] (0,-.1) circle (.05cm); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.7,-.5) rectangle (.7,.5); \filldraw[gray!30] (-.7,-.5) rectangle (0,.5); \filldraw[gray!55] (0,-.5) rectangle (.7,.5); \end{scope} \draw[\XColor,thick] (0,-.5) -- (0,.5); \draw[\PsColor,thick] (-.4,-.5) arc (180:90:.4cm); \draw[DarkGreen,thick] (.4,-.5) -- (.4,-.2) arc (0:90:.4cm); \filldraw[\XColor] (0,.2) circle (.05cm); \filldraw[\XColor] (0,-.1) circle (.05cm); } $ \item \label{M:unitality} (unitality) $ \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.7,-.5) rectangle (.3,.5); \filldraw[gray!30] (-.7,-.5) rectangle (0,.5); \filldraw[gray!55] (0,-.5) rectangle (.3,.5); \end{scope} \draw[\XColor,thick] (0,-.5) -- (0,.5); \draw[\PsColor,thick] (-.4,-.2) arc (180:90:.4cm); \filldraw[\XColor] (0,.2) circle (.05cm); \filldraw[\PsColor] (-.4,-.2) circle (.05cm); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.3,-.5) rectangle (.3,.5); \filldraw[gray!30] (-.3,-.5) rectangle (0,.5); \filldraw[gray!55] (0,-.5) rectangle (.3,.5); \end{scope} \draw[\XColor,thick] (0,-.5) -- (0,.5); } $ = $ \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.3,-.5) rectangle (.7,.5); \filldraw[gray!30] (-.3,-.5) rectangle (0,.5); \filldraw[gray!55] (0,-.5) rectangle (.7,.5); \end{scope} \draw[\XColor,thick] (0,-.5) -- (0,.5); \draw[DarkGreen,thick] (.4,-.2) arc (0:90:.4cm); \filldraw[\XColor] (0,.2) circle (.05cm); 
\filldraw[DarkGreen] (.4,-.2) circle (.05cm); } $ \item \label{M:Frobenius} (Frobenius) $ \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-1.3,-.5) rectangle (.3,.8); \filldraw[gray!30] (-1.3,-.5) rectangle (0,.8); \filldraw[gray!55] (0,-.5) rectangle (.3,.8); \end{scope} \draw[\XColor,thick] (0,-.5) -- (0,.8); \draw[\PsColor,thick] (-1,-.5) -- (-1,.2) arc (180:0:.3cm) arc (180:270:.4cm); \draw[\PsColor,thick] (-.7,.5) -- (-.7,.8); \filldraw[\XColor] (0,-.2) circle (.05cm); \filldraw[\PsColor] (-.7,.5) circle (.05cm); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.7,-.2) rectangle (.3,1.1); \filldraw[gray!30] (-.7,-.2) rectangle (0,1.1); \filldraw[gray!55] (0,-.2) rectangle (.3,1.1); \end{scope} \draw[\XColor,thick] (0,-.2) -- (0,1.1); \draw[\PsColor,thick] (-.4,-.2) arc (180:90:.4cm); \draw[\PsColor,thick] (-.4,1.1) arc (180:270:.4cm); \filldraw[\XColor] (0,.2) circle (.05cm); \filldraw[\XColor] (0,.7) circle (.05cm); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-1.3,-.8) rectangle (.3,.5); \filldraw[gray!30] (-1.3,-.8) rectangle (0,.5); \filldraw[gray!55] (0,-.8) rectangle (.3,.5); \end{scope} \draw[\XColor,thick] (0,-.8) -- (0,.5); \draw[\PsColor,thick] (-1,.5) -- (-1,-.2) arc (-180:0:.3cm) arc (180:90:.4cm); \draw[\PsColor,thick] (-.7,-.5) -- (-.7,-.8); \filldraw[\XColor] (0,.2) circle (.05cm); \filldraw[\PsColor] (-.7,-.5) circle (.05cm); } $ and $ \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.3,-.5) rectangle (1.3,.8); \filldraw[gray!30] (-.3,-.5) rectangle (0,.8); \filldraw[gray!55] (0,-.5) rectangle (1.3,.8); \end{scope} \draw[\XColor,thick] (0,-.5) -- (0,.8); \draw[DarkGreen,thick] (1,-.5) -- (1,.2) arc (0:180:.3cm) arc (0:-90:.4cm); \draw[DarkGreen,thick] (.7,.5) -- (.7,.8); \filldraw[\XColor] (0,-.2) circle (.05cm); \filldraw[DarkGreen] (.7,.5) circle (.05cm); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.3,-.2) rectangle (.7,1.1); \filldraw[gray!30] (-.3,-.2) rectangle (0,1.1); 
\filldraw[gray!55] (0,-.2) rectangle (.7,1.1); \end{scope} \draw[\XColor,thick] (0,-.2) -- (0,1.1); \draw[DarkGreen,thick] (.4,-.2) arc (0:90:.4cm); \draw[DarkGreen,thick] (.4,1.1) arc (0:-90:.4cm); \filldraw[\XColor] (0,.2) circle (.05cm); \filldraw[\XColor] (0,.7) circle (.05cm); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.3,-.8) rectangle (1.3,.5); \filldraw[gray!30] (-.3,-.8) rectangle (0,.5); \filldraw[gray!55] (0,-.8) rectangle (1.3,.5); \end{scope} \draw[\XColor,thick] (0,-.8) -- (0,.5); \draw[DarkGreen,thick] (1,.5) -- (1,-.2) arc (0:-180:.3cm) arc (0:90:.4cm); \draw[DarkGreen,thick] (.7,-.5) -- (.7,-.8); \filldraw[\XColor] (0,.2) circle (.05cm); \filldraw[DarkGreen] (.7,-.5) circle (.05cm); } $ \item \label{M:separable} (separable) $ \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.5,-.5) rectangle (.3,.5); \filldraw[gray!30] (-.6,-.5) rectangle (0,.5); \filldraw[gray!55] (0,-.5) rectangle (.3,.5); \end{scope} \draw[\XColor,thick] (0,-.5) -- (0,.5); \draw[\PsColor,thick] (0,-.3) arc (270:90:.3cm); \filldraw[\XColor] (0,.3) circle (.05cm); \filldraw[\XColor] (0,-.3) circle (.05cm); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.3,-.5) rectangle (.3,.5); \filldraw[gray!30] (-.3,-.5) rectangle (0,.5); \filldraw[gray!55] (0,-.5) rectangle (.3,.5); \end{scope} \draw[\XColor,thick] (0,-.5) -- (0,.5); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.3,-.5) rectangle (.5,.5); \filldraw[gray!30] (-.3,-.5) rectangle (0,.5); \filldraw[gray!55] (0,-.5) rectangle (.6,.5); \end{scope} \draw[\XColor,thick] (0,-.5) -- (0,.5); \draw[DarkGreen,thick] (0,-.3) arc (-90:90:.3cm); \filldraw[\XColor] (0,.3) circle (.05cm); \filldraw[\XColor] (0,-.3) circle (.05cm); } $ \end{enumerate} We refer the reader to \cite[Facts~3.16]{2105.12010} for various dependencies amongst these axioms. 
\end{defn} \begin{defn} For $\cC$ a $\rm C^*/\rm W^*$ 2-category, its \emph{Q-system completion} is the $\rm C^*/\rm W^*$ 2-category ${\sf QSys}(\cC)$ whose: \begin{itemize} \item 0-cells are Q-systems $(Q,m,i)\in \cC(b\to b)$, \item 1-cells between Q-systems $P\in \cC(a\to a)$ and $Q\in \cC(b\to b)$ are (unital Frobenius) bimodules $({}_aX_b,\lambda_X,\rho_X)\in \cC(a\to b)$, and \item 2-cells are bimodule intertwiners, i.e., given Q-systems ${}_aP_a,{}_bQ_b$ and $P-Q$ bimodules ${}_aX_b, {}_aY_b$, ${\sf QSys}(\cC)({}_PX_Q\Rightarrow {}_PY_Q)$ is the set of $f\in \cC({}_aX_b\Rightarrow {}_aY_b)$ such that \begin{equation*} \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.8,.5) rectangle (.5,-1.5); \fill[gray!30] (-.8,.5) rectangle (0,-1.5); \fill[gray!55] (0,.5) rectangle (.5,-1.5); \end{scope} \draw[orange,thick] (0,.5) -- (0,-.25); \draw[\XColor,thick] (0,-.25) -- (0,-1.5); \draw[\PsColor,thick] (-.5,-1.5) arc (180:90:.5cm); \filldraw[\XColor] (0,-1) circle (.05cm); \roundNbox{unshaded}{(0,-.25)}{.3}{0}{0}{$f$}; } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.8,-.5) rectangle (.5,-2.5); \fill[gray!30] (-.8,-.5) rectangle (0,-2.5); \fill[gray!55] (0,-.5) rectangle (.5,-2.5); \end{scope} \draw[\XColor,thick] (0,-1.75) -- (0,-2.5); \draw[orange,thick] (0,-0.5) -- (0,-1.75); \draw[\PsColor,thick] (-.5,-2.5) -- (-.5,-1.5) arc (180:90:.5cm); \filldraw[orange] (0,-1) circle (.05cm); \roundNbox{unshaded}{(0,-1.75)}{.3}{0}{0}{$f$}; } \qquad\text{and}\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (.8,.5) rectangle (-.5,-1.5); \fill[gray!55] (.8,.5) rectangle (0,-1.5); \fill[gray!30] (0,.5) rectangle (-.5,-1.5); \end{scope} \draw[orange,thick] (0,.5) -- (0,-.25); \draw[\XColor,thick] (0,-.25) -- (0,-1.5); \draw[DarkGreen,thick] (.5,-1.5) arc (0:90:.5cm); \filldraw[\XColor] (0,-1) circle (.05cm); \roundNbox{unshaded}{(0,-.25)}{.3}{0}{0}{$f$}; } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (.8,-.5) rectangle (-.5,-2.5); \fill[gray!55] 
(.8,-.5) rectangle (0,-2.5); \fill[gray!30] (0,-.5) rectangle (-.5,-2.5); \end{scope} \draw[\XColor,thick] (0,-1.75) -- (0,-2.5); \draw[orange,thick] (0,-0.5) -- (0,-1.75); \draw[DarkGreen,thick] (.5,-2.5) -- (.5,-1.5) arc (0:90:.5cm); \filldraw[orange] (0,-1) circle (.05cm); \roundNbox{unshaded}{(0,-1.75)}{.3}{0}{0}{$f$}; }\,. \end{equation*} \item 1-composition in ${\sf QSys}(\cC)$ is performed by orthogonally splitting the \emph{separability projector} \begin{equation} \label{eq:SeparabilityProjector} p^Q_{X,Y} := \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.5,-.5) rectangle (.5,.5); \filldraw[gray!30] (-.5,-.5) rectangle (-.2,.5); \filldraw[gray!55] (-.2,-.5) rectangle (.2,.5); \filldraw[gray!75] (.2,-.5) rectangle (.5,.5); \end{scope} \draw[thick, \XColor] (-.2,-.5) -- (-.2,.5); \draw[thick,DarkGreen] (-.2,0) -- (.2,0); \draw[thick, orange] (.2,-.5) -- (.2,.5); } := \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.6,-.6) rectangle (.6,.6); \filldraw[gray!30] (-.6,-.6) rectangle (-.3,.6); \filldraw[gray!55] (-.3,-.6) rectangle (.3,.6); \filldraw[gray!75] (.3,-.6) rectangle (.6,.6); \end{scope} \draw[thick,DarkGreen] (-.3,-.3) arc (-90:0:.3cm) arc (180:90:.3cm); \draw[thick, \XColor] (-.3,-.6) -- (-.3,.6); \draw[thick, orange] (.3,-.6) -- (.3,.6); \filldraw[\XColor] (-.3,-.3) circle (.05cm); \filldraw[orange] (.3,.3) circle (.05cm); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.7,-.7) rectangle (.7,.5); \filldraw[gray!30] (-.7,-.7) rectangle (-.4,.5); \filldraw[gray!55] (-.4,-.7) rectangle (.4,.5); \filldraw[gray!75] (.4,-.7) rectangle (.7,.5); \end{scope} \draw[thick,DarkGreen] (-.4,.2) arc (90:0:.2cm) arc (-180:0:.2cm) arc (180:90:.2cm); \draw[thick,DarkGreen] (0,-.4) -- (0,-.2); \draw[thick, \XColor] (-.4,-.7) -- (-.4,.5); \draw[thick, orange] (.4,-.7) -- (.4,.5); \filldraw[\XColor] (-.4,.2) circle (.05cm); \filldraw[orange] (.4,.2) circle (.05cm); \filldraw[DarkGreen] (0,-.2) circle (.05cm); \filldraw[DarkGreen] (0,-.4) 
circle (.05cm); } \end{equation} Splitting this projector orthogonally yields an object ${}_aX\otimes_Q Y_b \in {\sf QSys}(\cC)(P \to R)$ together with a $P-R$ bimodular coisometry $u_{X,Y}^Q: X\otimes_b Y \to X\otimes_Q Y$, unique up to canonical unitary, such that $p_{X,Y}^Q=(u_{X,Y}^Q)^\dag\star u_{X,Y}^Q$. \end{itemize} We refer the reader to \cite[\S3.2]{2105.12010} for the full details that ${\sf QSys}(\cC)$ is a $\dag$ 2-category, which is $\rm C^*/W^*$ whenever $\cC$ is, respectively. \end{defn} \begin{nota} \label{nota:QSys(C)andOther} We use the graphical notation for ${\sf QSys}(\cC)$ from \cite[\S3.3]{2105.12010}, where shaded regions for Q-systems are denoted by colored regions, but trivial Q-systems are still represented in gray-scale: $$ \tikzmath{\filldraw[\PrColor, rounded corners=5, very thin, baseline=1cm] (0,0) rectangle (.6,.6);}=P \qquad\qquad \tikzmath{\filldraw[green!30, rounded corners=5, very thin, baseline=1cm] (0,0) rectangle (.6,.6);}=Q \qquad\qquad \tikzmath{\filldraw[gray!30, rounded corners=5, very thin, baseline=1cm] (0,0) rectangle (.6,.6);}=1_a \qquad\qquad \tikzmath{\filldraw[gray!55, rounded corners=5, very thin, baseline=1cm] (0,0) rectangle (.6,.6);}=1_b. 
$$ If ${}_aP_a, {}_bQ_b\in {\sf QSys}(\cC)$ are Q-systems and $X\in {\sf QSys}(\cC)(P\to Q)$, then $X$ may also be viewed as a $1_a-Q$, $P-1_b$, and a $1_a-1_b$ bimodule; we represent these four possibilities by varying the shadings: $$ \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \fill[\PrColor] (0,0) rectangle (-.3,.6); \fill[green!30] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, \XColor] (0,0) -- (0,.6); }={}_PX_Q \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \fill[gray!30] (0,0) rectangle (-.3,.6); \fill[green!30] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, \XColor] (0,0) -- (0,.6); }={}_{1_a}X_Q \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \fill[\PrColor] (0,0) rectangle (-.3,.6); \fill[gray!55] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, \XColor] (0,0) -- (0,.6); }={}_PX_{1_b} \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \fill[gray!30] (0,0) rectangle (-.3,.6); \fill[gray!55] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, \XColor] (0,0) -- (0,.6); }={}_{1_a}X_{1_b}. $$ We use a similar convention for intertwiners of bimodules. We often suppress the external shading when drawing 2-cells in ${\sf QSys}(\cC)$; when we do so, it should be inferred that the diagram/relation depicted holds for any consistent external shading applied to the diagram(s). 
Given $X\in {\sf QSys}(\cC)(P\to Q)$ and $Y\in {\sf QSys}(\cC)(Q\to R)$, we denote the coisometry $u_{X,Y}^Q$ and its adjoint in the graphical calculus of ${\sf QSys}(\cC)$ by \[ u^Q_{X,Y} := \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.5,-.5) rectangle (.5,.5); \filldraw[green!30] (-.2,0) rectangle (.2,.5); \filldraw[gray!55] (-.2,-.5) rectangle (.2,0); \end{scope} \draw[thick, \XColor] (-.2,-.5) -- (-.2,.5); \draw[thick, DarkGreen] (-.2,0) -- (.2,0); \draw[thick, orange] (.2,-.5) -- (.2,.5); } : X\otimes_b Y \to X\otimes_Q Y \qquad\text{and}\qquad (u^Q_{X,Y})^\dag = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.5,-.5) rectangle (.5,.5); \filldraw[green!30] (-.2,0) rectangle (.2,-.5); \filldraw[gray!55] (-.2,.5) rectangle (.2,0); \end{scope} \draw[thick, \XColor] (-.2,-.5) -- (-.2,.5); \draw[thick, DarkGreen] (-.2,0) -- (.2,0); \draw[thick, orange] (.2,-.5) -- (.2,.5); }\,. \] We thus get the following relations: \[ u^Q_{X,Y}\star (u^Q_{X,Y})^\dag = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.5,-.5) rectangle (.5,1); \filldraw[green!30] (-.2,0) rectangle (.2,-.5); \filldraw[gray!55] (-.2,.5) rectangle (.2,0); \filldraw[green!30] (-.2,1) rectangle (.2,.5); \end{scope} \draw[thick, \XColor] (-.2,-.5) -- (-.2,1); \draw[thick, DarkGreen] (-.2,0) -- (.2,0); \draw[thick, DarkGreen] (-.2,.5) -- (.2,.5); \draw[thick, orange] (.2,-.5) -- (.2,1); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.5,-.5) rectangle (.5,1); \filldraw[green!30] (-.2,1) rectangle (.2,-.5); \end{scope} \draw[thick, \XColor] (-.2,-.5) -- (-.2,1); \draw[thick, orange] (.2,-.5) -- (.2,1); } = \id_{X\otimes_Q Y} \qquad (u^Q_{X,Y})^\dag \star u^Q_{X,Y} = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.5,-.5) rectangle (.5,1); \filldraw[gray!55] (-.2,0) rectangle (.2,-.5); \filldraw[green!30] (-.2,.5) rectangle (.2,0); \filldraw[gray!55] (-.2,1) rectangle (.2,.5); \end{scope} \draw[thick, \XColor] (-.2,-.5) -- (-.2,1); \draw[thick, DarkGreen] 
(-.2,0) -- (.2,0); \draw[thick, DarkGreen] (-.2,.5) -- (.2,.5); \draw[thick, orange] (.2,-.5) -- (.2,1); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.5,-.5) rectangle (.5,1); \filldraw[gray!55] (-.2,1) rectangle (.2,-.5); \end{scope} \draw[thick, \XColor] (-.2,-.5) -- (-.2,1); \draw[thick, DarkGreen] (-.2,.25) -- (.2,.25); \draw[thick, orange] (.2,-.5) -- (.2,1); } = p^Q_{X, Y}. \] We define canonical unitor trivalent vertices by \[ \lambda^P_X= \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.7,-.2) rectangle (.3,.5); \filldraw[gray!30] (-.7,-.2) rectangle (0,.5); \filldraw[\PrColor,thick] (-.4,-.2) arc (180:90:.4cm) -- (0,-.2); \filldraw[gray!55] (0,-.2) rectangle (.3,.5); \end{scope} \draw[\XColor,thick] (0,-.2) -- (0,.5); \draw[\PsColor,thick] (-.4,-.2) arc (180:90:.4cm); \filldraw[\XColor] (0,.2) circle (.05cm); } := \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.7,-.6) rectangle (.3,.5); \filldraw[gray!30] (-.7,-.6) rectangle (0,.5); \filldraw[\PrColor,thick] (-.4,-.6) rectangle (0,-.2); \filldraw[gray!55] (0,-.6) rectangle (.3,.5); \end{scope} \draw[\XColor,thick] (0,-.6) -- (0,.5); \draw[\PsColor,thick] (-.4,-.6) -- (-.4,-.2) arc (180:90:.4cm); \draw[\PsColor,thick] (-.4,-.2) -- (0,-.2); \filldraw[\XColor] (0,.2) circle (.05cm); } =\lambda_X\star (u^P_{P,X})^\dag \qquad\text{and}\qquad \rho_X^Q= \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.3,-.2) rectangle (.7,.5); \filldraw[gray!30] (-.3,-.2) rectangle (0,.5); \filldraw[gray!55] (0,-.2) rectangle (.7,.5); \filldraw[green!30] (.4,-.2) arc (0:90:.4cm) -- (0,-.2); \end{scope} \draw[\XColor,thick] (0,-.2) -- (0,.5); \draw[DarkGreen,thick] (.4,-.2) arc (0:90:.4cm); \filldraw[\XColor] (0,.2) circle (.05cm); } := \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.3,-.6) rectangle (.7,.5); \filldraw[gray!30] (-.3,-.6) rectangle (0,.5); \filldraw[gray!55] (0,-.6) rectangle (.7,.5); \filldraw[green!30] (.4,-.6) rectangle (0,-.2); \end{scope} \draw[\XColor,thick] 
(0,-.6) -- (0,.5); \draw[DarkGreen,thick] (.4,-.6) -- (.4,-.2) arc (0:90:.4cm); \draw[DarkGreen,thick] (0,-.2) -- (.4,-.2); \filldraw[\XColor] (0,.2) circle (.05cm); } = \rho_X\star (u_{X,Q}^Q)^\dag. \] It is straightforward to verify that $\lambda^P_X$ and $\rho^Q_X$ are unitaries (see \cite[\S3.3]{2105.12010}). In this graphical notation, the associator of ${\sf QSys}(\cC)$ is uniquely determined by the formula on the left hand side: \[ \tikzmath{ \filldraw[gray!55] (-.4,.5) rectangle (0,0); \filldraw[green!30] (-.4,.5) rectangle (0,2); \filldraw[gray!75] (.4,.2) rectangle (0,0); \filldraw[cyan!30] (.4,.2) rectangle (0,2); \draw[DarkGreen,thick] (-.4,.5) -- (0,.5); \draw[cyan!90,thick] (.4,.2) -- (0,.2); \draw[\XColor,thick] (-.4,0) -- (-.4,2); \draw[orange,thick] (0,0) -- (0,2); \draw[violet,thick] (.4,0) -- (.4,2); \roundNbox{unshaded}{(0,1)}{.3}{.3}{.3}{\scriptsize{$\alpha^{{\sf QSys}(\cC)}_{X,Y,Z}$}}; } = \tikzmath{ \filldraw[gray!55] (-.4,0) rectangle (0,1.8); \filldraw[green!30] (-.4,2) rectangle (0,1.8); \filldraw[gray!75] (.4,0) rectangle (0,1.5); \filldraw[cyan!30] (.4,2) rectangle (0,1.5); \draw[DarkGreen,thick] (-.4,1.8) -- (0,1.8); \draw[cyan!90,thick] (.4,1.5) -- (0,1.5); \draw[\XColor,thick] (-.4,0) -- (-.4,2); \draw[orange,thick] (0,0) -- (0,2); \draw[violet,thick] (.4,0) -- (.4,2); \roundNbox{unshaded}{(0,1)}{.3}{.3}{.3}{\scriptsize{$\alpha^{\cC}_{X,Y,Z}$}}; } \qquad\qquad \Longrightarrow \qquad\qquad \underset{ (X\otimes_Q Y)\otimes_R Z \Rightarrow X\otimes_Q (Y\otimes_R Z) }{ \tikzmath{ \filldraw[green!30] (-.4,0) rectangle (0,2); \filldraw[cyan!30] (.4,0) rectangle (0,2); \draw[\XColor,thick] (-.4,0) -- (-.4,2); \draw[orange,thick] (0,0) -- (0,2); \draw[violet,thick] (.4,0) -- (.4,2); \roundNbox{unshaded}{(0,1)}{.3}{.3}{.3}{\scriptsize{$\alpha^{{\sf QSys}(\cC)}_{X,Y,Z}$}}; } = \tikzmath{ \filldraw[green!30] (-.4,0) rectangle (0,.5); \filldraw[gray!55] (-.4,1.8) rectangle (0,.5); \filldraw[green!30] (-.4,2) rectangle (0,1.8); \filldraw[cyan!30] 
(.4,0) rectangle (0,.2); \filldraw[gray!75] (.4,1.5) rectangle (0,.2); \filldraw[cyan!30] (.4,2) rectangle (0,1.5); \draw[DarkGreen,thick] (-.4,.5) -- (0,.5); \draw[cyan!90,thick] (.4,.2) -- (0,.2); \draw[DarkGreen,thick] (-.4,1.8) -- (0,1.8); \draw[cyan!90,thick] (.4,1.5) -- (0,1.5); \draw[\XColor,thick] (-.4,0) -- (-.4,2); \draw[orange,thick] (0,0) -- (0,2); \draw[violet,thick] (.4,0) -- (.4,2); \roundNbox{unshaded}{(0,1)}{.3}{.3}{.3}{\scriptsize{$\alpha^{\cC}_{X,Y,Z}$}}; } } . \] \end{nota} \subsection{Constructions on 1-morphisms, 2-morphisms, and 3-morphisms in \texorpdfstring{$2{\mathsf{Cat}}$}{2Cat}} For this section, we fix two $\rm C^*/W^*$ 2-categories $\cC,\cD$. \begin{construction}[{\cite[Const.~3.29]{2105.12010}}] \label{construction:Qsys(F)} A $\dag$ 2-functor $F: \cC \to \cD$ between $\rm C^*/W^*$ 2-categories induces a $\dag$-2-functor ${\sf QSys}(F): {\sf QSys}(\cC) \to {\sf QSys}(\cD)$. \begin{itemize} \item For $({}_bQ_b,m,i)\in {\sf QSys}(\cC)$, we define $$ {\sf QSys}(F)({}_bQ_b):=({}_{F(b)}F(Q)_{F(b)},F(m)\star F^2_{Q,Q},F(i)\star F^1_b)\in {\sf QSys}(\cD). $$ \item For $({}_PX_Q,\lambda, \rho)\in {\sf QSys}(\cC)(P\to Q)$, we define $$ {\sf QSys}(F)({}_PX_Q) := (F(X),F(\lambda)\star F^2_{P,X},F(\rho)\star F^2_{X,Q})\in {\sf QSys}(\cD)(F(P)\to F(Q)) $$ \item For $f \in {\sf QSys}(\cC)({}_PX_Q \Rightarrow {}_PY_Q)$ we define $$ {\sf QSys}(F)(f):= F(f)\in {\sf QSys}(\cD)({}_{F(P)}F(X)_{F(Q)} \Rightarrow {}_{F(P)}F(Y)_{F(Q)}). $$ Since $F$ is a $\dag$ 2-functor, ${\sf QSys}(F)$ will be as well. Moreover, when $\cC,\cD$ are $\rm W^*$ and $F: \cC\to \cD$ is normal, so is ${\sf QSys}(F)$. \item For ${}_P X_Q \in {\sf QSys}(\cC)(P \to Q)$ and ${}_Q Y_R \in {\sf QSys}(\cC)(Q\to R)$, we define \begin{equation} \label{eq:DefOfQSysF2} {\sf QSys}(F)_{X,Y}^{2} :=F(u_{X,Y}) \star F^2_{X,Y} \star u^\dag_{F(X), F(Y)} \in {\sf QSys}(\cD)(F(X)\otimes_{F(Q)}F(Y)\Rightarrow F(X\otimes_Q Y)). 
\end{equation} Finally, for a Q-system $Q\in \cC(b\to b)$, we define $$ {\sf QSys}(F)^1_{F(Q)}:=\id\in {\sf QSys}(\cD)(1_{F(Q)}\Rightarrow F(1_Q)). $$ \end{itemize} For convenience of the reader, we provide a diagrammatic proof below that ${\sf QSys}(F)$ is a $\dag$ 2-functor. We graphically represent \begin{align*} \tikzmath{ \filldraw[primedregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= F & \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.5,-.3) rectangle (.5,.3); \filldraw[primedregion=white] (-.5,-.3) rectangle (-.2,.3); \filldraw[primedregion=green!30] (-.2,-.3) rectangle (.2,.3); \filldraw[primedregion=white] (.5,-.3) rectangle (.2,.3); \end{scope} \draw[\XColor,thick] (-.2,-.3) -- (-.2,.3); \draw[orange,thick] (.2,-.3) -- (.2,.3); \draw[thin, dotted, rounded corners = 5pt] (-.5,-.3) rectangle (.5,.3); } &= F(X)\otimes_{F(Q)} F(Y) & \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.35,-.3) rectangle (.35,.3); \filldraw[primedregion=white] (-.05,-.3) rectangle (-.35,.3); \filldraw[primedregion=green!30] (-.05,-.3) rectangle (.05,.3); \filldraw[primedregion=white] (.05,-.3) rectangle (.35,.3); \end{scope} \draw[\XColor,thick] (-.05,-.3) -- (-.05,.3); \draw[orange,thick] (.05,-.3) -- (.05,.3); \draw[thin, dotted, rounded corners = 5pt] (-.35,-.3) rectangle (.35,.3); } &= F(X\otimes_Q Y) \\ \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.5,-.3) rectangle (.5,.3); \filldraw[primedregion=white] (-.5,-.3) rectangle (-.2,.3); \filldraw[primedregion=green!30] (-.2,0) rectangle (.2,.3); \filldraw[primedregion=gray!55] (-.2,0) rectangle (.2,-.3); \filldraw[primedregion=white] (.5,-.3) rectangle (.2,.3); \end{scope} \draw[\XColor,thick] (-.2,-.3) -- (-.2,.3); \draw[orange,thick] (.2,-.3) -- (.2,.3); \draw[DarkGreen,thick] (-.2,0) -- (.2,0); \draw[thin, dotted, rounded corners = 5pt] (-.5,-.3) rectangle (.5,.3); } &= u^{F(Q)}_{F(X),F(Y)} & \tikzmath{ \begin{scope} 
\clip[rounded corners=5pt] (-.35,-.3) rectangle (.35,.3); \filldraw[primedregion=white] (-.05,-.3) rectangle (-.35,.3); \filldraw[primedregion=gray!55] (-.05,.3) rectangle (.05,0); \filldraw[primedregion=green!30] (-.05,-.3) rectangle (.05,0); \filldraw[primedregion=white] (.05,-.3) rectangle (.35,.3); \end{scope} \draw[\XColor,thick] (-.05,-.3) -- (-.05,.3); \draw[orange,thick] (.05,-.3) -- (.05,.3); \draw[DarkGreen,thick] (-.05,0) -- (.05,0); \draw[thin, dotted, rounded corners = 5pt] (-.35,-.3) rectangle (.35,.3); } &= F(u^Q_{X,Y})^\dag & \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.35,-.3) rectangle (.35,.3); \filldraw[primedregion=white] (-.05,-.3) rectangle (-.35,.3); \filldraw[primedregion=gray!55] (-.05,-.3) rectangle (.05,.3); \filldraw[primedregion=white] (.05,-.3) rectangle (.35,.3); \end{scope} \draw[\XColor,thick] (-.05,-.3) -- (-.05,.3); \draw[orange,thick] (.05,-.3) -- (.05,.3); \draw[DarkGreen,thick] (-.05,0) -- (.05,0); \draw[thin, dotted, rounded corners = 5pt] (-.35,-.3) rectangle (.35,.3); } &= F(p^Q_{X,Y}). 
\end{align*} We then define \[ \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners=5pt] (-1.4,-2) rectangle (1.4,2); \filldraw[primedregion=white] (-1.4,-2) rectangle (1.4,2); \filldraw[primedregion=green!30] (-.4,-2) rectangle (.4,0); \filldraw[primedregion=green!30] (-.1,2) rectangle (.1,0); \end{scope} \draw[\XColor,thick] (-.4,-2) -- (-.4,0); \draw[orange,thick] (.4,-2) -- (.4,0); \draw[\XColor,thick] (-.1,2) -- (-.1,0); \draw[orange,thick] (.1,2) -- (.1,0); \roundNbox{unshaded}{(0,0)}{.6}{.45}{.45}{\small{${\sf QSys}(F)^2_{X,Y}$}}; \draw[thin, dotted, rounded corners = 5pt] (-1.4,-2) rectangle (1.4,2); } := \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,-1) rectangle (.7,1); \filldraw[primedregion=white] (-.7,-1) rectangle (.7,1); \filldraw[primedregion=green!30] (-.2,-1) rectangle (.2,-.7); \filldraw[primedregion=gray!55] (-.2,-.3) rectangle (.2,-.7); \filldraw[primedregion=green!30] (-.05,1) rectangle (.05,.7); \filldraw[primedregion=gray!55] (-.05,.3) rectangle (.05,.7); \end{scope} \draw[DarkGreen,thick] (-.2,-.7) -- (.2,-.7); \draw[DarkGreen,thick] (-.05,.7) -- (.05,.7); \draw[\XColor,thick] (-.2,-1) -- (-.2,0); \draw[orange,thick] (.2,-1) -- (.2,0); \draw[\XColor,thick] (-.05,1) -- (-.05,0); \draw[orange,thick] (.05,1) -- (.05,0); \roundNbox{unshaded}{(0,0)}{.3}{.2}{.2}{\scriptsize{$F^2_{X,Y}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,-1) rectangle (.7,1); }\,. 
\] By definition of the separability projector \eqref{eq:SeparabilityProjector} for $F(X)\otimes_{F(Q)}F(Y)$, we have \[ p_{F(X),F(Y)} = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.5,-1) rectangle (.5,1); \filldraw[primedregion=white] (-.5,-1) rectangle (.5,1); \filldraw[primedregion=gray!55] (-.2,-1) rectangle (.2,1); \end{scope} \draw[DarkGreen,thick] (-.2,0) -- (.2,0); \draw[\XColor,thick] (-.2,-1) -- (-.2,1); \draw[orange,thick] (.2,-1) -- (.2,1); \draw[thin, dotted, rounded corners = 5pt] (-.5,-1) rectangle (.5,1); } := \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,-1.5) rectangle (.7,1.5); \filldraw[primedregion=white] (-.7,-1.5) rectangle (.7,1.5); \filldraw[primedregion=gray!55] (-.2,-1) rectangle (.2,-1.5); \filldraw[primedregion=gray!55] (-.05,-1) rectangle (.05,1); \filldraw[primedregion=gray!55] (-.2,1) rectangle (.2,1.5); \end{scope} \draw[DarkGreen,thick] (-.05,0) -- (.05,0); \draw[\XColor,thick] (-.2,-1.5) -- (-.2,-1); \draw[orange,thick] (.2,-1.5) -- (.2,-1); \draw[\XColor,thick] (-.05,-1) -- (-.05,1); \draw[orange,thick] (.05,-1) -- (.05,1); \draw[\XColor,thick] (-.2,1.5) -- (-.2,1); \draw[orange,thick] (.2,1.5) -- (.2,1); \roundNbox{unshaded}{(0,-.7)}{.3}{.2}{.2}{\scriptsize{$F^2_{X,Y}$}}; \roundNbox{unshaded}{(0,.7)}{.3}{.25}{.25}{\tiny{$(F^2_{X,Y})^\dag$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,-1.5) rectangle (.7,1.5); } \qquad\qquad \Longrightarrow \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,-.8) rectangle (.7,1.1); \filldraw[primedregion=white] (-.7,-.8) rectangle (.7,1.1); \filldraw[primedregion=gray!55] (-.2,-.8) rectangle (.2,-.3); \filldraw[primedregion=gray!55] (-.05,.3) rectangle (.05,1.1); \end{scope} \draw[DarkGreen,thick] (-.05,.7) -- (.05,.7); \draw[\XColor,thick] (-.2,-.8) -- (-.2,-.3); \draw[orange,thick] (.2,-.8) -- (.2,-.3); \draw[\XColor,thick] (-.05,.3) -- (-.05,1.1); \draw[orange,thick] (.05,.3) -- (.05,1.1); 
\roundNbox{unshaded}{(0,0)}{.3}{.2}{.2}{\scriptsize{$F^2_{X,Y}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,-.8) rectangle (.7,1.1); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,-1.1) rectangle (.7,.8); \filldraw[primedregion=white] (-.7,-1.1) rectangle (.7,.8); \filldraw[primedregion=gray!55] (-.2,-1.1) rectangle (.2,-.3); \filldraw[primedregion=gray!55] (-.05,.3) rectangle (.05,.8); \end{scope} \draw[DarkGreen,thick] (-.2,-.7) -- (.2,-.7); \draw[\XColor,thick] (-.2,-1.1) -- (-.2,-.3); \draw[orange,thick] (.2,-1.1) -- (.2,-.3); \draw[\XColor,thick] (-.05,.3) -- (-.05,.8); \draw[orange,thick] (.05,.3) -- (.05,.8); \roundNbox{unshaded}{(0,0)}{.3}{.2}{.2}{\scriptsize{$F^2_{X,Y}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,-1.1) rectangle (.7,.8); }\,. \] This formula for $p_{F(X), F(Y)}$ immediately implies ${\sf QSys}(F)^2_{X,Y}$ is unitary: \[ \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners=5pt] (-1.6,-3) rectangle (1.6,3); \filldraw[primedregion=white] (-1.6,-3) rectangle (1.6,3); \filldraw[primedregion=green!30] (-.4,-2) rectangle (.4,-3); \filldraw[primedregion=green!30] (-.4,2) rectangle (.4,3); \filldraw[primedregion=green!30] (-.1,-2) rectangle (.1,2); \end{scope} \draw[\XColor,thick] (-.4,-3) -- (-.4,-2); \draw[orange,thick] (.4,-3) -- (.4,-2); \draw[\XColor,thick] (-.1,-2) -- (-.1,2); \draw[orange,thick] (.1,-2) -- (.1,2); \draw[\XColor,thick] (-.4,3) -- (-.4,2); \draw[orange,thick] (.4,3) -- (.4,2); \roundNbox{unshaded}{(0,-1.4)}{.6}{.7}{.7}{\small{${\sf QSys}(F)^2_{X,Y}$}}; \roundNbox{unshaded}{(0,1.4)}{.6}{.7}{.7}{\small{$({\sf QSys}(F)^2_{X,Y})^\dag$}}; \draw[thin, dotted, rounded corners = 5pt] (-1.6,-3) rectangle (1.6,3); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,-1.5) rectangle (.7,1.5); \filldraw[primedregion=white] (-.7,-1.5) rectangle (.7,1.5); \filldraw[primedregion=green!30] (-.2,1.5) rectangle (.2,1.2); \filldraw[primedregion=gray!55] (-.2,.7) rectangle (.2,1.2); 
\filldraw[primedregion=gray!55] (-.05,.2) rectangle (.05,.7); \filldraw[primedregion=green!30] (-.05,-.2) rectangle (.05,.2); \filldraw[primedregion=gray!55] (-.05,-.2) rectangle (.05,-.7); \filldraw[primedregion=gray!55] (-.2,-.7) rectangle (.2,-1.2); \filldraw[primedregion=green!30] (-.2,-1.5) rectangle (.2,-1.2); \end{scope} \draw[DarkGreen,thick] (-.2,1.2) -- (.2,1.2); \draw[DarkGreen,thick] (-.05,.2) -- (.05,.2); \draw[DarkGreen,thick] (-.05,-.2) -- (.05,-.2); \draw[DarkGreen,thick] (-.2,-1.2) -- (.2,-1.2); \draw[\XColor,thick] (-.2,-1.5) -- (-.2,-1); \draw[orange,thick] (.2,-1.5) -- (.2,-1); \draw[\XColor,thick] (-.05,-1) -- (-.05,1); \draw[orange,thick] (.05,-1) -- (.05,1); \draw[\XColor,thick] (-.2,1.5) -- (-.2,1); \draw[orange,thick] (.2,1.5) -- (.2,1); \roundNbox{unshaded}{(0,-.7)}{.3}{.2}{.2}{\scriptsize{$F^2_{X,Y}$}}; \roundNbox{unshaded}{(0,.7)}{.3}{.25}{.25}{\tiny{$(F^2_{X,Y})^\dag$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,-1.5) rectangle (.7,1.5); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,-1.5) rectangle (.7,1.5); \filldraw[primedregion=white] (-.7,-1.5) rectangle (.7,1.5); \filldraw[primedregion=green!30] (-.2,1.5) rectangle (.2,1.2); \filldraw[primedregion=gray!55] (-.2,.7) rectangle (.2,1.2); \filldraw[primedregion=gray!55] (-.05,.7) rectangle (.05,-.7); \filldraw[primedregion=gray!55] (-.2,-.7) rectangle (.2,-1.2); \filldraw[primedregion=green!30] (-.2,-1.5) rectangle (.2,-1.2); \end{scope} \draw[DarkGreen,thick] (-.2,1.2) -- (.2,1.2); \draw[DarkGreen,thick] (-.05,0) -- (.05,0); \draw[DarkGreen,thick] (-.2,-1.2) -- (.2,-1.2); \draw[\XColor,thick] (-.2,-1.5) -- (-.2,-1); \draw[orange,thick] (.2,-1.5) -- (.2,-1); \draw[\XColor,thick] (-.05,-1) -- (-.05,1); \draw[orange,thick] (.05,-1) -- (.05,1); \draw[\XColor,thick] (-.2,1.5) -- (-.2,1); \draw[orange,thick] (.2,1.5) -- (.2,1); \roundNbox{unshaded}{(0,-.7)}{.3}{.2}{.2}{\scriptsize{$F^2_{X,Y}$}}; \roundNbox{unshaded}{(0,.7)}{.3}{.25}{.25}{\tiny{$(F^2_{X,Y})^\dag$}}; 
\draw[thin, dotted, rounded corners = 5pt] (-.7,-1.5) rectangle (.7,1.5); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,-1.5) rectangle (.7,1.5); \filldraw[primedregion=white] (-.7,-1.5) rectangle (.7,1.5); \filldraw[primedregion=green!30] (-.2,1.5) rectangle (.2,1.2); \filldraw[primedregion=gray!55] (-.2,.7) rectangle (.2,1.2); \filldraw[primedregion=gray!55] (-.05,.7) rectangle (.05,-.7); \filldraw[primedregion=gray!55] (-.2,-.7) rectangle (.2,-1.2); \filldraw[primedregion=green!30] (-.2,-1.5) rectangle (.2,-1.2); \end{scope} \draw[DarkGreen,thick] (-.2,1.2) -- (.2,1.2); \draw[DarkGreen,thick] (-.2,-1.2) -- (.2,-1.2); \draw[\XColor,thick] (-.2,-1.5) -- (-.2,-1); \draw[orange,thick] (.2,-1.5) -- (.2,-1); \draw[\XColor,thick] (-.05,-1) -- (-.05,1); \draw[orange,thick] (.05,-1) -- (.05,1); \draw[\XColor,thick] (-.2,1.5) -- (-.2,1); \draw[orange,thick] (.2,1.5) -- (.2,1); \roundNbox{unshaded}{(0,-.7)}{.3}{.2}{.2}{\scriptsize{$F^2_{X,Y}$}}; \roundNbox{unshaded}{(0,.7)}{.3}{.25}{.25}{\tiny{$(F^2_{X,Y})^\dag$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,-1.5) rectangle (.7,1.5); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.5,-1) rectangle (.5,1); \filldraw[primedregion=white] (-.5,-1) rectangle (.5,1); \filldraw[primedregion=green!30] (-.2,1) rectangle (.2,.5); \filldraw[primedregion=gray!55] (-.2,-.5) rectangle (.2,.5); \filldraw[primedregion=green!30] (-.2,-1) rectangle (.2,-.5); \end{scope} \draw[DarkGreen,thick] (-.2,-.5) -- (.2,-.5); \draw[DarkGreen,thick] (-.2,.5) -- (.2,.5); \draw[\XColor,thick] (-.2,-1) -- (-.2,1); \draw[orange,thick] (.2,-1) -- (.2,1); \draw[thin, dotted, rounded corners = 5pt] (-.5,-1) rectangle (.5,1); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.5,-1) rectangle (.5,1); \filldraw[primedregion=white] (-.5,-1) rectangle (.5,1); \filldraw[primedregion=green!30] (-.2,1) rectangle (.2,-1); \end{scope} \draw[\XColor,thick] (-.2,-1) -- (-.2,1); \draw[orange,thick] (.2,-1) -- (.2,1); \draw[thin, 
dotted, rounded corners = 5pt] (-.5,-1) rectangle (.5,1); }\,; \quad\text{similarly,}\quad \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners=5pt] (-1.6,-3) rectangle (1.6,3); \filldraw[primedregion=white] (-1.6,-3) rectangle (1.6,3); \filldraw[primedregion=green!30] (-.1,-2) rectangle (.1,-3); \filldraw[primedregion=green!30] (-.1,2) rectangle (.1,3); \filldraw[primedregion=green!30] (-.4,-2) rectangle (.4,2); \end{scope} \draw[\XColor,thick] (-.1,-3) -- (-.1,-2); \draw[orange,thick] (.1,-3) -- (.1,-2); \draw[\XColor,thick] (-.4,-2) -- (-.4,2); \draw[orange,thick] (.4,-2) -- (.4,2); \draw[\XColor,thick] (-.1,3) -- (-.1,2); \draw[orange,thick] (.1,3) -- (.1,2); \roundNbox{unshaded}{(0,1.4)}{.6}{.7}{.7}{\small{${\sf QSys}(F)^2_{X,Y}$}}; \roundNbox{unshaded}{(0,-1.4)}{.6}{.7}{.7}{\small{$({\sf QSys}(F)^2_{X,Y})^\dag$}}; \draw[thin, dotted, rounded corners = 5pt] (-1.6,-3) rectangle (1.6,3); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.35,-1) rectangle (.35,1); \filldraw[primedregion=white] (-.35,-1) rectangle (.35,1); \filldraw[primedregion=green!30] (-.05,1) rectangle (.05,-1); \end{scope} \draw[\XColor,thick] (-.05,-1) -- (-.05,1); \draw[orange,thick] (.05,-1) -- (.05,1); \draw[thin, dotted, rounded corners = 5pt] (-.35,-1) rectangle (.35,1); }\,. 
\] Using \eqref{eq:DefOfQSysF2}, unitarity of ${\sf QSys}(F)^2$, and that $u$ is a coisometry, we have \[ \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners=5pt] (-1.4,-2) rectangle (1.4,2); \filldraw[primedregion=white] (-1.4,-2) rectangle (1.4,2); \filldraw[primedregion=gray!55] (-.4,-1.4) rectangle (.4,-2); \filldraw[primedregion=green!30] (-.4,-1.4) rectangle (.4,0); \filldraw[primedregion=green!30] (-.1,2) rectangle (.1,0); \end{scope} \draw[DarkGreen,thick] (-.4,-1.4) -- (.4,-1.4); \draw[\XColor,thick] (-.4,-2) -- (-.4,0); \draw[orange,thick] (.4,-2) -- (.4,0); \draw[\XColor,thick] (-.1,2) -- (-.1,0); \draw[orange,thick] (.1,2) -- (.1,0); \roundNbox{unshaded}{(0,0)}{.6}{.45}{.45}{\small{${\sf QSys}(F)^2_{X,Y}$}}; \draw[thin, dotted, rounded corners = 5pt] (-1.4,-2) rectangle (1.4,2); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,-1) rectangle (.7,1); \filldraw[primedregion=white] (-.7,-1) rectangle (.7,1); \filldraw[primedregion=gray!55] (-.2,-1) rectangle (.2,0); \filldraw[primedregion=gray!55] (-.05,0) rectangle (.05,.7); \filldraw[primedregion=green!30] (-.05,1) rectangle (.05,.7); \end{scope} \draw[DarkGreen,thick] (-.05,.7) -- (.05,.7); \draw[\XColor,thick] (-.2,-1) -- (-.2,0); \draw[orange,thick] (.2,-1) -- (.2,0); \draw[\XColor,thick] (-.05,1) -- (-.05,0); \draw[orange,thick] (.05,1) -- (.05,0); \roundNbox{unshaded}{(0,0)}{.3}{.2}{.2}{\scriptsize{$F^2_{X,Y}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,-1) rectangle (.7,1); } \qquad\text{and}\qquad \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners=5pt] (-1.4,-2) rectangle (1.4,2); \filldraw[primedregion=white] (-1.4,-2) rectangle (1.4,2); \filldraw[primedregion=green!30] (-.4,-2) rectangle (.4,0); \filldraw[primedregion=green!30] (-.1,1.4) rectangle (.1,0); \filldraw[primedregion=gray!55] (-.1,1.4) rectangle (.1,2); \end{scope} \draw[DarkGreen,thick] (-.1,1.4) -- (.1,1.4); \draw[\XColor,thick] (-.4,-2) -- (-.4,0); \draw[orange,thick] 
(.4,-2) -- (.4,0); \draw[\XColor,thick] (-.1,2) -- (-.1,0); \draw[orange,thick] (.1,2) -- (.1,0); \roundNbox{unshaded}{(0,0)}{.6}{.45}{.45}{\small{${\sf QSys}(F)^2_{X,Y}$}}; \draw[thin, dotted, rounded corners = 5pt] (-1.4,-2) rectangle (1.4,2); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,-1) rectangle (.7,1); \filldraw[primedregion=white] (-.7,-1) rectangle (.7,1); \filldraw[primedregion=green!30] (-.2,-1) rectangle (.2,-.7); \filldraw[primedregion=gray!55] (-.2,0) rectangle (.2,-.7); \filldraw[primedregion=gray!55] (-.05,1) rectangle (.05,0); \end{scope} \draw[DarkGreen,thick] (-.2,-.7) -- (.2,-.7); \draw[\XColor,thick] (-.2,-1) -- (-.2,0); \draw[orange,thick] (.2,-1) -- (.2,0); \draw[\XColor,thick] (-.05,1) -- (-.05,0); \draw[orange,thick] (.05,1) -- (.05,0); \roundNbox{unshaded}{(0,0)}{.3}{.2}{.2}{\scriptsize{$F^2_{X,Y}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,-1) rectangle (.7,1); } \] By naturality, we have \[ \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.85,-1) rectangle (.85,1); \filldraw[primedregion=white] (-.85,-1) rectangle (.85,1); \filldraw[primedregion=green!30] (-.3,-1) rectangle (-.2,0); \filldraw[primedregion=green!30] (-.1,0) rectangle (0,.7); \filldraw[primedregion=gray!55] (-.1,.7) rectangle (0,1); \filldraw[primedregion=gray!75] (-.2,-1) rectangle (.2,0); \filldraw[primedregion=gray!75] (0,0) rectangle (.1,1); \end{scope} \draw[DarkGreen,thick] (-.1,.7) -- (0,.7); \draw[\XColor,thick] (-.3,-1) -- (-.3,0); \draw[orange,thick] (-.2,-1) -- (-.2,0); \draw[violet,thick] (.2,-1) -- (.2,0); \draw[\XColor,thick] (-.1,1) -- (-.1,0); \draw[orange,thick] (0,1) -- (0,0); \draw[violet,thick] (.1,1) -- (.1,0); \roundNbox{unshaded}{(0,0)}{.3}{.35}{.35}{\scriptsize{$F^2_{X\otimes_Q Y,Z}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.85,-1) rectangle (.85,1); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.85,-1) rectangle (.85,1); \filldraw[primedregion=white] (-.85,-1) rectangle (.85,1); 
\filldraw[primedregion=gray!55] (-.1,0) rectangle (0,1); \filldraw[primedregion=gray!55] (-.3,-.7) rectangle (-.2,0); \filldraw[primedregion=green!30] (-.3,-1) rectangle (-.2,-.7); \filldraw[primedregion=gray!75] (-.2,-1) rectangle (.2,0); \filldraw[primedregion=gray!75] (0,0) rectangle (.1,1); \end{scope} \draw[DarkGreen,thick] (-.3,-.7) -- (-.2,-.7); \draw[\XColor,thick] (-.3,-1) -- (-.3,0); \draw[orange,thick] (-.2,-1) -- (-.2,0); \draw[violet,thick] (.2,-1) -- (.2,0); \draw[\XColor,thick] (-.1,1) -- (-.1,0); \draw[orange,thick] (0,1) -- (0,0); \draw[violet,thick] (.1,1) -- (.1,0); \roundNbox{unshaded}{(0,0)}{.3}{.35}{.35}{\scriptsize{$F^2_{X\otimes Y,Z}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.85,-1) rectangle (.85,1); } : F(X\otimes_Q Y)\otimes F(Z)\to F(X\otimes Y)\otimes F(Z). \] These identities are used to prove the hexagon associativity coherence for ${\sf QSys}(F)^2$ and the triangle unit coherences for ${\sf QSys}(F)^1$: \begin{align*} \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners=5pt] (-1.9,0) rectangle (1.7,8); \filldraw[primedregion=white] (-1.9,0) rectangle (1.7,8); \filldraw[primedregion=cyan!30] (1,0) rectangle (-.4,4); \filldraw[primedregion=green!30] (-1,0) rectangle (0,2); \filldraw[primedregion=green!30] (-.6,2) rectangle (-.4,4); \filldraw[primedregion=green!30] (-.2,4) rectangle (0,8); \filldraw[primedregion=cyan!30] (0,4) rectangle (.2,8); \end{scope} \draw[\XColor,thick] (-1,0) -- (-1,2); \draw[orange,thick] (0,0) -- (0,2); \draw[\XColor,thick] (-.6,2) -- (-.6,4); \draw[orange,thick] (-.4,2) -- (-.4,4); \draw[violet,thick] (1,0) -- (1,4); \draw[\XColor,thick] (-.2,4) -- (-.2,8); \draw[orange,thick] (0,4) -- (0,8); \draw[violet,thick] (.2,4) -- (.2,8); \roundNbox{unshaded}{(-.5,2)}{.6}{.45}{.45}{\small{${\sf QSys}(F)^2_{X,Y}$}}; \roundNbox{unshaded}{(0,4)}{.6}{.95}{.75}{\small{${\sf QSys}(F)^2_{X\otimes_Q Y,Z}$}}; \roundNbox{unshaded}{(0,6)}{.6}{.95}{.75}{\normalsize{$F(\alpha^{{\sf QSys}(\cC)})$}}; 
\draw[thin, dotted, rounded corners = 5pt] (-1.9,0) rectangle (1.7,8); } &= \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.85,0) rectangle (.85,4.2); \filldraw[primedregion=white] (-.85,0) rectangle (.85,4.2); \filldraw[primedregion=green!30] (-.4,0) rectangle (0,.2); \filldraw[primedregion=green!30] (-.25,1.2) rectangle (-.15,2); \filldraw[primedregion=green!30] (-.1,2) rectangle (0,2.8); \filldraw[primedregion=green!30] (-.1,4) rectangle (0,4.2); \filldraw[primedregion=gray!55] (-.4,.2) rectangle (0,.7); \filldraw[primedregion=gray!55] (-.25,.7) rectangle (-.15,1.2); \filldraw[primedregion=gray!55] (-.1,2.8) rectangle (0,4); \filldraw[primedregion=cyan!30] (0,0) rectangle (.4,1.5); \filldraw[primedregion=cyan!30] (-.15,1) rectangle (.4,1.5); \filldraw[primedregion=cyan!30] (0,2.5) rectangle (.1,2.6); \filldraw[primedregion=cyan!30] (0,3.8) rectangle (.1,4.2); \filldraw[primedregion=gray!75] (-.15,1.5) rectangle (.4,2); \filldraw[primedregion=gray!75] (0,2) rectangle (.1,2.5); \filldraw[primedregion=gray!75] (0,2.6) rectangle (.1,3.8); \end{scope} \draw[DarkGreen,thick] (-.4,.2) -- (0,.2); \draw[DarkGreen,thick] (-.25,1.2) -- (-.15,1.2); \draw[DarkGreen,thick] (-.1,2.8) -- (0,2.8); \draw[DarkGreen,thick] (-.1,4) -- (0,4); \draw[cyan!90,thick] (-.15,1.5) -- (.4,1.5); \draw[cyan!90,thick] (0,2.6) -- (.1,2.6); \draw[cyan!90,thick] (0,2.5) -- (.1,2.5); \draw[cyan!90,thick] (0,3.8) -- (.1,3.8); \draw[\XColor,thick] (-.4,0) -- (-.4,1); \draw[orange,thick] (0,0) -- (0,1); \draw[violet,thick] (.4,0) -- (.4,2); \draw[\XColor,thick] (-.25,1) -- (-.25,2); \draw[orange,thick] (-.15,1) -- (-.15,2); \draw[\XColor,thick] (-.1,2) -- (-.1,4.2); \draw[orange,thick] (0,2) -- (0,4.2); \draw[violet,thick] (.1,2) -- (.1,4.2); \roundNbox{unshaded}{(-.2,.7)}{.3}{.15}{.05}{\scriptsize{$F^2_{X,Y}$}}; \roundNbox{unshaded}{(0,2)}{.3}{.35}{.35}{\tiny{$F^2_{X\otimes_Q Y,Z}$}}; \roundNbox{unshaded}{(0,3.3)}{.3}{.25}{.25}{\scriptsize{$F(\alpha^{\cC})$}}; \draw[thin, dotted, rounded 
corners = 5pt] (-.85,0) rectangle (.85,4.2); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.85,0) rectangle (.85,4.2); \filldraw[primedregion=white] (-.85,0) rectangle (.85,4.2); \filldraw[primedregion=green!30] (-.4,0) rectangle (0,.5); \filldraw[primedregion=green!30] (-.25,1.5) rectangle (-.15,2); \filldraw[primedregion=green!30] (-.1,2) rectangle (0,2.8); \filldraw[primedregion=green!30] (-.1,4) rectangle (0,4.2); \filldraw[primedregion=gray!55] (-.4,.5) rectangle (0,1); \filldraw[primedregion=gray!55] (-.25,1) rectangle (-.15,1.5); \filldraw[primedregion=gray!55] (-.1,2.8) rectangle (0,4); \filldraw[primedregion=cyan!30] (0,0) rectangle (.4,.2); \filldraw[primedregion=cyan!30] (0,3.8) rectangle (.1,4.2); \filldraw[primedregion=gray!75] (0,2) rectangle (.1,3.8); \filldraw[primedregion=gray!75] (-.15,2) -- (-.15,1) -- (0,1) -- (0,.2) -- (.4,.2) -- (.4,2); \end{scope} \draw[DarkGreen,thick] (-.4,.5) -- (0,.5); \draw[DarkGreen,thick] (-.25,1.5) -- (-.15,1.5); \draw[DarkGreen,thick] (-.1,2.8) -- (0,2.8); \draw[DarkGreen,thick] (-.1,4) -- (0,4); \draw[cyan!90,thick] (0,.2) -- (.4,.2); \draw[cyan!90,thick] (0,2.5) -- (.1,2.5); \draw[cyan!90,thick] (0,3.8) -- (.1,3.8); \draw[\XColor,thick] (-.4,0) -- (-.4,1); \draw[orange,thick] (0,0) -- (0,1); \draw[violet,thick] (.4,0) -- (.4,2); \draw[\XColor,thick] (-.25,1) -- (-.25,2); \draw[orange,thick] (-.15,1) -- (-.15,2); \draw[\XColor,thick] (-.1,2) -- (-.1,4.2); \draw[orange,thick] (0,2) -- (0,4.2); \draw[violet,thick] (.1,2) -- (.1,4.2); \roundNbox{unshaded}{(-.2,1)}{.3}{.15}{.05}{\scriptsize{$F^2_{X,Y}$}}; \roundNbox{unshaded}{(0,2)}{.3}{.35}{.35}{\tiny{$F^2_{X\otimes_Q Y,Z}$}}; \roundNbox{unshaded}{(0,3.3)}{.3}{.25}{.25}{\scriptsize{$F(\alpha^{\cC})$}}; \draw[thin, dotted, rounded corners = 5pt] (-.85,0) rectangle (.85,4.2); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.85,0) rectangle (.85,4.2); \filldraw[primedregion=white] (-.85,0) rectangle (.85,4.2); \filldraw[primedregion=green!30] (-.4,0) 
rectangle (0,.5); \filldraw[primedregion=green!30] (-.1,4) rectangle (0,4.2); \filldraw[primedregion=gray!55] (-.4,.5) rectangle (0,1); \filldraw[primedregion=gray!55] (-.25,1) rectangle (-.15,2); \filldraw[primedregion=gray!55] (-.1,2) rectangle (0,4); \filldraw[primedregion=cyan!30] (0,0) rectangle (.4,.2); \filldraw[primedregion=cyan!30] (0,3.8) rectangle (.1,4.2); \filldraw[primedregion=gray!75] (0,2) rectangle (.1,3.8); \filldraw[primedregion=gray!75] (-.15,2) -- (-.15,1) -- (0,1) -- (0,.2) -- (.4,.2) -- (.4,2); \end{scope} \draw[DarkGreen,thick] (-.4,.5) -- (0,.5); \draw[DarkGreen,thick] (-.25,1.5) -- (-.15,1.5); \draw[DarkGreen,thick] (-.1,4) -- (0,4); \draw[cyan!90,thick] (0,.2) -- (.4,.2); \draw[cyan!90,thick] (0,2.5) -- (.1,2.5); \draw[cyan!90,thick] (0,3.8) -- (.1,3.8); \draw[\XColor,thick] (-.4,0) -- (-.4,1); \draw[orange,thick] (0,0) -- (0,1); \draw[violet,thick] (.4,0) -- (.4,2); \draw[\XColor,thick] (-.25,1) -- (-.25,2); \draw[orange,thick] (-.15,1) -- (-.15,2); \draw[\XColor,thick] (-.1,2) -- (-.1,4.2); \draw[orange,thick] (0,2) -- (0,4.2); \draw[violet,thick] (.1,2) -- (.1,4.2); \roundNbox{unshaded}{(-.2,1)}{.3}{.15}{.05}{\scriptsize{$F^2_{X,Y}$}}; \roundNbox{unshaded}{(0,2)}{.3}{.35}{.35}{\tiny{$F^2_{X\otimes Y,Z}$}}; \roundNbox{unshaded}{(0,3.3)}{.3}{.25}{.25}{\scriptsize{$F(\alpha^{\cC})$}}; \draw[thin, dotted, rounded corners = 5pt] (-.85,0) rectangle (.85,4.2); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.85,0) rectangle (.85,4.2); \filldraw[primedregion=white] (-.85,0) rectangle (.85,4.2); \filldraw[primedregion=green!30] (-.4,0) rectangle (0,.5); \filldraw[primedregion=green!30] (-.1,4) rectangle (0,4.2); \filldraw[primedregion=gray!55] (-.4,.5) rectangle (0,1); \filldraw[primedregion=gray!55] (-.25,1) rectangle (-.15,2); \filldraw[primedregion=gray!55] (-.1,2) rectangle (0,4); \filldraw[primedregion=cyan!30] (0,0) rectangle (.4,.2); \filldraw[primedregion=cyan!30] (0,3.8) rectangle (.1,4.2); \filldraw[primedregion=gray!75] 
(0,2) rectangle (.1,3.8); \filldraw[primedregion=gray!75] (-.15,2) -- (-.15,1) -- (0,1) -- (0,.2) -- (.4,.2) -- (.4,2); \end{scope} \draw[DarkGreen,thick] (-.4,.5) -- (0,.5); \draw[DarkGreen,thick] (-.1,4) -- (0,4); \draw[cyan!90,thick] (0,.2) -- (.4,.2); \draw[cyan!90,thick] (0,2.5) -- (.1,2.5); \draw[cyan!90,thick] (0,3.8) -- (.1,3.8); \draw[\XColor,thick] (-.4,0) -- (-.4,1); \draw[orange,thick] (0,0) -- (0,1); \draw[violet,thick] (.4,0) -- (.4,2); \draw[\XColor,thick] (-.25,1) -- (-.25,2); \draw[orange,thick] (-.15,1) -- (-.15,2); \draw[\XColor,thick] (-.1,2) -- (-.1,4.2); \draw[orange,thick] (0,2) -- (0,4.2); \draw[violet,thick] (.1,2) -- (.1,4.2); \roundNbox{unshaded}{(-.2,1)}{.3}{.15}{.05}{\scriptsize{$F^2_{X,Y}$}}; \roundNbox{unshaded}{(0,2)}{.3}{.35}{.35}{\tiny{$F^2_{X\otimes Y,Z}$}}; \roundNbox{unshaded}{(0,3.3)}{.3}{.25}{.25}{\scriptsize{$F(\alpha^{\cC})$}}; \draw[thin, dotted, rounded corners = 5pt] (-.85,0) rectangle (.85,4.2); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.85,0) rectangle (.85,4.2); \filldraw[primedregion=white] (-.85,0) rectangle (.85,4.2); \filldraw[primedregion=green!30] (-.4,0) rectangle (0,.5); \filldraw[primedregion=green!30] (-.1,4) rectangle (0,4.2); \filldraw[primedregion=gray!55] (-.4,.5) rectangle (0,1); \filldraw[primedregion=gray!55] (-.25,1) rectangle (-.15,2); \filldraw[primedregion=gray!55] (-.1,2) rectangle (0,4); \filldraw[primedregion=cyan!30] (0,0) rectangle (.4,.2); \filldraw[primedregion=cyan!30] (0,3.8) rectangle (.1,4.2); \filldraw[primedregion=gray!75] (0,2) rectangle (.1,3.8); \filldraw[primedregion=gray!75] (-.15,2) -- (-.15,1) -- (0,1) -- (0,.2) -- (.4,.2) -- (.4,2); \end{scope} \draw[DarkGreen,thick] (-.4,.5) -- (0,.5); \draw[DarkGreen,thick] (-.1,4) -- (0,4); \draw[cyan!90,thick] (0,.2) -- (.4,.2); \draw[cyan!90,thick] (0,3.8) -- (.1,3.8); \draw[\XColor,thick] (-.4,0) -- (-.4,1); \draw[orange,thick] (0,0) -- (0,1); \draw[violet,thick] (.4,0) -- (.4,2); \draw[\XColor,thick] (-.25,1) -- (-.25,2); 
\draw[orange,thick] (-.15,1) -- (-.15,2); \draw[\XColor,thick] (-.1,2) -- (-.1,4.2); \draw[orange,thick] (0,2) -- (0,4.2); \draw[violet,thick] (.1,2) -- (.1,4.2); \roundNbox{unshaded}{(-.2,1)}{.3}{.15}{.05}{\scriptsize{$F^2_{X,Y}$}}; \roundNbox{unshaded}{(0,2)}{.3}{.35}{.35}{\tiny{$F^2_{X\otimes Y,Z}$}}; \roundNbox{unshaded}{(0,3.3)}{.3}{.25}{.25}{\scriptsize{$F(\alpha^{\cC})$}}; \draw[thin, dotted, rounded corners = 5pt] (-.85,0) rectangle (.85,4.2); } \\ &= \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.85,0) rectangle (.85,4.4); \filldraw[primedregion=white] (-.85,0) rectangle (.85,4.4); \filldraw[primedregion=green!30] (-.4,0) rectangle (0,.5); \filldraw[primedregion=green!30] (-.1,4.2) rectangle (0,4.4); \filldraw[primedregion=gray!55] (-.4,.5) -- (-.4,3.5) -- (.15,3.5) -- (.15,2.3) -- (0,2.3) -- (0,.5); \filldraw[primedregion=gray!55] (-.1,3.5) rectangle (0,4.2); \filldraw[primedregion=cyan!30] (0,0) rectangle (.4,.2); \filldraw[primedregion=cyan!30] (0,4) rectangle (.1,4.4); \filldraw[primedregion=gray!75] (0,.2) rectangle (.4,2.3); \filldraw[primedregion=gray!75] (.15,2.3) rectangle (.25,3.5); \filldraw[primedregion=gray!75] (0,3.5) rectangle (.1,4); \end{scope} \draw[DarkGreen,thick] (-.4,.5) -- (0,.5); \draw[DarkGreen,thick] (-.1,4.2) -- (0,4.2); \draw[cyan!90,thick] (0,.2) -- (.4,.2); \draw[cyan!90,thick] (0,4) -- (.1,4); \draw[\XColor,thick] (-.4,0) -- (-.4,3.5); \draw[orange,thick] (0,0) -- (0,2); \draw[violet,thick] (.4,0) -- (.4,2); \draw[orange,thick] (.15,2) -- (.15,3.5); \draw[violet,thick] (.25,2) -- (.25,3.5); \draw[\XColor,thick] (-.1,3.5) -- (-.1,4.4); \draw[orange,thick] (0,3.5) -- (0,4.4); \draw[violet,thick] (.1,3.5) -- (.1,4.4); \roundNbox{unshaded}{(0,1)}{.3}{.35}{.35}{\scriptsize{$\alpha^\cD$}}; \roundNbox{unshaded}{(.2,2.2)}{.3}{.05}{.15}{\scriptsize{$F^2_{Y,Z}$}}; \roundNbox{unshaded}{(0,3.5)}{.3}{.35}{.35}{\tiny{$F^2_{X,Y\otimes Z}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.85,0) rectangle (.85,4.4); } = \tikzmath{ 
\begin{scope} \clip[rounded corners = 5] (-.85,0) rectangle (.85,4.4); \filldraw[primedregion=white] (-.85,0) rectangle (.85,4.4); \filldraw[primedregion=green!30] (-.4,0) rectangle (0,.5); \filldraw[primedregion=green!30] (-.1,4.2) rectangle (0,4.4); \filldraw[primedregion=gray!55] (-.4,.5) -- (-.4,3.5) -- (.15,3.5) -- (.15,2.3) -- (0,2.3) -- (0,.5); \filldraw[primedregion=gray!55] (-.1,3.5) rectangle (0,4.2); \filldraw[primedregion=cyan!30] (0,0) rectangle (.4,.2); \filldraw[primedregion=cyan!30] (0,4) rectangle (.1,4.4); \filldraw[primedregion=gray!75] (0,.2) rectangle (.4,2.3); \filldraw[primedregion=gray!75] (.15,2.3) rectangle (.25,3.5); \filldraw[primedregion=gray!75] (0,3.5) rectangle (.1,4); \end{scope} \draw[DarkGreen,thick] (-.4,.5) -- (0,.5); \draw[DarkGreen,thick] (-.4,3) -- (.15,3); \draw[DarkGreen,thick] (-.1,4.2) -- (0,4.2); \draw[cyan!90,thick] (0,.2) -- (.4,.2); \draw[cyan!90,thick] (0,4) -- (.1,4); \draw[\XColor,thick] (-.4,0) -- (-.4,3.5); \draw[orange,thick] (0,0) -- (0,2); \draw[violet,thick] (.4,0) -- (.4,2); \draw[orange,thick] (.15,2) -- (.15,3.5); \draw[violet,thick] (.25,2) -- (.25,3.5); \draw[\XColor,thick] (-.1,3.5) -- (-.1,4.4); \draw[orange,thick] (0,3.5) -- (0,4.4); \draw[violet,thick] (.1,3.5) -- (.1,4.4); \roundNbox{unshaded}{(0,1)}{.3}{.35}{.35}{\scriptsize{$\alpha^\cD$}}; \roundNbox{unshaded}{(.2,2.2)}{.3}{.05}{.15}{\scriptsize{$F^2_{Y,Z}$}}; \roundNbox{unshaded}{(0,3.5)}{.3}{.35}{.35}{\tiny{$F^2_{X,Y\otimes Z}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.85,0) rectangle (.85,4.4); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.85,0) rectangle (.85,4.4); \filldraw[primedregion=white] (-.85,0) rectangle (.85,4.4); \filldraw[primedregion=green!30] (-.4,0) rectangle (0,.5); \filldraw[primedregion=green!30] (-.1,4.2) rectangle (0,4.4); \filldraw[primedregion=gray!55] (-.4,.5) -- (-.4,3.5) -- (.15,3.5) -- (.15,2.3) -- (0,2.3) -- (0,.5); \filldraw[primedregion=gray!55] (-.1,3.5) rectangle (0,4.2); 
\filldraw[primedregion=cyan!30] (0,0) rectangle (.4,.2); \filldraw[primedregion=cyan!30] (.15,2.8) rectangle (.25,3.5); \filldraw[primedregion=cyan!30] (0,3.5) rectangle (.1,4.4); \filldraw[primedregion=gray!75] (0,.2) rectangle (.4,2.3); \filldraw[primedregion=gray!75] (.15,2.3) rectangle (.25,2.8); \end{scope} \draw[DarkGreen,thick] (-.4,.5) -- (0,.5); \draw[DarkGreen,thick] (-.4,3) -- (.15,3); \draw[DarkGreen,thick] (-.1,4.2) -- (0,4.2); \draw[cyan!90,thick] (0,.2) -- (.4,.2); \draw[cyan!90,thick] (.15,2.8) -- (.25,2.8); \draw[\XColor,thick] (-.4,0) -- (-.4,3.5); \draw[orange,thick] (0,0) -- (0,2); \draw[violet,thick] (.4,0) -- (.4,2); \draw[orange,thick] (.15,2) -- (.15,3.5); \draw[violet,thick] (.25,2) -- (.25,3.5); \draw[\XColor,thick] (-.1,3.5) -- (-.1,4.4); \draw[orange,thick] (0,3.5) -- (0,4.4); \draw[violet,thick] (.1,3.5) -- (.1,4.4); \roundNbox{unshaded}{(0,1)}{.3}{.35}{.35}{\scriptsize{$\alpha^\cD$}}; \roundNbox{unshaded}{(.2,2.2)}{.3}{.05}{.15}{\scriptsize{$F^2_{Y,Z}$}}; \roundNbox{unshaded}{(0,3.5)}{.3}{.35}{.35}{\tiny{$F^2_{X,Y\otimes_R Z}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.85,0) rectangle (.85,4.4); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.85,0) rectangle (.85,4.4); \filldraw[primedregion=white] (-.85,0) rectangle (.85,4.4); \filldraw[primedregion=green!30] (-.4,0) rectangle (0,.5); \filldraw[primedregion=green!30] (-.1,4.2) rectangle (0,4.4); \filldraw[primedregion=gray!55] (-.4,.5) -- (-.4,3.5) -- (.15,3.5) -- (.15,2.3) -- (0,2.3) -- (0,.5); \filldraw[primedregion=gray!55] (-.1,3.5) rectangle (0,4.2); \filldraw[primedregion=cyan!30] (0,0) rectangle (.4,.2); \filldraw[primedregion=cyan!30] (.15,2.8) rectangle (.25,3.5); \filldraw[primedregion=cyan!30] (0,3.5) rectangle (.1,4.4); \filldraw[primedregion=gray!75] (0,.2) rectangle (.4,2.3); \filldraw[primedregion=gray!75] (.15,2.3) rectangle (.25,2.8); \end{scope} \draw[DarkGreen,thick] (-.4,.5) -- (0,.5); \draw[DarkGreen,thick] (-.4,3) -- (.15,3); 
\draw[DarkGreen,thick] (-.1,4.2) -- (0,4.2); \draw[cyan!90,thick] (0,.2) -- (.4,.2); \draw[cyan!90,thick] (0,1.5) -- (.4,1.5); \draw[cyan!90,thick] (.15,2.8) -- (.25,2.8); \draw[\XColor,thick] (-.4,0) -- (-.4,3.5); \draw[orange,thick] (0,0) -- (0,2); \draw[violet,thick] (.4,0) -- (.4,2); \draw[orange,thick] (.15,2) -- (.15,3.5); \draw[violet,thick] (.25,2) -- (.25,3.5); \draw[\XColor,thick] (-.1,3.5) -- (-.1,4.4); \draw[orange,thick] (0,3.5) -- (0,4.4); \draw[violet,thick] (.1,3.5) -- (.1,4.4); \roundNbox{unshaded}{(0,1)}{.3}{.35}{.35}{\scriptsize{$\alpha^\cD$}}; \roundNbox{unshaded}{(.2,2.2)}{.3}{.05}{.15}{\scriptsize{$F^2_{Y,Z}$}}; \roundNbox{unshaded}{(0,3.5)}{.3}{.35}{.35}{\tiny{$F^2_{X,Y\otimes_R Z}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.85,0) rectangle (.85,4.4); } = \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.85,0) rectangle (.85,4.4); \filldraw[primedregion=white] (-.85,0) rectangle (.85,4.4); \filldraw[primedregion=green!30] (-.4,0) rectangle (0,.5); \filldraw[primedregion=green!30] (-.4,1.8) rectangle (0,3); \filldraw[primedregion=green!30] (-.4,2) rectangle (.15,3); \filldraw[primedregion=green!30] (-.1,4.2) rectangle (0,4.4); \filldraw[primedregion=gray!55] (-.4,.5) rectangle (0,1.8); \filldraw[primedregion=gray!55] (-.4,3) rectangle (.15,3.5); \filldraw[primedregion=gray!55] (-.1,3.5) rectangle (0,4.2); \filldraw[primedregion=cyan!30] (0,0) rectangle (.4,.2); \filldraw[primedregion=cyan!30] (0,1.5) rectangle (.4,1.6); \filldraw[primedregion=cyan!30] (.15,2.8) rectangle (.25,3.5); \filldraw[primedregion=cyan!30] (0,3.5) rectangle (.1,4.4); \filldraw[primedregion=gray!75] (0,.2) rectangle (.4,1.5); \filldraw[primedregion=gray!75] (0,1.6) rectangle (.4,2.3); \filldraw[primedregion=gray!75] (.15,2.3) rectangle (.25,2.8); \end{scope} \draw[DarkGreen,thick] (-.4,.5) -- (0,.5); \draw[DarkGreen,thick] (-.4,1.8) -- (0,1.8); \draw[DarkGreen,thick] (-.4,3) -- (.15,3); \draw[DarkGreen,thick] (-.1,4.2) -- (0,4.2); \draw[cyan!90,thick] (0,.2) 
-- (.4,.2); \draw[cyan!90,thick] (0,1.5) -- (.4,1.5); \draw[cyan!90,thick] (0,1.6) -- (.4,1.6); \draw[cyan!90,thick] (.15,2.8) -- (.25,2.8); \draw[\XColor,thick] (-.4,0) -- (-.4,3.5); \draw[orange,thick] (0,0) -- (0,2); \draw[violet,thick] (.4,0) -- (.4,2); \draw[orange,thick] (.15,2) -- (.15,3.5); \draw[violet,thick] (.25,2) -- (.25,3.5); \draw[\XColor,thick] (-.1,3.5) -- (-.1,4.4); \draw[orange,thick] (0,3.5) -- (0,4.4); \draw[violet,thick] (.1,3.5) -- (.1,4.4); \roundNbox{unshaded}{(0,1)}{.3}{.35}{.35}{\scriptsize{$\alpha^\cD$}}; \roundNbox{unshaded}{(.2,2.3)}{.3}{.05}{.15}{\scriptsize{$F^2_{Y,Z}$}}; \roundNbox{unshaded}{(0,3.5)}{.3}{.35}{.35}{\tiny{$F^2_{X,Y\otimes_R Z}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.85,0) rectangle (.85,4.4); } = \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-1.7,0) rectangle (1.9,8); \filldraw[primedregion=white] (-1.7,0) rectangle (1.9,8); \filldraw[primedregion=green!30] (-1,0) rectangle (.4,6); \filldraw[primedregion=cyan!30] (0,0) rectangle (1,4); \filldraw[primedregion=cyan!30] (.6,4) rectangle (.4,6); \filldraw[primedregion=green!30] (-.2,6) rectangle (0,8); \filldraw[primedregion=cyan!30] (0,6) rectangle (.2,8); \end{scope} \draw[\XColor,thick] (-1,0) -- (-1,6); \draw[orange,thick] (0,0) -- (0,4); \draw[violet,thick] (1,0) -- (1,4); \draw[orange,thick] (.4,4) -- (.4,6); \draw[violet,thick] (.6,4) -- (.6,6); \draw[\XColor,thick] (-.2,6) -- (-.2,8); \draw[orange,thick] (0,6) -- (0,8); \draw[violet,thick] (.2,6) -- (.2,8); \roundNbox{unshaded}{(0,2)}{.6}{.75}{.95}{\normalsize{$\alpha^{{\sf QSys}(\cD)}$}}; \roundNbox{unshaded}{(.5,4)}{.6}{.45}{.45}{\small{${\sf QSys}(F)^2_{Y,Z}$}}; \roundNbox{unshaded}{(0,6)}{.6}{.75}{.95}{\small{${\sf QSys}(F)^2_{X,Y\otimes_R Z}$}}; \draw[thin, dotted, rounded corners = 5pt] (-1.7,0) rectangle (1.9,8); } \\ \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners=5pt] (-1.4,-1.4) rectangle (1.4,2); \filldraw[primedregion=white] 
(-1.4,-1.4) rectangle (1.4,2); \filldraw[primedregion=green!30] (-.4,-1.4) rectangle (.4,0); \filldraw[primedregion=green!30] (.1,0) -- (.1,1.2) arc (0:90:.2cm) -- (-.1,0); \end{scope} \draw[\XColor,thick] (-.4,-1.4) -- (-.4,0); \draw[DarkGreen,thick] (.4,-1.4) -- (.4,0); \draw[\XColor,thick] (-.1,2) -- (-.1,0); \draw[DarkGreen,thick] (.1,0) -- (.1,1.2) arc (0:90:.2cm); \filldraw[\XColor] (-.1,1.4) circle (.06cm); \roundNbox{unshaded}{(0,0)}{.6}{.45}{.45}{\small{${\sf QSys}(F)^2_{X,Q}$}}; \draw[thin, dotted, rounded corners = 5pt] (-1.4,-1.4) rectangle (1.4,2); } &= \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,-.7) rectangle (.7,1); \filldraw[primedregion=white] (-.7,-.7) rectangle (.7,1); \filldraw[primedregion=gray!55] (-.05,0) -- (.05,0) -- (.05,.7) arc (0:90:.1cm); \filldraw[primedregion=green!30] (-.05,.5) rectangle (.05,.6); \filldraw[primedregion=gray!55] (-.2,0) rectangle (.2,-.5); \filldraw[primedregion=green!30] (-.2,-.7) rectangle (.2,-.5); \end{scope} \draw[DarkGreen,thick] (-.2,-.5) -- (.2,-.5); \draw[DarkGreen,thick] (-.05,.5) -- (.05,.5); \draw[DarkGreen,thick] (-.05,.6) -- (.05,.6); \draw[\XColor,thick] (-.2,-.7) -- (-.2,0); \draw[DarkGreen,thick] (.2,-.7) -- (.2,0); \draw[\XColor,thick] (-.05,1) -- (-.05,0); \draw[DarkGreen,thick] (.05,0) -- (.05,.7) arc (0:90:.1cm); \filldraw[\XColor] (-.05,.8) circle (.03cm); \roundNbox{unshaded}{(0,0)}{.3}{.2}{.2}{\scriptsize{$F^2_{X,Q}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,-.7) rectangle (.7,1); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,-.7) rectangle (.7,1); \filldraw[primedregion=white] (-.7,-.7) rectangle (.7,1); \filldraw[primedregion=gray!55] (-.05,0) -- (.05,0) -- (.05,.6) arc (0:90:.1cm); \filldraw[primedregion=green!30] (-.2,-.7) rectangle (.2,-.5); \filldraw[primedregion=gray!55] (-.2,0) rectangle (.2,-.5); \end{scope} \draw[DarkGreen,thick] (-.2,-.5) -- (.2,-.5); \draw[DarkGreen,thick] (-.05,.5) -- (.05,.5); \draw[\XColor,thick] (-.2,-.7) -- (-.2,0); 
\draw[DarkGreen,thick] (.2,-.7) -- (.2,0); \draw[\XColor,thick] (-.05,1) -- (-.05,0); \draw[DarkGreen,thick] (.05,0) -- (.05,.6) arc (0:90:.1cm); \filldraw[\XColor] (-.05,.7) circle (.03cm); \roundNbox{unshaded}{(0,0)}{.3}{.2}{.2}{\scriptsize{$F^2_{X,Q}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,-.7) rectangle (.7,1); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,-.7) rectangle (.7,1); \filldraw[primedregion=white] (-.7,-.7) rectangle (.7,1); \filldraw[primedregion=gray!55] (-.05,0) -- (.05,0) -- (.05,.6) arc (0:90:.1cm); \filldraw[primedregion=green!30] (-.2,-.7) rectangle (.2,-.5); \filldraw[primedregion=gray!55] (-.2,0) rectangle (.2,-.5); \end{scope} \draw[DarkGreen,thick] (-.2,-.5) -- (.2,-.5); \draw[\XColor,thick] (-.2,-.7) -- (-.2,0); \draw[DarkGreen,thick] (.2,-.7) -- (.2,0); \draw[\XColor,thick] (-.05,1) -- (-.05,0); \draw[DarkGreen,thick] (.05,0) -- (.05,.6) arc (0:90:.1cm); \filldraw[\XColor] (-.05,.7) circle (.03cm); \roundNbox{unshaded}{(0,0)}{.3}{.2}{.2}{\scriptsize{$F^2_{X,Q}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,-.7) rectangle (.7,1); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.7,1.7); \filldraw[primedregion=white] (-.3,0) rectangle (.7,1.7); \filldraw[primedregion=gray!55] (0,.5) -- (.4,.5) -- (.4,.7) arc (0:90:.4cm); \filldraw[primedregion=green!30] (0,0) rectangle (.4,.5); \end{scope} \draw[DarkGreen,thick] (0,.5) -- (.4,.5); \draw[\XColor,thick] (0,0) -- (0,1.7); \draw[DarkGreen,thick] (.4,0) -- (.4,.7) arc (0:90:.4cm); \filldraw[\XColor] (0,1.1) circle (.05cm); \draw[thin, dotted, rounded corners = 5pt] (-.3,0) rectangle (.7,1.7); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.7,1.7); \filldraw[primedregion=white] (-.3,0) rectangle (.7,1.7); \filldraw[primedregion=green!30] (.4,0) -- (.4,.7) arc (0:90:.4cm) -- (0,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,1.7); \draw[DarkGreen,thick] (.4,0) -- (.4,.7) arc (0:90:.4cm); \filldraw[\XColor] 
(0,1.1) circle (.05cm); \draw[thin, dotted, rounded corners = 5pt] (-.3,0) rectangle (.7,1.7); }\,; \quad\text{similarly,}\quad \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners=5pt] (-1.4,-1.4) rectangle (1.4,2); \filldraw[primedregion=white] (-1.4,-1.4) rectangle (1.4,2); \filldraw[primedregion=green!30] (-.4,-1.4) rectangle (.4,0); \filldraw[primedregion=green!30] (-.1,0) -- (-.1,1.2) arc (180:90:.2cm) -- (.1,0); \end{scope} \draw[DarkGreen,thick] (-.4,-1.4) -- (-.4,0); \draw[orange,thick] (.4,-1.4) -- (.4,0); \draw[DarkGreen,thick] (-.1,0) -- (-.1,1.2) arc (180:90:.2cm); \draw[orange,thick] (.1,0) -- (.1,2); \filldraw[orange] (.1,1.4) circle (.06cm); \roundNbox{unshaded}{(0,0)}{.6}{.45}{.45}{\small{${\sf QSys}(F)^2_{Q,Y}$}}; \draw[thin, dotted, rounded corners = 5pt] (-1.4,-1.4) rectangle (1.4,2); } = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.7,1.7); \filldraw[primedregion=white] (-.3,0) rectangle (.7,1.7); \filldraw[primedregion=green!30] (0,0) -- (0,.7) arc (180:90:.4cm) -- (.4,0); \end{scope} \draw[DarkGreen,thick] (0,0) -- (0,.7) arc (180:90:.4cm); \draw[orange,thick] (.4,0) -- (.4,1.7); \filldraw[orange] (.4,1.1) circle (.05cm); \draw[thin, dotted, rounded corners = 5pt] (-.3,0) rectangle (.7,1.7); }\,. \end{align*} \end{construction} For the rest of this section, we fix two $\dag$ 2-functors $F,G: \cC \to \cD$. \begin{rem} Suppose $F,G: \cC\to \cD$ are two $\dag$ 2-functors and $\varphi: F\Rightarrow G$ is a $\dag$ 2-transformation. For any dualizable 1-cell $X\in\cC(a\to b)$, we have \[ \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-1.3,-1.6) rectangle (1.3,1); \filldraw[primedregion=gray!55] (-.9,-1.6) .. controls ++(90:.3cm) and ++(270:.3cm) .. (-.3,-1) -- (-.3,-.3) .. controls ++(90:.3cm) and ++(270:.3cm) .. (.3,.3) -- (.3,1) -- (-1.3,1) -- (-1.3,-1.6); \filldraw[primedregion=gray!30] (-.9,-1.6) .. controls ++(90:.2cm) and ++(-135:.1cm) .. (-.6,-1.3) .. 
controls ++(135:.1cm) and ++(270:.2cm) .. (-.9,-1) -- (-.9,.3) arc (180:0:.3cm) .. controls ++(270:.2cm) and ++(135:.1cm) .. (0,0) .. controls ++(45:.1cm) and ++(270:.2cm) .. (.3,.3) -- (.3,1) -- (-1.3,1) -- (-1.3,-1.6); \filldraw[boxregion=gray!55] (-.9,-1.6) .. controls ++(90:.3cm) and ++(270:.3cm) .. (-.3,-1) -- (-.3,-.3) .. controls ++(90:.3cm) and ++(270:.3cm) .. (.3,.3) -- (.3,1) -- (1.3,1) -- (1.3,-1.6); \filldraw[boxregion=gray!30] (-.9,-1.6) .. controls ++(90:.2cm) and ++(-135:.1cm) .. (-.6,-1.3) .. controls ++(-45:.1cm) and ++(90:.2cm) .. (-.3,-1.6); \filldraw[boxregion=gray!30] (.9,1) -- (.9,-.3) arc (0:-180:.3cm) .. controls ++(90:.2cm) and ++(-45:.1cm) .. (0,0) .. controls ++(45:.1cm) and ++(270:.2cm) .. (.3,.3) -- (.3,1); \end{scope} \draw[dashed] (-1.3,-1) -- (1.3,-1); \draw[black,thick] (-.9,-1.6) .. controls ++(90:.3cm) and ++(270:.3cm) .. (-.3,-1) -- (-.3,-.3) .. controls ++(90:.3cm) and ++(270:.3cm) .. (.3,.3) -- (.3,1); \draw[\XColor,thick] (.9,1) -- (.9,-.3) arc (0:-180:.3cm) .. controls ++(90:.3cm) and ++(270:.3cm) .. (-.3,.3) arc (0:180:.3cm) -- (-.9,-1); \draw[\XColor,thick] (-.3,-1.6) .. controls ++(90:.3cm) and ++(270:.3cm) .. 
(-.9,-1); \filldraw[white] (0,0) circle (.07cm); \draw[thick] (0,0) circle (.07cm); \filldraw[white] (-.6,-1.3) circle (.07cm); \draw[thick] (-.6,-1.3) circle (.07cm); \node at (.9,1.2) {\tiny{$G(X)$}}; \node at (.3,1.2) {\scriptsize{$\varphi_a$}}; \node at (-.3,-1.8) {\tiny{$G(X)$}}; \node at (-.9,-1.8) {\scriptsize{$\varphi_a$}}; \node at (-.45,0) {\scriptsize{$\varphi_X^\dag$}}; \node at (-1.05,-1.3) {\scriptsize{$\varphi_X^\dag$}}; } = \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.8,-.9) rectangle (1.5,.9); \filldraw[primedregion=gray!30] (-.8,-.9) rectangle (-.4,.9); \filldraw[boxregion=gray!30] (0,-.9) -- (0,0) arc (180:0:.3cm) arc (-180:0:.3cm) -- (1.2,.9) -- (-.4,.9) -- (-.4,-.9); \filldraw[boxregion=gray!55] (0,-.9) -- (0,0) arc (180:0:.3cm) arc (-180:0:.3cm) -- (1.2,.9) -- (1.5,.9) -- (1.5,-.9); \end{scope} \draw[black,thick] (-.4,-.9) -- (-.4,.9); \draw[\XColor,thick] (0,-.9) -- (0,0) arc (180:0:.3cm) arc (-180:0:.3cm) -- (1.2,.9); \node at (-.4,-1.1) {\scriptsize{$\varphi_a$}}; \node at (1.2,1.1) {\tiny{$G(X)$}}; } = \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.6,-.9) rectangle (.6,.9); \filldraw[primedregion=gray!30] (-.6,-.9) rectangle (-.2,.9); \filldraw[boxregion=gray!30] (-.2,-.9) rectangle (.2,.9); \filldraw[boxregion=gray!55] (.2,-.9) rectangle (.6,.9); \end{scope} \draw[black,thick] (-.2,-.9) -- (-.2,.9); \draw[\XColor,thick] (.2,-.9) -- (.2,.9); } \qquad \Longrightarrow \qquad \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-1.3,-1) rectangle (1.3,1); \filldraw[primedregion=gray!55] (-.3,-1) -- (-.3,-.3) .. controls ++(90:.3cm) and ++(270:.3cm) .. (.3,.3) -- (.3,1) -- (-1.3,1) -- (-1.3,-1); \filldraw[primedregion=gray!30] (-.9,-1) -- (-.9,.3) arc (180:0:.3cm) .. controls ++(270:.2cm) and ++(135:.1cm) .. (0,0) .. controls ++(45:.1cm) and ++(270:.2cm) .. (.3,.3) -- (.3,1) -- (-1.3,1) -- (-1.3,-1); \filldraw[boxregion=gray!55] (-.3,-1) -- (-.3,-.3) .. controls ++(90:.3cm) and ++(270:.3cm) .. 
(.3,.3) -- (.3,1) -- (1.3,1) -- (1.3,-1); \filldraw[boxregion=gray!30] (.9,1) -- (.9,-.3) arc (0:-180:.3cm) .. controls ++(90:.2cm) and ++(-45:.1cm) .. (0,0) .. controls ++(45:.1cm) and ++(270:.2cm) .. (.3,.3) -- (.3,1); \end{scope} \draw[black,thick] (-.3,-1) -- (-.3,-.3) .. controls ++(90:.3cm) and ++(270:.3cm) .. (.3,.3) -- (.3,1); \draw[\XColor,thick] (.9,1) -- (.9,-.3) arc (0:-180:.3cm) .. controls ++(90:.3cm) and ++(270:.3cm) .. (-.3,.3) arc (0:180:.3cm) -- (-.9,-1); \filldraw[white] (0,0) circle (.07cm); \draw[thick] (0,0) circle (.07cm); \node at (.9,1.2) {\scriptsize{$G(X)$}}; \node at (.3,1.2) {\scriptsize{$\varphi_a$}}; \node at (-.3,-1.2) {\scriptsize{$\varphi_b$}}; \node at (-.9,-1.2) {\scriptsize{$F(X)$}}; \node at (-.45,0) {\scriptsize{$\varphi_X^\dag$}}; } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,2.4); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,2.4); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,1.8) -- (0,2.4); \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.3) {\large{$F(X)$}}; \node at (1.2,2.7) {\large{$G(X)$}}; \node at (1.2,-.3) {\large{$\varphi_b$}}; \node at (0,2.7) {\large{$\varphi_a$}}; }\,. \] \end{rem} \begin{construction} \label{construction:QSys(phi)} Given a $\dag$-transformation $\varphi : F\Rightarrow G$, we define a $\dag$-transformation ${\sf QSys}(\varphi):{\sf QSys}(F)\Rightarrow {\sf QSys}(G)$. In the diagrams below, we suppress all coherence isomorphisms for $F$ and $G$. For a Q-system $({}_bQ_b,m,i)\in{\sf QSys}(\cC)$, we define ${\sf QSys}(\varphi)_Q$ by orthogonally splitting the orthogonal projection which appears as the second diagram below. The other diagrams in the next two rows prove that this second diagram is an orthogonal projection. \[ \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-1.2,-1.2) rectangle (1.2,1.2); \filldraw[primedregion=green!30] (-1.2,-1.2) rectangle (0,1.2); \filldraw[boxregion=green!30] (1.2,1.2) rectangle (0,-1.2); \filldraw[green!30] (-1,-.2) rectangle (-.2,.2); \filldraw[green!30] (.2,-.2) rectangle (1,.2); \end{scope} \draw[black,thick] (0,-1.2) -- (0,1.2); \node at (.6,0) {\scriptsize{$G(Q)$}}; \node at (-.6,0) {\scriptsize{$F(Q)$}}; \node at (0,-1.4) {\scriptsize{${\sf QSys}(\varphi)_Q$}}; } := \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-2.4,-1.2) rectangle (2.4,1.2); \filldraw[primedregion=gray!55] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2) -- (-2.4,1.2) -- (-2.4,-1.2); \filldraw[boxregion=gray!55] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2) -- (2.4,1.2) -- (2.4,-1.2); \filldraw[primedregion=green!30] (-.6,-1.2) -- (-.6,-.6) -- (-1.2,0) .. controls ++(135:.2cm) and ++(270:.4cm) ..
(-1.8,1.2) -- (-2.4,1.2) -- (-2.4,-1.2); \filldraw[boxregion=green!30] (1.8,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,0) -- (.6,.6) -- (.6,1.2) -- (2.4,1.2) -- (2.4,-1.2); \filldraw[gray!55] (-.6,-.2) rectangle (-.2,.2); \end{scope} \draw[black,thick] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2); \draw[DarkGreen,thick] (-.6,-1.2) -- (-.6,-.6) -- (-1.2,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,1.2); \draw[DarkGreen,thick] (1.8,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,0) -- (.6,.6) -- (.6,1.2); \draw[DarkGreen,thick] (-.6,-.6) -- (.6,.6); \filldraw[DarkGreen] (.6,.6) circle (.05cm); \filldraw[DarkGreen] (-.6,-.6) circle (.05cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \node at (-.6,-1.4) {\scriptsize{$F(Q)$}}; \node at (.6,-1.4) {\scriptsize{$\varphi_b$}}; \node at (1.8,-1.4) {\scriptsize{$G(Q)$}}; \node at (-.4,0) {\scriptsize{$\varphi_Q$}}; } = \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-4,-2) rectangle (4,2); \filldraw[primedregion=gray!55] (-.6,-2) -- (-.6,-.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (.6,.6) -- (.6,2) -- (-4,2) -- (-4,-2); \filldraw[boxregion=gray!55] (-.6,-2) -- (-.6,-.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (.6,.6) -- (.6,2) -- (4,2) -- (4,-2); \filldraw[primedregion=green!30] (-2.4,-2) -- (-2.4,-1.2) arc (-90:-180:.6cm) -- (-3,2) -- (-4,2) -- (-4,-2); \filldraw[boxregion=green!30] (2.4,2) -- (2.4,1.2) arc (90:0:.6cm) -- (3,-2) -- (4,-2) -- (4,2); \end{scope} \draw[black,thick] (-.6,-2) -- (-.6,-.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (.6,.6) -- (.6,2); \draw[DarkGreen,thick] (3,-2) -- (3,.6) arc (0:180:.6cm) -- (1.8,-.6) arc (0:-180:.6cm) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(-.6,.6) arc (0:180:.6cm) -- (-1.8,-.6) arc (0:-180:.6cm) -- (-3,2); \draw[DarkGreen,thick] (2.4,1.2) -- (2.4,2); \draw[DarkGreen,thick] (-2.4,-1.2) -- (-2.4,-2); \draw[DarkGreen,thick] (1.2,-1.2) -- (1.2,-1.6); \draw[DarkGreen,thick] (-1.2,1.2) -- (-1.2,1.6); \filldraw[DarkGreen] (2.4,1.2) circle (.07cm); \filldraw[DarkGreen] (-2.4,-1.2) circle (.07cm); \filldraw[DarkGreen] (1.2,-1.2) circle (.07cm); \filldraw[DarkGreen] (1.2,-1.6) circle (.07cm); \filldraw[DarkGreen] (-1.2,1.2) circle (.07cm); \filldraw[DarkGreen] (-1.2,1.6) circle (.07cm); \filldraw[white] (0,0) circle (.12cm); \draw[thick] (0,0) circle (.12cm); } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-2.4,-1.2) rectangle (2.4,1.2); \filldraw[primedregion=gray!55] (-.6,-1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (0,0) .. controls ++(45:.2cm) and ++(270:.4cm) .. (.6,1.2) -- (-2.4,1.2) -- (-2.4,-1.2); \filldraw[boxregion=gray!55] (-.6,-1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (0,0) .. controls ++(45:.2cm) and ++(270:.4cm) .. (.6,1.2) -- (2.4,1.2) -- (2.4,-1.2); \filldraw[primedregion=green!30] (-1.8,-1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-1.2,0) -- (-.6,.6) -- (-.6,1.2) -- (-2.4,1.2) -- (-2.4,-1.2); \filldraw[boxregion=green!30] (.6,-1.2) -- (.6,-.6) -- (1.2,0) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.8,1.2) -- (2.4,1.2) -- (2.4,-1.2); \filldraw[gray!55] (-.6,-.2) rectangle (-.2,.2); \end{scope} \draw[black,thick] (-.6,-1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (0,0) .. controls ++(45:.2cm) and ++(270:.4cm) .. (.6,1.2); \draw[DarkGreen,thick] (.6,-1.2) -- (.6,-.6) -- (1.2,0) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.8,1.2); \draw[DarkGreen,thick] (-1.8,-1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. 
(-1.2,0) -- (-.6,.6) -- (-.6,1.2); \draw[DarkGreen,thick] (.6,-.6) -- (-.6,.6); \filldraw[DarkGreen] (.6,-.6) circle (.05cm); \filldraw[DarkGreen] (-.6,.6) circle (.05cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \node at (-1.8,-1.4) {\scriptsize{$F(Q)$}}; \node at (-.6,-1.4) {\scriptsize{$\varphi_b$}}; \node at (.6,-1.4) {\scriptsize{$G(Q)$}}; \node at (-.4,0) {\scriptsize{$\varphi_Q^\dag$}}; } \] \[ \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-3,-1.8) rectangle (3,1.8); \filldraw[primedregion=gray!55] (1.2,-1.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,-.6) -- (-.6,.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.2,1.8) -- (-3,1.8) -- (-3,-1.8); \filldraw[boxregion=gray!55] (1.2,-1.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,-.6) -- (-.6,.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.2,1.8) -- (3,1.8) -- (3,-1.8); \filldraw[primedregion=green!30] (0,-1.8) -- (0,-1.2) -- (-1.8,.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-2.4,1.8) -- (-3,1.8) -- (-3,-1.8); \filldraw[boxregion=green!30] (2.4,-1.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,-.6) -- (0,1.2) -- (0,1.8) -- (3,1.8) -- (3,-1.8); \end{scope} \draw[black,thick] (1.2,-1.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,-.6) -- (-.6,.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.2,1.8); \draw[DarkGreen,thick] (0,-1.8) -- (0,-1.2) -- (-1.8,.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-2.4,1.8); \draw[DarkGreen,thick] (2.4,-1.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(1.8,-.6) -- (0,1.2) -- (0,1.8); \draw[DarkGreen,thick] (0,-1.2) -- (1.2,0); \draw[DarkGreen,thick] (0,1.2) -- (-1.2,0); \filldraw[DarkGreen] (0,-1.2) circle (.05cm); \filldraw[DarkGreen] (-1.2,0) circle (.05cm); \filldraw[DarkGreen] (0,1.2) circle (.05cm); \filldraw[DarkGreen] (1.2,0) circle (.05cm); \filldraw[white] (.6,-.6) circle (.1cm); \draw[thick] (.6,-.6) circle (.1cm); \filldraw[white] (-.6,.6) circle (.1cm); \draw[thick] (-.6,.6) circle (.1cm); \node at (0,-2) {\scriptsize{$F(Q)$}}; \node at (1.2,-2) {\scriptsize{$\varphi_b$}}; \node at (2.4,-2) {\scriptsize{$G(Q)$}}; } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-2.4,-1.8) rectangle (2.4,1.8); \filldraw[primedregion=gray!55] (1.2,-1.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,-.6) -- (-.6,.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.2,1.8) -- (-1.8,1.8) -- (-1.8,-1.8); \filldraw[boxregion=gray!55] (1.2,-1.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,-.6) -- (-.6,.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.2,1.8) -- (1.8,1.8) -- (1.8,-1.8); \filldraw[primedregion=green!30] (-1.8,1.8) -- (-1.8,-1.8) -- (-2.4,-1.8) -- (-2.4,1.8); \filldraw[boxregion=green!30] (1.8,-1.8) -- (1.8,1.8) -- (2.4,1.8) -- (2.4,-1.8); \end{scope} \coordinate (a) at (.8,.8); \coordinate (b) at (-.8,-.8); \draw[black,thick] (1.2,-1.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,-.6) -- (-.6,.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.2,1.8); \draw[DarkGreen,thick] (1.8,-1.8) -- (1.8,1.8); \draw[DarkGreen,thick] (-1.8,-1.8) -- (-1.8,1.8); \draw[DarkGreen,thick] (-.4,.4) .. controls ++(45:.4cm) and ++(-135:.4cm) .. (a); \draw[DarkGreen,thick] (.4,-.4) .. controls ++(45:.4cm) and ++(-45:.4cm) .. (a); \draw[DarkGreen,thick] (a) .. controls ++(90:.3cm) and ++(-135:.3cm) .. (1.8,1.5); \draw[DarkGreen,thick] (-.4,.4) .. controls ++(-135:.4cm) and ++(135:.4cm) .. (b); \draw[DarkGreen,thick] (.4,-.4) .. controls ++(-135:.4cm) and ++(45:.4cm) .. 
(b); \draw[DarkGreen,thick] (b) .. controls ++(270:.3cm) and ++(45:.3cm) .. (-1.8,-1.5); \filldraw[DarkGreen] (a) circle (.05cm); \filldraw[DarkGreen] (b) circle (.05cm); \filldraw[DarkGreen] (-1.8,-1.5) circle (.05cm); \filldraw[DarkGreen] (1.8,1.5) circle (.05cm); \filldraw[fill=white, thick] (.4,-.4) circle (.1cm); \filldraw[fill=white, thick] (-.4,.4) circle (.1cm); \node at (-1.8,-2) {\scriptsize{$F(Q)$}}; \node at (1.2,-2) {\scriptsize{$\varphi_b$}}; \node at (1.8,-2) {\scriptsize{$G(Q)$}}; } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-1.8,-1.5) rectangle (1.8,2.1); \filldraw[primedregion=gray!55] (.6,-1.5) -- (.6,-.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (-.6,.6) -- (-.6,2.1) -- (-1.2,2.1) -- (-1.2,-1.5); \filldraw[boxregion=gray!55] (.6,-1.5) -- (.6,-.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (-.6,.6) -- (-.6,2.1) -- (1.2,2.1) -- (1.2,-1.5); \filldraw[primedregion=green!30] (-1.8,2.1) -- (-1.8,-1.5) -- (-1.2,-1.5) -- (-1.2,2.1); \filldraw[boxregion=green!30] (1.8,2.1) -- (1.8,-1.5) -- (1.2,-1.5) -- (1.2,2.1); \end{scope} \draw[black,thick] (.6,-1.5) -- (.6,-.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (-.6,.6) -- (-.6,2.1); \draw[DarkGreen,thick] (-1.2,-1.2) arc (-90:0:.6cm) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(.6,.6); \draw[DarkGreen,thick] (.6,1.2) arc (90:450:.3cm) arc (180:90:.6cm); \draw[DarkGreen,thick] (-1.2,-1.5) -- (-1.2,2.1); \draw[DarkGreen,thick] (1.2,-1.5) -- (1.2,2.1); \filldraw[DarkGreen] (.6,.6) circle (.05cm); \filldraw[DarkGreen] (.6,1.2) circle (.05cm); \filldraw[DarkGreen] (1.2,1.8) circle (.05cm); \filldraw[DarkGreen] (-1.2,-1.2) circle (.05cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \node at (-1.2,-1.7) {\scriptsize{$F(Q)$}}; \node at (.6,-1.7) {\scriptsize{$\varphi_b$}}; \node at (1.2,-1.7) {\scriptsize{$G(Q)$}}; } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-2.4,-1.2) rectangle (2.4,1.2); \filldraw[primedregion=gray!55] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2) -- (-2.4,1.2) -- (-2.4,-1.2); \filldraw[boxregion=gray!55] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2) -- (2.4,1.2) -- (2.4,-1.2); \filldraw[primedregion=green!30] (-.6,-1.2) -- (-.6,-.6) -- (-1.2,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,1.2) -- (-2.4,1.2) -- (-2.4,-1.2); \filldraw[boxregion=green!30] (1.8,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,0) -- (.6,.6) -- (.6,1.2) -- (2.4,1.2) -- (2.4,-1.2); \end{scope} \draw[black,thick] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2); \draw[DarkGreen,thick] (-.6,-1.2) -- (-.6,-.6) -- (-1.2,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,1.2); \draw[DarkGreen,thick] (1.8,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(1.2,0) -- (.6,.6) -- (.6,1.2); \draw[DarkGreen,thick] (-.6,-.6) -- (.6,.6); \filldraw[DarkGreen] (.6,.6) circle (.05cm); \filldraw[DarkGreen] (-.6,-.6) circle (.05cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \node at (-.6,-1.4) {\scriptsize{$F(Q)$}}; \node at (.6,-1.4) {\scriptsize{$\varphi_b$}}; \node at (1.8,-1.4) {\scriptsize{$G(Q)$}}; } \] For a 1-cell $({}_P X_Q,\lambda,\rho)$, we define ${\sf QSys}(\varphi)_X: F(X)\otimes_{F(Q)} {\sf QSys}(\varphi)_Q \Rightarrow {\sf QSys}(\varphi)_P\otimes_{G(P)}G(X)$ by \[ {\sf QSys}(\varphi)_X= \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,2.4); \filldraw[primedregion=\PrColor] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=green!30] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=\PrColor] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=green!30] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,2.4); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,1.8) -- (0,2.4); \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,2.6) {\scriptsize{$G(X)$}}; \node at (1.2,-.2) {\scriptsize{${\sf QSys}(\varphi)_Q$}}; \node at (0,2.6) {\scriptsize{${\sf QSys}(\varphi)_P$}}; } := \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-3.6,-2.4) rectangle (3.6,2.4); \filldraw[primedregion=gray!30] (0,0) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4) -- (-3,2.4) -- (-3,-.6) -- (-.6,-.6); \filldraw[boxregion=gray!30] (0,0) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4) -- (-.6,2.4) -- (-.6,1.8) -- (.6,.6); \filldraw[primedregion=gray!55] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (0,0) -- (-.6,-.6) -- (.6,-1.8) -- (.6,-2.4); \filldraw[boxregion=gray!55] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (0,0) -- (.6,.6) -- (3,.6) -- (3,-2.4); \filldraw[primedregion=\PrColor] (-.6,-2.4) -- (-.6,-.6) -- (-2.4,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,2.4) -- (-3.6,2.4) -- (-3.6,-2.4); \filldraw[boxregion=\PrColor] (-.6,2.4) -- (-.6,1.8) -- (.6,.6) -- (.6,2.4); \filldraw[primedregion=green!30] (.6,-2.4) -- (.6,-1.8) -- (-.6,-.6) -- (-.6,-2.4); \filldraw[boxregion=green!30] (3,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-1.2) -- (.6,.6) -- (.6,2.4) -- (3.6,2.4) -- (3.6,-2.4); \filldraw[gray!30] (-1.6,1.2) circle (.22cm); \filldraw[gray!30] (-.4,0) circle (.22cm); \filldraw[gray!55] (.8,-1.2) circle (.22cm); \end{scope} \draw[black,thick] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4); \draw[\XColor,thick] (-.6,-2.4) -- (-.6,-.6) -- (.6,.6) -- (.6,2.4); \draw[\PsColor,thick] (-.6,-.6) -- (-2.4,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. 
(-3,2.4); \draw[\PsColor,thick] (.6,.6) -- (-.6,1.8) -- (-.6,2.4); \draw[\PsColor,thick] (-1.8,.6) -- (-.6,1.8); \draw[DarkGreen,thick] (-.6,-.6) -- (.6,-1.8) -- (.6,-2.4); \draw[DarkGreen,thick] (3,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-1.2) -- (.6,.6); \draw[DarkGreen,thick] (1.8,-.6) -- (.6,-1.8); \filldraw[\XColor] (-.6,-.6) circle (.05cm); \filldraw[\XColor] (.6,.6) circle (.05cm); \filldraw[\PsColor] (-1.8,.6) circle (.05cm); \filldraw[\PsColor] (-.6,1.8) circle (.05cm); \filldraw[DarkGreen] (1.8,-.6) circle (.05cm); \filldraw[DarkGreen] (.6,-1.8) circle (.05cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \filldraw[white] (-1.2,1.2) circle (.1cm); \draw[thick] (-1.2,1.2) circle (.1cm); \filldraw[white] (1.2,-1.2) circle (.1cm); \draw[thick] (1.2,-1.2) circle (.1cm); \node at (-.6,-2.6) {\scriptsize{$F(X)$}}; \node at (.6,-2.6) {\scriptsize{$F(Q)$}}; \node at (1.8,-2.6) {\scriptsize{$\varphi_b$}}; \node at (3,-2.6) {\scriptsize{$G(Q)$}}; \node at (.6,2.6) {\scriptsize{$G(X)$}}; \node at (-.6,2.6) {\scriptsize{$G(P)$}}; \node at (-1.8,2.6) {\scriptsize{$\varphi_a$}}; \node at (-3,2.6) {\scriptsize{$F(P)$}}; \node at (-.4,0) {\scriptsize{$\varphi_X$}}; \node at (-1.6,1.2) {\scriptsize{$\varphi_P$}}; \node at (.8,-1.2) {\scriptsize{$\varphi_Q$}}; } \] To see that ${\sf QSys}(\varphi)_X$ is unitary, we observe \begin{align*} {\sf QSys}(\varphi)_X^\dag\star{\sf QSys}(\varphi)_X &= \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,4.2); \filldraw[primedregion=green!30] (-.6,0) rectangle (1.8,4.2); \filldraw[boxregion=\PrColor] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[primedregion=\PrColor] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,2.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. 
(.6,3) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,3.6) -- (0,4.2) -- (-.6,4.2) -- (-.6,0); \filldraw[boxregion=green!30] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,3) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.6) -- (1.2,4.2) -- (1.8,4.2) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,2.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,3.6) -- (0,4.2); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.8) -- (0,2.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,3.6) -- (1.2,4.2); \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \filldraw[white] (.6,3) circle (.1cm); \draw[thick] (.6,3) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (0,4.4) {\scriptsize{$F(X)$}}; \node at (1.2,-.2) {\scriptsize{${\sf QSys}(\varphi)_Q$}}; \node at (1.2,4.4) {\scriptsize{${\sf QSys}(\varphi)_Q$}}; } = \tikzmath[scale=.45, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-3.6,-4.8) rectangle (3.6,4.8); \filldraw[primedregion=gray!30] (-3.6,-4.8) rectangle (3.6,4.8); \filldraw[boxregion=gray!30] (.6,1.8) -- (.6,-1.8) -- (0,-2.4) -- (-1.2,-1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,-.2) -- (-1.8,.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-1.2,1.2) -- (0,2.4); \filldraw[primedregion=gray!55] (1.8,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-3.6) -- (0,-2.4) -- (-.6,-3) -- (-.6,-4.8); \filldraw[primedregion=gray!55] (-.6,4.8) -- (-.6,3) -- (0,2.4) -- (1.2,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.8,4.8); \filldraw[boxregion=gray!55] (1.8,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-3.6) -- (0,-2.4) -- (.6,-1.8) -- (.6,1.8) -- (0,2.4) -- (1.2,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. 
(1.8,4.8) -- (3.6,4.8) -- (3.6,-4.8); \filldraw[boxregion=\PrColor] (.6,-1.8) -- (-.6,-.6) -- (-.6,.6) -- (.6,1.8); \filldraw[primedregion=green!30] (-.6,3) -- (.6,4.2) -- (.6,4.8) -- (-.6,4.8); \filldraw[primedregion=green!30] (-.6,-3) -- (.6,-4.2) -- (.6,-4.8) -- (-.6,-4.8); \filldraw[primedregion=\PrColor] (-.6,-4.8) -- (-.6,-3) -- (-2.4,-1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,-.2) -- (-3,.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-2.4,1.2) -- (-.6,3) -- (-.6,4.8) -- (-3.6,4.8) -- (-3.6,-4.8); \filldraw[boxregion=green!30] (3,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-3.6) -- (.6,-1.8) -- (.6,1.8) -- (2.4,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (3,4.8) -- (3.6,4.8) -- (3.6,-4.8); \end{scope} \draw[black,thick] (1.8,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-3.6) -- (-1.2,-1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,-.2) -- (-1.8,.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-1.2,1.2) -- (1.2,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.8,4.8); \draw[\XColor,thick] (-.6,-4.8) -- (-.6,-3) -- (.6,-1.8) -- (.6,1.8) -- (-.6,3) -- (-.6,4.8); \draw[\PsColor,thick] (-.6,-3) -- (-2.4,-1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,-.2) -- (-3,.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-2.4,1.2) -- (-.6,3); \draw[\PsColor,thick] (.6,-1.8) -- (-.6,-.6) -- (-.6,.6) -- (.6,1.8); \draw[\PsColor,thick] (-1.8,-1.8) -- (-.6,-.6); \draw[\PsColor,thick] (-1.8,1.8) -- (-.6,.6); \draw[DarkGreen,thick] (-.6,-3) -- (.6,-4.2) -- (.6,-4.8); \draw[DarkGreen,thick] (3,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-3.6) -- (.6,-1.8); \draw[DarkGreen,thick] (1.8,-3) -- (.6,-4.2); \draw[DarkGreen,thick] (-.6,3) -- (.6,4.2) -- (.6,4.8); \draw[DarkGreen,thick] (.6,1.8) -- (2.4,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. 
(3,4.8); \draw[DarkGreen,thick] (1.8,3) -- (.6,4.2); \filldraw[\XColor] (-.6,-3) circle (.07cm); \filldraw[\XColor] (.6,-1.8) circle (.07cm); \filldraw[\XColor] (-.6,3) circle (.07cm); \filldraw[\XColor] (.6,1.8) circle (.07cm); \filldraw[\PsColor] (-1.8,-1.8) circle (.07cm); \filldraw[\PsColor] (-.6,-.6) circle (.07cm); \filldraw[\PsColor] (-1.8,1.8) circle (.07cm); \filldraw[\PsColor] (-.6,.6) circle (.07cm); \filldraw[DarkGreen] (1.8,3) circle (.07cm); \filldraw[DarkGreen] (.6,4.2) circle (.07cm); \filldraw[DarkGreen] (1.8,-3) circle (.07cm); \filldraw[DarkGreen] (.6,-4.2) circle (.07cm); \filldraw[white] (0,-2.4) circle (.1cm); \draw[thick] (0,-2.4) circle (.1cm); \filldraw[white] (0,2.4) circle (.1cm); \draw[thick] (0,2.4) circle (.1cm); \filldraw[white] (-1.2,-1.2) circle (.1cm); \draw[thick] (-1.2,-1.2) circle (.1cm); \filldraw[white] (-1.2,1.2) circle (.1cm); \draw[thick] (-1.2,1.2) circle (.1cm); \filldraw[white] (1.2,-3.6) circle (.1cm); \draw[thick] (1.2,-3.6) circle (.1cm); \filldraw[white] (1.2,3.6) circle (.1cm); \draw[thick] (1.2,3.6) circle (.1cm); } = \tikzmath[scale=.45, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-3.6,-4.8) rectangle (3.6,4.8); \filldraw[primedregion=gray!30] (-3.6,-4.8) rectangle (3.6,4.8); \filldraw[boxregion=gray!30] (.6,1.8) -- (.6,-1.8) -- (0,-2.4) -- (-1.2,-1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,-.2) -- (-1.8,.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-1.2,1.2) -- (0,2.4); \filldraw[primedregion=gray!55] (1.8,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-3.6) -- (0,-2.4) -- (-.6,-3) -- (-.6,-4.8); \filldraw[primedregion=gray!55] (-.6,4.8) -- (-.6,3) -- (0,2.4) -- (1.2,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.8,4.8); \filldraw[boxregion=gray!55] (1.8,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-3.6) -- (0,-2.4) -- (.6,-1.8) -- (.6,1.8) -- (0,2.4) -- (1.2,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. 
(1.8,4.8) -- (3.6,4.8) -- (3.6,-4.8); \filldraw[primedregion=green!30] (-.6,3) -- (.6,4.2) -- (.6,4.8) -- (-.6,4.8); \filldraw[primedregion=green!30] (-.6,-3) -- (.6,-4.2) -- (.6,-4.8) -- (-.6,-4.8); \filldraw[primedregion=\PrColor] (-.6,-4.8) -- (-.6,-3) -- (-2.4,-1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,-.2) -- (-3,.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-2.4,1.2) -- (-.6,3) -- (-.6,4.8) -- (-3.6,4.8) -- (-3.6,-4.8); \filldraw[boxregion=green!30] (3,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-3.6) -- (1.8,-3) -- (1.8,3) -- (2.4,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (3,4.8) -- (3.6,4.8) -- (3.6,-4.8); \end{scope} \draw[black,thick] (1.8,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-3.6) -- (-1.2,-1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,-.2) -- (-1.8,.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-1.2,1.2) -- (1.2,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.8,4.8); \draw[\XColor,thick] (-.6,-4.8) -- (-.6,-3) -- (.6,-1.8) -- (.6,1.8) -- (-.6,3) -- (-.6,4.8); \draw[\PsColor,thick] (-.6,-3) -- (-2.4,-1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,-.2) -- (-3,.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-2.4,1.2) -- (-.6,3); \draw[DarkGreen,thick] (-.6,-3) -- (.6,-4.2) -- (.6,-4.8); \draw[DarkGreen,thick] (3,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-3.6) -- (1.8,-3) -- (1.8,3); \draw[DarkGreen,thick] (1.8,-3) -- (.6,-4.2); \draw[DarkGreen,thick] (-.6,3) -- (.6,4.2) -- (.6,4.8); \draw[DarkGreen,thick] (1.8,3) -- (2.4,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. 
(3,4.8); \draw[DarkGreen,thick] (1.8,3) -- (.6,4.2); \filldraw[\XColor] (-.6,-3) circle (.07cm); \filldraw[\XColor] (-.6,3) circle (.07cm); \filldraw[DarkGreen] (1.8,3) circle (.07cm); \filldraw[DarkGreen] (.6,4.2) circle (.07cm); \filldraw[DarkGreen] (1.8,-3) circle (.07cm); \filldraw[DarkGreen] (.6,-4.2) circle (.07cm); \filldraw[white] (0,-2.4) circle (.1cm); \draw[thick] (0,-2.4) circle (.1cm); \filldraw[white] (0,2.4) circle (.1cm); \draw[thick] (0,2.4) circle (.1cm); \filldraw[white] (1.2,-3.6) circle (.1cm); \draw[thick] (1.2,-3.6) circle (.1cm); \filldraw[white] (1.2,3.6) circle (.1cm); \draw[thick] (1.2,3.6) circle (.1cm); } \displaybreak[1]\\ &= \tikzmath[scale=.45, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-3,-4.8) rectangle (3.6,4.8); \filldraw[primedregion=gray!55] (1.8,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-3.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (.6,-2.4) -- (.6,2.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.2,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.8,4.8) -- (-.6,4.8) -- (-.6,-4.8); \filldraw[boxregion=gray!55] (1.8,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-3.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (.6,-2.4) -- (.6,2.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.2,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.8,4.8) -- (3.6,4.8) -- (3.6,-4.8); \filldraw[primedregion=green!30] (.6,-4.8) -- (.6,-4.2) -- (0,-3.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,-2.4) -- (-.6,2.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (0,3.6) -- (.6,4.2) -- (.6,4.8) -- (-1.8,4.8) -- (-1.8,-4.8); \filldraw[primedregion=\PrColor] (-3,-4.8) rectangle (-1.8,4.8); \filldraw[boxregion=green!30] (3,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-3.6) -- (1.8,-3) -- (1.8,3) -- (2.4,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (3,4.8) -- (3.6,4.8) -- (3.6,-4.8); \end{scope} \draw[black,thick] (1.8,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-3.6) .. 
controls ++(135:.2cm) and ++(270:.4cm) .. (.6,-2.4) -- (.6,2.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.2,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.8,4.8); \draw[\XColor,thick] (-1.8,-4.8) -- (-1.8,4.8); \draw[DarkGreen,thick] (.6,-4.8) -- (.6,-4.2) -- (0,-3.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,-2.4) -- (-.6,2.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (0,3.6) -- (.6,4.2) -- (.6,4.8); \draw[DarkGreen,thick] (3,-4.8) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-3.6) -- (1.8,-3) -- (1.8,3); \draw[DarkGreen,thick] (1.8,-3) -- (.6,-4.2); \draw[DarkGreen,thick] (1.8,3) -- (2.4,3.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (3,4.8); \draw[DarkGreen,thick] (1.8,3) -- (.6,4.2); \filldraw[DarkGreen] (1.8,3) circle (.07cm); \filldraw[DarkGreen] (.6,4.2) circle (.07cm); \filldraw[DarkGreen] (1.8,-3) circle (.07cm); \filldraw[DarkGreen] (.6,-4.2) circle (.07cm); \filldraw[white] (1.2,-3.6) circle (.1cm); \draw[thick] (1.2,-3.6) circle (.1cm); \filldraw[white] (1.2,3.6) circle (.1cm); \draw[thick] (1.2,3.6) circle (.1cm); } = \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-3,-1.2) rectangle (2.4,1.2); \filldraw[primedregion=gray!55] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2) -- (-2.4,1.2) -- (-2.4,-1.2); \filldraw[boxregion=gray!55] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2) -- (2.4,1.2) -- (2.4,-1.2); \filldraw[primedregion=\PrColor] (-3,-1.2) rectangle (-2.4,1.2); \filldraw[primedregion=green!30] (-.6,-1.2) -- (-.6,-.6) -- (-1.2,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,1.2) -- (-2.4,1.2) -- (-2.4,-1.2); \filldraw[boxregion=green!30] (1.8,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,0) -- (.6,.6) -- (.6,1.2) -- (2.4,1.2) -- (2.4,-1.2); \end{scope} \draw[\XColor,thick] (-2.4,-1.2) -- (-2.4,1.2); \draw[black,thick] (.6,-1.2) .. 
controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2); \draw[DarkGreen,thick] (-.6,-1.2) -- (-.6,-.6) -- (-1.2,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,1.2); \draw[DarkGreen,thick] (1.8,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,0) -- (.6,.6) -- (.6,1.2); \draw[DarkGreen,thick] (-.6,-.6) -- (.6,.6); \filldraw[DarkGreen] (.6,.6) circle (.07cm); \filldraw[DarkGreen] (-.6,-.6) circle (.07cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \node at (-.6,-1.4) {\small{$F(Q)$}}; \node at (.6,-1.4) {\small{$\varphi_b$}}; \node at (1.8,-1.4) {\small{$G(Q)$}}; } = \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.5,-.5) rectangle (.5,.5); \filldraw[primedregion=\PrColor] (-.5,-.5) rectangle (-.2,.5); \filldraw[primedregion=green!30] (-.2,-.5) rectangle (.2,.5); \filldraw[boxregion=green!30] (.2,-.5) rectangle (.5,.5); \end{scope} \draw[\XColor,thick] (-.2,-.5) -- (-.2,.5); \draw[black,thick] (.2,-.5) -- (.2,.5); }\,. \displaybreak[1] \end{align*} Similarly, ${\sf QSys}(\varphi)_X\star {\sf QSys}(\varphi)_X^\dag=1_{{\sf QSys}(\varphi)_P\otimes_{G(P)}G(X)}$. To see that ${\sf QSys}(\varphi):{\sf QSys}(F)\Rightarrow{\sf QSys}(G)$ is a 2-transformation, we observe \begin{align*} \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,-.6) rectangle (3,3.6); \filldraw[primedregion=\PrColor] (0,-.6) -- (0,2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,2.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,3.2) -- (0,4.2) -- (-.6,4.2) -- (-.6,-.6); \filldraw[primedregion=green!30] (1.2,0) -- (1.2,.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.8,1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,2.6) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,2) -- (0,0); \filldraw[primedregion=cyan!30] (2.4,-.6) -- (2.4,.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,1) .. 
controls ++(-135:.2cm) and ++(90:.4cm) .. (1.2,.4) -- (1.2,-.6); \filldraw[boxregion=\PrColor] (0,4.2) -- (0,3.2) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,2.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.2) -- (1.2,4.2); \filldraw[boxregion=green!30] (2.4,3.6) -- (2.4,1.6) .. controls ++(270:.4cm) and ++(45:.2cm) .. (1.8,1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,2.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.2) -- (1.2,3.6); \filldraw[primedregion=gray!55] (0,-.6) rectangle (1.2,0); \filldraw[boxregion=cyan!30] (2.4,-.6) -- (2.4,.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,1) .. controls ++(45:.2cm) and ++(270:.4cm) .. (2.4,1.6) -- (2.4,4.2) -- (3,4.2) -- (3,-.6); \filldraw[green!30] (.8,1.6) rectangle (2.2,2); \end{scope} \draw[\XColor,thick] (0,-.6) -- (0,.4) -- (0,2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,3.2) -- (1.2,3.6); \draw[orange,thick] (1.2,-.6) -- (1.2,.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (2.4,1.6) -- (2.4,3.6); \draw[black,thick] (2.4,-.6) -- (2.4,.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,3.2) -- (0,3.6); \draw[DarkGreen,thick] (0,0) -- (1.2,0); \filldraw[white] (1.8,1) circle (.1cm); \draw[thick] (1.8,1) circle (.1cm); \filldraw[white] (.6,2.6) circle (.1cm); \draw[thick] (.6,2.6) circle (.1cm); \node at (0,-.8) {\scriptsize{$F(X)$}}; \node at (1.2,-.8) {\scriptsize{$F(Y)$}}; \node at (1.2,3.8) {\scriptsize{$G(X)$}}; \node at (2.4,3.8) {\scriptsize{$G(Y)$}}; \node at (2.4,-.8) {\scriptsize{${\sf QSys}(\varphi)_R$}}; \node at (1.5,1.8) {\scriptsize{${\sf QSys}(\varphi)_Q$}}; \node at (0,3.8) {\scriptsize{${\sf QSys}(\varphi)_P$}}; } &= \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-4.8,-3.6) rectangle (4.8,3.6); \filldraw[primedregion=gray!30] (-1.2,1.2) -- (-2.4,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. 
(-3,3.6) -- (-4.2,3.6) -- (-4.2,-.6) -- (-1.8,.6); \filldraw[boxregion=gray!30] (-1.2,1.2) -- (-2.4,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,3.6) -- (-1.8,3.6) -- (-1.8,3) -- (-.6,1.8); \filldraw[primedregion=gray!55] (.6,-1.8) -- (-1.8,.6) -- (-1.2,1.2) -- (1.2,-1.2); \filldraw[boxregion=gray!55] (1.8,-.6) -- (-.6,1.8)-- (-1.2,1.2) -- (1.2,-1.2); \filldraw[primedregion=gray!75] (3,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-2.4) -- (1.2,-1.2) -- (.6,-1.8) -- (1.8,-3) -- (1.8,-3.6); \filldraw[boxregion=gray!75] (3,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-2.4) -- (1.2,-1.2) -- (1.8,-.6) -- (4.2,-.6) -- (4.2,-3.6); \filldraw[primedregion=\PrColor] (-1.8,-3.6) -- (-1.8,.6) -- (-3.6,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-4.2,3.6) -- (-4.8,3.6) -- (-4.8,-3.6); \filldraw[boxregion=\PrColor] (-1.8,3.6) -- (-1.8,3) -- (-.6,1.8) -- (-.6,3.6); \filldraw[primedregion=green!30] (-1.8,-3.6) -- (-1.8,.6) -- (.6,-1.8) -- (.6,-3.6); \filldraw[boxregion=green!30] (-.6,1.8) -- (-.6,3.6) -- (1.8,3.6) -- (1.8,-.6); \filldraw[primedregion=cyan!30] (1.8,-3.6) -- (1.8,-3) -- (.6,-1.8) -- (.6,-3.6); \filldraw[boxregion=cyan!30] (4.2,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (3.6,-2.4) -- (1.8,-.6) -- (1.8,3.6) -- (4.8,3.6) -- (4.8,-3.6); \filldraw[primedregion=gray!55] (.6,-3.6) -- (.6,-2.4) -- (-1.8,-2.4) -- (-1.8,-3.6); \end{scope} \draw[black,thick] (3,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-2.4) -- (-2.4,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,3.6); \draw[\XColor,thick] (-1.8,-3.6) -- (-1.8,.6) -- (-.6,1.8) -- (-.6,3.6); \draw[orange,thick] (1.8,3.6) -- (1.8,-.6) -- (.6,-1.8) -- (.6,-3.6); \draw[DarkGreen,thick] (.6,-1.8) -- (-1.8,.6); \draw[DarkGreen,thick] (1.8,-.6) -- (-.6,1.8); \draw[DarkGreen,thick] (-.6,-.6) -- (.6,.6); \draw[\PsColor,thick] (-1.8,3.6) -- (-1.8,3) -- (-.6,1.8); \draw[\PsColor,thick] (-1.8,.6) -- (-3.6,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. 
(-4.2,3.6); \draw[\PsColor,thick] (-3,1.8) -- (-1.8,3); \draw[cyan!90,thick] (.6,-1.8) -- (1.8,-3) -- (1.8,-3.6); \draw[cyan!90,thick] (4.2,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (3.6,-2.4) -- (1.8,-.6); \draw[cyan!90,thick] (1.8,-3) -- (3,-1.8); \draw[DarkGreen,thick] (.6,-2.4) -- (-1.8,-2.4); \filldraw[white] (-1.2,1.2) circle (.1cm); \draw[thick] (-1.2,1.2) circle (.1cm); \filldraw[white] (1.2,-1.2) circle (.1cm); \draw[thick] (1.2,-1.2) circle (.1cm); \filldraw[white] (-2.4,2.4) circle (.1cm); \draw[thick] (-2.4,2.4) circle (.1cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \filldraw[white] (2.4,-2.4) circle (.1cm); \draw[thick] (2.4,-2.4) circle (.1cm); \filldraw[\XColor] (-1.8,.6) circle (.07cm); \filldraw[\XColor] (-.6,1.8) circle (.07cm); \filldraw[orange] (1.8,-.6) circle (.07cm); \filldraw[orange] (.6,-1.8) circle (.07cm); \filldraw[DarkGreen] (-.6,-.6) circle (.07cm); \filldraw[DarkGreen] (.6,.6) circle (.07cm); \filldraw[\PsColor] (-3,1.8) circle (.07cm); \filldraw[\PsColor] (-1.8,3) circle (.07cm); \filldraw[cyan!90] (3,-1.8) circle (.07cm); \filldraw[cyan!90] (1.8,-3) circle (.07cm); \node at (-1.8,-3.8) {\small{$F(X)$}}; \node at (.6,-3.8) {\small{$F(Y)$}}; \node at (1.8,-3.8) {\small{$F(R)$}}; \node at (3,-3.8) {\small{$\varphi_c$}}; \node at (4.2,-3.8) {\small{$G(R)$}}; \node at (1.8,3.8) {\small{$G(Y)$}}; \node at (-.6,3.8) {\small{$G(X)$}}; \node at (-1.8,3.8) {\small{$G(P)$}}; \node at (-3,3.8) {\small{$\varphi_a$}}; \node at (-4.2,3.8) {\small{$F(P)$}}; } = \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-4.8,-3.6) rectangle (4.8,3.6); \filldraw[primedregion=gray!30] (-1.2,1.2) -- (-2.4,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,3.6) -- (-4.2,3.6) -- (-4.2,-.6) -- (-1.8,.6); \filldraw[boxregion=gray!30] (-1.2,1.2) -- (-2.4,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. 
(-3,3.6) -- (-1.8,3.6) -- (-1.8,3) -- (-.6,1.8); \filldraw[primedregion=gray!55] (.6,-1.8) -- (-1.8,.6) -- (-1.2,1.2) -- (1.2,-1.2); \filldraw[boxregion=gray!55] (1.8,-.6) -- (-.6,1.8)-- (-1.2,1.2) -- (1.2,-1.2); \filldraw[primedregion=gray!75] (3,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-2.4) -- (1.2,-1.2) -- (.6,-1.8) -- (1.8,-3) -- (1.8,-3.6); \filldraw[boxregion=gray!75] (3,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-2.4) -- (1.2,-1.2) -- (1.8,-.6) -- (4.2,-.6) -- (4.2,-3.6); \filldraw[primedregion=\PrColor] (-1.8,-3.6) -- (-1.8,.6) -- (-3.6,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-4.2,3.6) -- (-4.8,3.6) -- (-4.8,-3.6); \filldraw[boxregion=\PrColor] (-1.8,3.6) -- (-1.8,3) -- (-.6,1.8) -- (-.6,3.6); \filldraw[primedregion=gray!55] (-1.8,-3.6) -- (-1.8,.6) -- (.6,-1.8) -- (.6,-3.6); \filldraw[boxregion=green!30] (-.6,1.8) -- (-.6,3.6) -- (1.8,3.6) -- (1.8,-.6); \filldraw[primedregion=cyan!30] (1.8,-3.6) -- (1.8,-3) -- (.6,-1.8) -- (.6,-3.6); \filldraw[boxregion=cyan!30] (4.2,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (3.6,-2.4) -- (1.8,-.6) -- (1.8,3.6) -- (4.8,3.6) -- (4.8,-3.6); \end{scope} \draw[black,thick] (3,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-2.4) -- (-2.4,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,3.6); \draw[\XColor,thick] (-1.8,-3.6) -- (-1.8,.6) -- (-.6,1.8) -- (-.6,3.6); \draw[orange,thick] (1.8,3.6) -- (1.8,-.6) -- (.6,-1.8) -- (.6,-3.6); \draw[DarkGreen,thick] (.6,-1.8) -- (-1.8,.6); \draw[DarkGreen,thick] (1.8,-.6) -- (-.6,1.8); \draw[DarkGreen,thick] (-.6,-.6) -- (.6,.6); \draw[\PsColor,thick] (-1.8,3.6) -- (-1.8,3) -- (-.6,1.8); \draw[\PsColor,thick] (-1.8,.6) -- (-3.6,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-4.2,3.6); \draw[\PsColor,thick] (-3,1.8) -- (-1.8,3); \draw[cyan!90,thick] (.6,-1.8) -- (1.8,-3) -- (1.8,-3.6); \draw[cyan!90,thick] (4.2,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(3.6,-2.4) -- (1.8,-.6); \draw[cyan!90,thick] (1.8,-3) -- (3,-1.8); \filldraw[white] (-1.2,1.2) circle (.1cm); \draw[thick] (-1.2,1.2) circle (.1cm); \filldraw[white] (1.2,-1.2) circle (.1cm); \draw[thick] (1.2,-1.2) circle (.1cm); \filldraw[white] (-2.4,2.4) circle (.1cm); \draw[thick] (-2.4,2.4) circle (.1cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \filldraw[white] (2.4,-2.4) circle (.1cm); \draw[thick] (2.4,-2.4) circle (.1cm); \filldraw[\XColor] (-1.8,.6) circle (.07cm); \filldraw[\XColor] (-.6,1.8) circle (.07cm); \filldraw[orange] (1.8,-.6) circle (.07cm); \filldraw[orange] (.6,-1.8) circle (.07cm); \filldraw[DarkGreen] (-.6,-.6) circle (.07cm); \filldraw[DarkGreen] (.6,.6) circle (.07cm); \filldraw[\PsColor] (-3,1.8) circle (.07cm); \filldraw[\PsColor] (-1.8,3) circle (.07cm); \filldraw[cyan!90] (3,-1.8) circle (.07cm); \filldraw[cyan!90] (1.8,-3) circle (.07cm); \node at (-1.8,-3.8) {\small{$F(X)$}}; \node at (.6,-3.8) {\small{$F(Y)$}}; \node at (1.8,-3.8) {\small{$F(R)$}}; \node at (3,-3.8) {\small{$\varphi_c$}}; \node at (4.2,-3.8) {\small{$G(R)$}}; \node at (1.8,3.8) {\small{$G(Y)$}}; \node at (-.6,3.8) {\small{$G(X)$}}; \node at (-1.8,3.8) {\small{$G(P)$}}; \node at (-3,3.8) {\small{$\varphi_a$}}; \node at (-4.2,3.8) {\small{$F(P)$}}; } \\ &= \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-4.8,-3.6) rectangle (4.8,3.6); \filldraw[primedregion=gray!30] (-1.2,1.2) -- (-2.4,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,3.6) -- (-4.2,3.6) -- (-4.2,-.6) -- (-1.8,.6); \filldraw[boxregion=gray!30] (-1.2,1.2) -- (-2.4,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,3.6) -- (-1.8,3.6) -- (-1.8,3) -- (-.6,1.8); \filldraw[primedregion=gray!55] (.6,-1.8) -- (-1.8,.6) -- (-1.2,1.2) -- (1.2,-1.2); \filldraw[boxregion=gray!55] (1.8,-.6) -- (-.6,1.8)-- (-1.2,1.2) -- (1.2,-1.2); \filldraw[primedregion=gray!75] (3,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(2.4,-2.4) -- (1.2,-1.2) -- (.6,-1.8) -- (1.8,-3) -- (1.8,-3.6); \filldraw[boxregion=gray!75] (3,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-2.4) -- (1.2,-1.2) -- (1.8,-.6) -- (4.2,-.6) -- (4.2,-3.6); \filldraw[primedregion=\PrColor] (-1.8,-3.6) -- (-1.8,.6) -- (-3.6,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-4.2,3.6) -- (-4.8,3.6) -- (-4.8,-3.6); \filldraw[boxregion=\PrColor] (-1.8,3.6) -- (-1.8,3) -- (-.6,1.8) -- (-.6,3.6); \filldraw[primedregion=gray!55] (-1.8,-3.6) -- (-1.8,.6) -- (.6,-1.8) -- (.6,-3.6); \filldraw[boxregion=green!30] (-.6,1.8) -- (-.6,3.6) -- (1.8,3.6) -- (1.8,-.6); \filldraw[primedregion=cyan!30] (1.8,-3.6) -- (1.8,-3) -- (.6,-1.8) -- (.6,-3.6); \filldraw[boxregion=cyan!30] (4.2,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (3.6,-2.4) -- (1.8,-.6) -- (1.8,3.6) -- (4.8,3.6) -- (4.8,-3.6); \end{scope} \draw[black,thick] (3,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-2.4) -- (-2.4,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,3.6); \draw[\XColor,thick] (-1.8,-3.6) -- (-1.8,.6) -- (-.6,1.8) -- (-.6,3.6); \draw[orange,thick] (1.8,3.6) -- (1.8,-.6) -- (.6,-1.8) -- (.6,-3.6); \draw[DarkGreen,thick] (1.5,-.9) -- (-.9,1.5); \draw[DarkGreen,thick] (1.8,-.6) -- (-.6,1.8); \draw[DarkGreen,thick] (.3,.3) -- (.6,.6); \draw[\PsColor,thick] (-1.8,3.6) -- (-1.8,3) -- (-.6,1.8); \draw[\PsColor,thick] (-1.8,.6) -- (-3.6,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-4.2,3.6); \draw[\PsColor,thick] (-3,1.8) -- (-1.8,3); \draw[cyan!90,thick] (.6,-1.8) -- (1.8,-3) -- (1.8,-3.6); \draw[cyan!90,thick] (4.2,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(3.6,-2.4) -- (1.8,-.6); \draw[cyan!90,thick] (1.8,-3) -- (3,-1.8); \filldraw[white] (-1.2,1.2) circle (.1cm); \draw[thick] (-1.2,1.2) circle (.1cm); \filldraw[white] (1.2,-1.2) circle (.1cm); \draw[thick] (1.2,-1.2) circle (.1cm); \filldraw[white] (-2.4,2.4) circle (.1cm); \draw[thick] (-2.4,2.4) circle (.1cm); \filldraw[white] (2.4,-2.4) circle (.1cm); \draw[thick] (2.4,-2.4) circle (.1cm); \filldraw[\XColor] (-1.8,.6) circle (.07cm); \filldraw[\XColor] (-.6,1.8) circle (.07cm); \filldraw[\XColor] (-.9,1.5) circle (.07cm); \filldraw[orange] (1.5,-.9) circle (.07cm); \filldraw[orange] (1.8,-.6) circle (.07cm); \filldraw[orange] (.6,-1.8) circle (.07cm); \filldraw[DarkGreen] (.3,.3) circle (.07cm); \filldraw[DarkGreen] (.6,.6) circle (.07cm); \filldraw[\PsColor] (-3,1.8) circle (.07cm); \filldraw[\PsColor] (-1.8,3) circle (.07cm); \filldraw[cyan!90] (3,-1.8) circle (.07cm); \filldraw[cyan!90] (1.8,-3) circle (.07cm); \node at (-1.8,-3.8) {\small{$F(X)$}}; \node at (.6,-3.8) {\small{$F(Y)$}}; \node at (1.8,-3.8) {\small{$F(R)$}}; \node at (3,-3.8) {\small{$\varphi_c$}}; \node at (4.2,-3.8) {\small{$G(R)$}}; \node at (1.8,3.8) {\small{$G(Y)$}}; \node at (-.6,3.8) {\small{$G(X)$}}; \node at (-1.8,3.8) {\small{$G(P)$}}; \node at (-3,3.8) {\small{$\varphi_a$}}; \node at (-4.2,3.8) {\small{$F(P)$}}; } = \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-4.8,-3.6) rectangle (4.8,3.6); \filldraw[primedregion=gray!30] (-1.2,1.2) -- (-2.4,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,3.6) -- (-4.2,3.6) -- (-4.2,-.6) -- (-1.8,.6); \filldraw[boxregion=gray!30] (-1.2,1.2) -- (-2.4,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,3.6) -- (-1.8,3.6) -- (-1.8,3) -- (-.6,1.8); \filldraw[primedregion=gray!55] (.6,-1.8) -- (-1.8,.6) -- (-1.2,1.2) -- (1.2,-1.2); \filldraw[boxregion=gray!55] (1.8,-.6) -- (-.6,1.8)-- (-1.2,1.2) -- (1.2,-1.2); \filldraw[primedregion=gray!75] (3,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(2.4,-2.4) -- (1.2,-1.2) -- (.6,-1.8) -- (1.8,-3) -- (1.8,-3.6); \filldraw[boxregion=gray!75] (3,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-2.4) -- (1.2,-1.2) -- (1.8,-.6) -- (4.2,-.6) -- (4.2,-3.6); \filldraw[primedregion=\PrColor] (-1.8,-3.6) -- (-1.8,.6) -- (-3.6,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-4.2,3.6) -- (-4.8,3.6) -- (-4.8,-3.6); \filldraw[boxregion=\PrColor] (-1.8,3.6) -- (-1.8,3) -- (-.6,1.8) -- (-.6,3.6); \filldraw[primedregion=gray!55] (-1.8,-3.6) -- (-1.8,.6) -- (.6,-1.8) -- (.6,-3.6); \filldraw[boxregion=green!30] (-.6,1.8) -- (-.6,3.6) -- (1.8,3.6) -- (1.8,-.6); \filldraw[primedregion=cyan!30] (1.8,-3.6) -- (1.8,-3) -- (.6,-1.8) -- (.6,-3.6); \filldraw[boxregion=cyan!30] (4.2,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (3.6,-2.4) -- (1.8,-.6) -- (1.8,3.6) -- (4.8,3.6) -- (4.8,-3.6); \end{scope} \draw[black,thick] (3,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-2.4) -- (-2.4,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,3.6); \draw[\XColor,thick] (-1.8,-3.6) -- (-1.8,.6) -- (-.6,1.8) -- (-.6,3.6); \draw[orange,thick] (1.8,3.6) -- (1.8,-.6) -- (.6,-1.8) -- (.6,-3.6); \draw[DarkGreen,thick] (1.8,-.6) -- (-.6,1.8); \draw[\PsColor,thick] (-1.8,3.6) -- (-1.8,3) -- (-.6,1.8); \draw[\PsColor,thick] (-1.8,.6) -- (-3.6,2.4) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-4.2,3.6); \draw[\PsColor,thick] (-3,1.8) -- (-1.8,3); \draw[cyan!90,thick] (.6,-1.8) -- (1.8,-3) -- (1.8,-3.6); \draw[cyan!90,thick] (4.2,-3.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(3.6,-2.4) -- (1.8,-.6); \draw[cyan!90,thick] (1.8,-3) -- (3,-1.8); \filldraw[white] (-1.2,1.2) circle (.1cm); \draw[thick] (-1.2,1.2) circle (.1cm); \filldraw[white] (1.2,-1.2) circle (.1cm); \draw[thick] (1.2,-1.2) circle (.1cm); \filldraw[white] (-2.4,2.4) circle (.1cm); \draw[thick] (-2.4,2.4) circle (.1cm); \filldraw[white] (2.4,-2.4) circle (.1cm); \draw[thick] (2.4,-2.4) circle (.1cm); \filldraw[\XColor] (-1.8,.6) circle (.07cm); \filldraw[\XColor] (-.6,1.8) circle (.07cm); \filldraw[orange] (1.8,-.6) circle (.07cm); \filldraw[orange] (.6,-1.8) circle (.07cm); \filldraw[\PsColor] (-3,1.8) circle (.07cm); \filldraw[\PsColor] (-1.8,3) circle (.07cm); \filldraw[cyan!90] (3,-1.8) circle (.07cm); \filldraw[cyan!90] (1.8,-3) circle (.07cm); \node at (-1.8,-3.8) {\small{$F(X)$}}; \node at (.6,-3.8) {\small{$F(Y)$}}; \node at (1.8,-3.8) {\small{$F(R)$}}; \node at (3,-3.8) {\small{$\varphi_c$}}; \node at (4.2,-3.8) {\small{$G(R)$}}; \node at (1.8,3.8) {\small{$G(Y)$}}; \node at (-.6,3.8) {\small{$G(X)$}}; \node at (-1.8,3.8) {\small{$G(P)$}}; \node at (-3,3.8) {\small{$\varphi_a$}}; \node at (-4.2,3.8) {\small{$F(P)$}}; } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (3,4.2); \filldraw[primedregion=\PrColor] (0,0) -- (0,2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,2.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,3.2) -- (0,4.2) -- (-.6,4.2) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.8,1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,2.6) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,2) -- (0,0); \filldraw[primedregion=cyan!30] (2.4,0) -- (2.4,.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,1) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (1.2,.4) -- (1.2,0); \filldraw[boxregion=\PrColor] (0,4.2) -- (0,3.2) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,2.6) ..
controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.2) -- (1.2,4.2); \filldraw[boxregion=gray!55] (2.4,3.6) -- (2.4,1.6) .. controls ++(270:.4cm) and ++(45:.2cm) .. (1.8,1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,2.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.2) -- (1.2,3.6); \filldraw[boxregion=green!30] (1.2,3.6) rectangle (2.4,4.2); \filldraw[boxregion=cyan!30] (2.4,0) -- (2.4,.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,1) .. controls ++(45:.2cm) and ++(270:.4cm) .. (2.4,1.6) -- (2.4,4.2) -- (3,4.2) -- (3,0); \filldraw[gray!55] (.8,1.6) rectangle (2.2,2); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.4) -- (0,2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,3.2) -- (1.2,4.2); \draw[orange,thick] (1.2,0) -- (1.2,.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (2.4,1.6) -- (2.4,4.2); \draw[black,thick] (2.4,0) -- (2.4,.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,3.2) -- (0,4.2); \draw[DarkGreen,thick] (1.2,3.6) -- (2.4,3.6); \filldraw[white] (1.8,1) circle (.1cm); \draw[thick] (1.8,1) circle (.1cm); \filldraw[white] (.6,2.6) circle (.1cm); \draw[thick] (.6,2.6) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,-.2) {\scriptsize{$F(Y)$}}; \node at (1.2,4.4) {\scriptsize{$G(X)$}}; \node at (2.4,4.4) {\scriptsize{$G(Y)$}}; \node at (2.4,-.2) {\scriptsize{${\sf QSys}(\varphi)_R$}}; \node at (1.5,1.8) {\scriptsize{${\sf QSys}(\varphi)_b$}}; \node at (0,4.4) {\scriptsize{${\sf QSys}(\varphi)_P$}}; } \end{align*} This relation implies the monoidality coherence condition: \[ \tikzmath[scale=.65, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (3,4.8); \filldraw[primedregion=\PrColor] (0,0) -- (0,2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,2.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. 
(0,3.2) -- (0,4.8) -- (-.6,4.8) -- (-.6,0); \filldraw[primedregion=green!30] (1.2,0) -- (1.2,.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.8,1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,2.6) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,2) -- (0,0); \filldraw[primedregion=cyan!30] (2.4,0) -- (2.4,.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,1) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (1.2,.4) -- (1.2,0); \filldraw[boxregion=\PrColor] (0,4.8) -- (0,3.2) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,2.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.2) -- (1.2,3.8) -- (1.733,3.8) -- (1.733,4.8); \filldraw[boxregion=cyan!30] (2.4,0) -- (2.4,.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,1) .. controls ++(45:.2cm) and ++(270:.4cm) .. (2.4,1.6) -- (2.4,3.2) -- (1.867,3.6) -- (1.867,4.8) -- (3,4.8) -- (3,0); \filldraw[boxregion=green!30] (1.867,4.8) -- (1.867,3.8) -- (2.4,3.8) -- (2.4,1.6) .. controls ++(270:.4cm) and ++(45:.2cm) .. (1.8,1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,2.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.2) -- (1.2,3.8)-- (1.733,3.8) -- (1.733,4.8); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.4) -- (0,2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,3.2) -- (1.2,3.6); \draw[orange,thick] (1.2,0) -- (1.2,.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (2.4,1.6) -- (2.4,3.6); \draw[black,thick] (2.4,0) -- (2.4,.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,3.2) -- (0,4.8); \draw[\XColor,thick] (1.733,4) -- (1.733,4.8); \draw[orange,thick] (1.867,4) -- (1.867,4.8); \roundNbox{unshaded}{(1.8,3.8)}{.3}{.6}{.6}{\scriptsize{${\sf QSys}(G)^2_{X,Y}$}}; \filldraw[white] (1.8,1) circle (.1cm); \draw[thick] (1.8,1) circle (.1cm); \filldraw[white] (.6,2.6) circle (.1cm); \draw[thick] (.6,2.6) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,-.2) {\scriptsize{$F(Y)$}}; \node at (1.8,5) {\scriptsize{$G(X\otimes_Q Y)$}}; \node at (2.4,-.2) {\scriptsize{${\sf QSys}(\varphi)_R$}}; \node at (0,5) {\scriptsize{${\sf QSys}(\varphi)_P$}}; } = \tikzmath[scale=.65, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (3,4.8); \filldraw[primedregion=\PrColor] (0,0) -- (0,2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,2.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,3.2) -- (0,4.8) -- (-.6,4.8) -- (-.6,0); \filldraw[primedregion=green!30] (1.2,0) -- (1.2,.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.8,1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,2.6) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,2) -- (0,0); \filldraw[primedregion=cyan!30] (2.4,0) -- (2.4,.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,1) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (1.2,.4) -- (1.2,0); \filldraw[boxregion=\PrColor] (0,4.8) -- (0,3.2) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,2.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.2) -- (1.2,3.8) -- (1.733,3.8) -- (1.733,4.8); \filldraw[boxregion=cyan!30] (2.4,0) -- (2.4,.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,1) .. controls ++(45:.2cm) and ++(270:.4cm) .. (2.4,1.6) -- (2.4,3.2) -- (1.867,3.6) -- (1.867,4.8) -- (3,4.8) -- (3,0); \filldraw[boxregion=green!30] (1.867,4.8) -- (1.867,3.8) -- (2.4,3.8) -- (2.4,1.6) .. controls ++(270:.4cm) and ++(45:.2cm) .. (1.8,1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (1.2,1.6) -- (1.2,2) .. 
controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,2.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.2) -- (1.2,3.8)-- (1.733,3.8) -- (1.733,4.8); \filldraw[boxregion=gray!55] (1.2,3.2) rectangle (2.4,3.8); \filldraw[boxregion=gray!55] (1.733,4.4) rectangle (1.867,3.8); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.4) -- (0,2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,3.2) -- (1.2,3.6); \draw[orange,thick] (1.2,0) -- (1.2,.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (2.4,1.6) -- (2.4,3.6); \draw[black,thick] (2.4,0) -- (2.4,.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,3.2) -- (0,4.8); \draw[\XColor,thick] (1.733,4) -- (1.733,4.8); \draw[orange,thick] (1.867,4) -- (1.867,4.8); \draw[DarkGreen,thick] (1.2,3.2) -- (2.4,3.2); \draw[DarkGreen,thick] (1.733,4.4) -- (1.867,4.4); \roundNbox{unshaded}{(1.8,3.8)}{.3}{.6}{.6}{\scriptsize{$G^2_{X,Y}$}}; \filldraw[white] (1.8,1) circle (.1cm); \draw[thick] (1.8,1) circle (.1cm); \filldraw[white] (.6,2.6) circle (.1cm); \draw[thick] (.6,2.6) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,-.2) {\scriptsize{$F(Y)$}}; \node at (1.8,5) {\scriptsize{$G(X\otimes_Q Y)$}}; \node at (2.4,-.2) {\scriptsize{${\sf QSys}(\varphi)_R$}}; \node at (0,5) {\scriptsize{${\sf QSys}(\varphi)_P$}}; } = \tikzmath[scale=.65, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (3,4.8); \filldraw[primedregion=\PrColor] (0,0) -- (0,2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,2.6) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,3.2) -- (0,4.8) -- (-.6,4.8) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.8,1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,2.6) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,2) -- (0,0); \filldraw[primedregion=cyan!30] (2.4,0) -- (2.4,.4) .. 
controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,1) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (1.2,.4) -- (1.2,0); \filldraw[boxregion=\PrColor] (0,4.8) -- (0,3.2) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,2.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.2) -- (1.2,3.8) -- (1.733,3.8) -- (1.733,4.8); \filldraw[boxregion=cyan!30] (2.4,0) -- (2.4,.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.8,1) .. controls ++(45:.2cm) and ++(270:.4cm) .. (2.4,1.6) -- (2.4,3.2) -- (1.867,3.6) -- (1.867,4.8) -- (3,4.8) -- (3,0); \filldraw[boxregion=gray!55] (1.867,4.8) -- (1.867,3.8) -- (2.4,3.8) -- (2.4,1.6) .. controls ++(270:.4cm) and ++(45:.2cm) .. (1.8,1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,2.6) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,3.2) -- (1.2,3.8)-- (1.733,3.8) -- (1.733,4.8); \filldraw[primedregion=green!30] (0,0) rectangle (1.2,.4); \filldraw[boxregion=green!30] (1.733,4.4) rectangle (1.867,4.8); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.4) -- (0,2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,3.2) -- (1.2,3.6); \draw[orange,thick] (1.2,0) -- (1.2,.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (2.4,1.6) -- (2.4,3.6); \draw[black,thick] (2.4,0) -- (2.4,.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.6) -- (1.2,2) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,3.2) -- (0,4.8); \draw[\XColor,thick] (1.733,4) -- (1.733,4.8); \draw[orange,thick] (1.867,4) -- (1.867,4.8); \draw[DarkGreen,thick] (0,.4) -- (1.2,.4); \draw[DarkGreen,thick] (1.733,4.4) -- (1.867,4.4); \roundNbox{unshaded}{(1.8,3.8)}{.3}{.6}{.6}{\scriptsize{$G^2_{X,Y}$}}; \filldraw[white] (1.8,1) circle (.1cm); \draw[thick] (1.8,1) circle (.1cm); \filldraw[white] (.6,2.6) circle (.1cm); \draw[thick] (.6,2.6) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,-.2) {\scriptsize{$F(Y)$}}; \node at (1.8,5) {\scriptsize{$G(X\otimes_Q Y)$}}; \node at (2.4,-.2) {\scriptsize{${\sf QSys}(\varphi)_R$}}; \node at (0,5) {\scriptsize{${\sf QSys}(\varphi)_P$}}; } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,-.4) rectangle (2.4,3); \filldraw[primedregion=\PrColor] (0,-.4) -- (0,.8) -- (.533,.8) -- (.533,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.2,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (.6,2.4) -- (.6,3) -- (-.6,3) -- (-.6,-.4); \filldraw[primedregion=cyan!30] (1.8,-.4) -- (1.8,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (.65,1.2) -- (.65,.8) -- (1.2,.8) -- (1.2,-.4); \filldraw[primedregion=gray!55] (0,-.4) -- (0,.8) -- (.533,.8) -- (.533,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.133,1.8) -- (1.267,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (.667,1.2) -- (.667,.8) -- (1.2,.8) -- (1.2,-.4); \filldraw[boxregion=\PrColor] (.6,3) -- (.6,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (1.2,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.75,2.4) -- (1.75,3); \filldraw[boxregion=cyan!30] (1.8,-.4) -- (1.8,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.85,2.4) -- (1.85,3) -- (2.4,3) -- (2.4,-.4); \filldraw[boxregion=gray!55] (1.733,3) -- (1.733,2.4) .. controls ++(270:.4cm) and ++(45:.2cm) .. (1.133,1.8) -- (1.267,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. 
(1.867,2.4) -- (1.867,3); \filldraw[primedregion=green!30] (0,0) rectangle (1.2,-.4); \filldraw[boxregion=green!30] (1.733,2.6) rectangle (1.867,3); \end{scope} \draw[\XColor,thick] (0,-.4) -- (0,.6); \draw[orange,thick] (1.2,-.4) -- (1.2,.6); \draw[\XColor,thick] (.533,.6) -- (.533,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.733,2.4) -- (1.733,3); \draw[orange,thick] (.667,.6) -- (.667,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.867,2.4) -- (1.867,3); \draw[black,thick] (1.8,-.4) -- (1.8,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (.6,2.4) -- (.6,3); \draw[DarkGreen,thick] (0,0) -- (1.2,0); \draw[DarkGreen,thick] (1.733,2.6) -- (1.867,2.6); \roundNbox{unshaded}{(.6,.6)}{.3}{.6}{.6}{\scriptsize{$F^2_{X,Y}$}}; \filldraw[white] (1.2,1.8) circle (.1cm); \draw[thick] (1.2,1.8) circle (.1cm); \node at (0,-.6) {\scriptsize{$F(X)$}}; \node at (1.2,-.6) {\scriptsize{$F(Y)$}}; \node at (1.8,3.2) {\scriptsize{$G(X\otimes_Q Y)$}}; \node at (1.8,-.6) {\scriptsize{$\uparrow$}}; \node at (1.8,-1) {\scriptsize{${\sf QSys}(\varphi)_R$}}; \node at (.6,3.2) {\scriptsize{$\downarrow$}}; \node at (.6,3.6) {\scriptsize{${\sf QSys}(\varphi)_P$}}; } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,-.4) rectangle (2.4,3); \filldraw[primedregion=\PrColor] (0,-.4) -- (0,.8) -- (.533,.8) -- (.533,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.2,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (.6,2.4) -- (.6,3) -- (-.6,3) -- (-.6,-.4); \filldraw[primedregion=cyan!30] (1.8,-.4) -- (1.8,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (.65,1.2) -- (.65,.8) -- (1.2,.8) -- (1.2,-.4); \filldraw[primedregion=green!30] (0,-.4) -- (0,.8) -- (.533,.8) -- (.533,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.133,1.8) -- (1.267,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (.667,1.2) -- (.667,.8) -- (1.2,.8) -- (1.2,-.4); \filldraw[boxregion=\PrColor] (.6,3) -- (.6,2.4) .. 
controls ++(270:.4cm) and ++(135:.2cm) .. (1.2,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.75,2.4) -- (1.75,3); \filldraw[boxregion=cyan!30] (1.8,-.4) -- (1.8,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.85,2.4) -- (1.85,3) -- (2.4,3) -- (2.4,-.4); \filldraw[boxregion=green!30] (1.733,3) -- (1.733,2.4) .. controls ++(270:.4cm) and ++(45:.2cm) .. (1.133,1.8) -- (1.267,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.867,2.4) -- (1.867,3); \filldraw[primedregion=gray!55] (0,0) rectangle (1.2,.4); \filldraw[boxregion=gray!55] (.533,.8) rectangle (.667,1.2); \end{scope} \draw[\XColor,thick] (0,-.4) -- (0,.6); \draw[orange,thick] (1.2,-.4) -- (1.2,.6); \draw[\XColor,thick] (.533,.6) -- (.533,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.733,2.4) -- (1.733,3); \draw[orange,thick] (.667,.6) -- (.667,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.867,2.4) -- (1.867,3); \draw[black,thick] (1.8,-.4) -- (1.8,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (.6,2.4) -- (.6,3); \draw[DarkGreen,thick] (0,0) -- (1.2,0); \draw[DarkGreen,thick] (.533,1.2) -- (.667,1.2); \roundNbox{unshaded}{(.6,.6)}{.3}{.6}{.6}{\scriptsize{$F^2_{X,Y}$}}; \filldraw[white] (1.2,1.8) circle (.1cm); \draw[thick] (1.2,1.8) circle (.1cm); \node at (0,-.6) {\scriptsize{$F(X)$}}; \node at (1.2,-.6) {\scriptsize{$F(Y)$}}; \node at (1.8,3.2) {\scriptsize{$G(X\otimes_Q Y)$}}; \node at (1.8,-.6) {\scriptsize{$\uparrow$}}; \node at (1.8,-1) {\scriptsize{${\sf QSys}(\varphi)_R$}}; \node at (.6,3.2) {\scriptsize{$\downarrow$}}; \node at (.6,3.6) {\scriptsize{${\sf QSys}(\varphi)_P$}}; } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,-.4) rectangle (2.4,3); \filldraw[primedregion=\PrColor] (0,-.4) -- (0,.8) -- (.533,.8) -- (.533,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.2,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. 
(.6,2.4) -- (.6,3) -- (-.6,3) -- (-.6,-.4); \filldraw[primedregion=cyan!30] (1.8,-.4) -- (1.8,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (.65,1.2) -- (.65,.8) -- (1.2,.8) -- (1.2,-.4); \filldraw[primedregion=green!30] (0,-.4) -- (0,.8) -- (.533,.8) -- (.533,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (1.133,1.8) -- (1.267,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (.667,1.2) -- (.667,.8) -- (1.2,.8) -- (1.2,-.4); \filldraw[boxregion=\PrColor] (.6,3) -- (.6,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (1.2,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.75,2.4) -- (1.75,3); \filldraw[boxregion=cyan!30] (1.8,-.4) -- (1.8,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.85,2.4) -- (1.85,3) -- (2.4,3) -- (2.4,-.4); \filldraw[boxregion=green!30] (1.733,3) -- (1.733,2.4) .. controls ++(270:.4cm) and ++(45:.2cm) .. (1.133,1.8) -- (1.267,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.867,2.4) -- (1.867,3); \end{scope} \draw[\XColor,thick] (0,-.4) -- (0,.6); \draw[orange,thick] (1.2,-.4) -- (1.2,.6); \draw[\XColor,thick] (.533,.6) -- (.533,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.733,2.4) -- (1.733,3); \draw[orange,thick] (.667,.6) -- (.667,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.867,2.4) -- (1.867,3); \draw[black,thick] (1.8,-.4) -- (1.8,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (.6,2.4) -- (.6,3); \roundNbox{unshaded}{(.6,.6)}{.3}{.6}{.6}{\scriptsize{${\sf QSys}(F)^2_{X,Y}$}}; \filldraw[white] (1.2,1.8) circle (.1cm); \draw[thick] (1.2,1.8) circle (.1cm); \node at (0,-.6) {\scriptsize{$F(X)$}}; \node at (1.2,-.6) {\scriptsize{$F(Y)$}}; \node at (1.8,3.2) {\scriptsize{$G(X\otimes_Q Y)$}}; \node at (1.8,-.6) {\scriptsize{$\uparrow$}}; \node at (1.8,-1) {\scriptsize{${\sf QSys}(\varphi)_R$}}; \node at (.6,3.2) {\scriptsize{$\downarrow$}}; \node at (.6,3.6) {\scriptsize{${\sf QSys}(\varphi)_P$}}; }\,. 
\] Unitality is checked similarly. Finally, to check naturality, for a 2-cell $f\in\cC({}_PX_Q\to {}_PZ_Q)$: \[ \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=\PrColor] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=green!30] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=\PrColor] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=green!30] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,2.2); \draw[violet,thick] (1.2,2.2) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.8) -- (0,3); \roundNbox{unshaded}{(1.2,2.2)}{.3}{.15}{.15}{\scriptsize{$G(f)$}}; \filldraw[white] (.6,1.2) circle (.1cm); \draw[black,thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,3.2) {\scriptsize{$G(Z)$}}; \node at (1.2,-.2) {\scriptsize{${\sf QSys}(\varphi)_Q$}}; \node at (0,3.2) {\scriptsize{${\sf QSys}(\varphi)_P$}}; } = \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-3.6,-2.4) rectangle (3.6,2.4); \filldraw[primedregion=gray!30] (0,0) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4) -- (-3,2.4) -- (-3,-.6) -- (-.6,-.6); \filldraw[boxregion=gray!30] (0,0) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4) -- (-.6,2.4) -- (-.6,1.8) -- (.6,.6); \filldraw[primedregion=gray!55] (1.8,-2.4) .. 
controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (0,0) -- (-.6,-.6) -- (.6,-1.8) -- (.6,-2.4); \filldraw[boxregion=gray!55] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (0,0) -- (.6,.6) -- (3,.6) -- (3,-2.4); \filldraw[primedregion=\PrColor] (-.6,-2.4) -- (-.6,-.6) -- (-2.4,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,2.4) -- (-3.6,2.4) -- (-3.6,-2.4); \filldraw[boxregion=\PrColor] (-.6,2.4) -- (-.6,1.8) -- (.6,.6) -- (.6,2.4); \filldraw[primedregion=green!30] (.6,-2.4) -- (.6,-1.8) -- (-.6,-.6) -- (-.6,-2.4); \filldraw[boxregion=green!30] (3,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-1.2) -- (.6,.6) -- (.6,2.4) -- (3.6,2.4) -- (3.6,-2.4); \end{scope} \draw[black,thick] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4); \draw[\XColor,thick] (-.6,-2.4) -- (-.6,-.6) -- (.6,.6) -- (.6,1.7); \draw[violet,thick] (.6,1.7) -- (.6,2.4); \draw[\PsColor,thick] (-.6,-.6) -- (-2.4,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,2.4); \draw[\PsColor,thick] (.6,.6) -- (-.6,1.8) -- (-.6,2.4); \draw[\PsColor,thick] (-1.8,.6) -- (-.6,1.8); \draw[DarkGreen,thick] (-.6,-.6) -- (.6,-1.8) -- (.6,-2.4); \draw[DarkGreen,thick] (3,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(2.4,-1.2) -- (.6,.6); \draw[DarkGreen,thick] (1.8,-.6) -- (.6,-1.8); \filldraw[\XColor] (-.6,-.6) circle (.05cm); \filldraw[\XColor] (.6,.6) circle (.05cm); \filldraw[\PsColor] (-1.8,.6) circle (.05cm); \filldraw[\PsColor] (-.6,1.8) circle (.05cm); \filldraw[DarkGreen] (1.8,-.6) circle (.05cm); \filldraw[DarkGreen] (.6,-1.8) circle (.05cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \filldraw[white] (-1.2,1.2) circle (.1cm); \draw[thick] (-1.2,1.2) circle (.1cm); \filldraw[white] (1.2,-1.2) circle (.1cm); \draw[thick] (1.2,-1.2) circle (.1cm); \roundNbox{unshaded}{(.6,1.7)}{.4}{.1}{.1}{\normalsize{$G(f)$}}; \node at (-.6,-2.6) {\scriptsize{$F(X)$}}; \node at (.6,-2.6) {\scriptsize{$F(Q)$}}; \node at (1.8,-2.6) {\scriptsize{$\varphi_b$}}; \node at (3,-2.6) {\scriptsize{$G(Q)$}}; \node at (.6,2.6) {\scriptsize{$G(X)$}}; \node at (-.6,2.6) {\scriptsize{$G(P)$}}; \node at (-1.8,2.6) {\scriptsize{$\varphi_a$}}; \node at (-3,2.6) {\scriptsize{$F(P)$}}; } = \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-3.6,-2.4) rectangle (3.6,2.4); \filldraw[primedregion=gray!30] (0,0) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4) -- (-3,2.4) -- (-3,-.6) -- (-.6,-.6); \filldraw[boxregion=gray!30] (0,0) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4) -- (-.6,2.4) -- (-.6,1.8) -- (.6,.6); \filldraw[primedregion=gray!55] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (0,0) -- (-.6,-.6) -- (.6,-1.8) -- (.6,-2.4); \filldraw[boxregion=gray!55] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (0,0) -- (.6,.6) -- (3,.6) -- (3,-2.4); \filldraw[primedregion=\PrColor] (-.6,-2.4) -- (-.6,-.6) -- (-2.4,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. 
(-3,2.4) -- (-3.6,2.4) -- (-3.6,-2.4); \filldraw[boxregion=\PrColor] (-.6,2.4) -- (-.6,1.8) -- (.6,.6) -- (.6,2.4); \filldraw[primedregion=green!30] (.6,-2.4) -- (.6,-1.8) -- (-.6,-.6) -- (-.6,-2.4); \filldraw[boxregion=green!30] (3,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-1.2) -- (.6,.6) -- (.6,2.4) -- (3.6,2.4) -- (3.6,-2.4); \end{scope} \draw[black,thick] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4); \draw[\XColor,thick] (-.6,-2.4) -- (-.6,-1.7); \draw[violet,thick] (-.6,-1.7) -- (-.6,-.6) -- (.6,.6) -- (.6,2.4); \draw[\PsColor,thick] (-.6,-.6) -- (-2.4,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,2.4); \draw[\PsColor,thick] (.6,.6) -- (-.6,1.8) -- (-.6,2.4); \draw[\PsColor,thick] (-1.8,.6) -- (-.6,1.8); \draw[DarkGreen,thick] (-.6,-.6) -- (.6,-1.8) -- (.6,-2.4); \draw[DarkGreen,thick] (3,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-1.2) -- (.6,.6); \draw[DarkGreen,thick] (1.8,-.6) -- (.6,-1.8); \filldraw[violet] (-.6,-.6) circle (.05cm); \filldraw[violet] (.6,.6) circle (.05cm); \filldraw[\PsColor] (-1.8,.6) circle (.05cm); \filldraw[\PsColor] (-.6,1.8) circle (.05cm); \filldraw[DarkGreen] (1.8,-.6) circle (.05cm); \filldraw[DarkGreen] (.6,-1.8) circle (.05cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \filldraw[white] (-1.2,1.2) circle (.1cm); \draw[thick] (-1.2,1.2) circle (.1cm); \filldraw[white] (1.2,-1.2) circle (.1cm); \draw[thick] (1.2,-1.2) circle (.1cm); \roundNbox{unshaded}{(-.6,-1.7)}{.4}{.1}{.1}{\normalsize{$F(f)$}}; \node at (-.6,-2.6) {\scriptsize{$F(X)$}}; \node at (.6,-2.6) {\scriptsize{$F(Q)$}}; \node at (1.8,-2.6) {\scriptsize{$\varphi_b$}}; \node at (3,-2.6) {\scriptsize{$G(Q)$}}; \node at (.6,2.6) {\scriptsize{$G(X)$}}; \node at (-.6,2.6) {\scriptsize{$G(P)$}}; \node at (-1.8,2.6) {\scriptsize{$\varphi_a$}}; \node at (-3,2.6) {\scriptsize{$F(P)$}}; } = \tikzmath[scale=.75, transform 
shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=\PrColor] (0,0) -- (0,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,2.4) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=green!30] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,1.2) -- (0,0); \filldraw[boxregion=\PrColor] (0,3) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3); \filldraw[boxregion=green!30] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.8); \draw[violet,thick] (0,.8) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,2.4) -- (0,3); \roundNbox{unshaded}{(0,.8)}{.3}{.15}{.15}{\scriptsize{$F(f)$}}; \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,3.2) {\scriptsize{$G(Z)$}}; \node at (1.2,-.2) {\scriptsize{${\sf QSys}(\varphi)_Q$}}; \node at (0,3.2) {\scriptsize{${\sf QSys}(\varphi)_P$}}; }\,. \] \end{construction} \begin{construction} \label{construction:QSys(n)} Suppose $n: \varphi \Rrightarrow \psi$ is a bounded modification between $\dag$-transformations. We define a bounded modification ${\sf QSys}(n): {\sf QSys}(\varphi) \Rrightarrow {\sf QSys}(\psi)$ as follows. 
Given a Q-system ${}_bQ_b\in{\sf QSys}(\cC)$, we define \[ \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-1.2,-1.2) rectangle (1.2,1.2); \filldraw[primedregion=green!30] (-1.2,-1.2) rectangle (0,1.2); \filldraw[boxregion=green!30] (1.2,1.2) rectangle (0,-1.2); \filldraw[green!30] (-1,-.8) rectangle (-.2,-.4); \filldraw[green!30] (.2,-.8) rectangle (1,-.4); \end{scope} \draw[black,thick] (0,-1.2) -- (0,0); \draw[snake,thick] (0,0) -- (0,1.2); \roundNbox{unshaded}{(0,0)}{.3}{.5}{.5}{\scriptsize{${\sf QSys}(n)_Q$}}; \node at (.6,-.6) {\scriptsize{$G(Q)$}}; \node at (-.6,-.6) {\scriptsize{$F(Q)$}}; \node at (0,-1.4) {\scriptsize{${\sf QSys}(\varphi)_Q$}}; \node at (0,1.4) {\scriptsize{${\sf QSys}(\psi)_Q$}}; } := \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-2.4,-1.8) rectangle (2.4,1.2); \filldraw[primedregion=gray!55] (.6,-1.8) -- (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2) -- (-2.4,1.2) -- (-2.4,-1.8); \filldraw[boxregion=gray!55] (.6,-1.8) -- (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2) -- (2.4,1.2) -- (2.4,-1.8); \filldraw[primedregion=green!30] (-.6,-1.8) -- (-.6,-.6) -- (-1.2,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,1.2) -- (-2.4,1.2) -- (-2.4,-1.8); \filldraw[boxregion=green!30] (1.8,-1.8) -- (1.8,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,0) -- (.6,.6) -- (.6,1.2) -- (2.4,1.2) -- (2.4,-1.8); \end{scope} \draw[black,thick] (.6,-1.8) -- (.6,-1.2); \draw[snake,thick] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2); \draw[DarkGreen,thick] (-.6,-1.8) -- (-.6,-1.2) -- (-.6,-.6) -- (-1.2,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,1.2); \draw[DarkGreen,thick] (1.8,-1.8) -- (1.8,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(1.2,0) -- (.6,.6) -- (.6,1.2); \draw[DarkGreen,thick] (-.6,-.6) -- (.6,.6); \filldraw[DarkGreen] (.6,.6) circle (.05cm); \filldraw[DarkGreen] (-.6,-.6) circle (.05cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \roundNbox{unshaded}{(.6,-1)}{.3}{0}{0}{\small{$n_b$}}; \node at (-.6,-2) {\scriptsize{$F(Q)$}}; \node at (.6,-2) {\scriptsize{$\varphi_b$}}; \node at (1.8,-2) {\scriptsize{$G(Q)$}}; \node at (-.6,1.4) {\scriptsize{$\psi_b$}}; } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-2.4,-1.2) rectangle (2.4,1.8); \filldraw[primedregion=gray!55] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.8) -- (-2.4,1.8) -- (-2.4,-1.2); \filldraw[boxregion=gray!55] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.8) -- (2.4,1.8) -- (2.4,-1.2); \filldraw[primedregion=green!30] (-.6,-1.2) -- (-.6,-.6) -- (-1.2,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,1.2) -- (-1.8,1.8) -- (-2.4,1.8) -- (-2.4,-1.2); \filldraw[boxregion=green!30] (1.8,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,0) -- (.6,.6) -- (.6,1.8) -- (2.4,1.8) -- (2.4,-1.2); \end{scope} \draw[black,thick] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2); \draw[snake,thick] (-.6,1.2) -- (-.6,1.8); \draw[DarkGreen,thick] (-.6,-1.2) -- (-.6,-.6) -- (-1.2,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,1.2) -- (-1.8,1.8); \draw[DarkGreen,thick] (1.8,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(1.2,0) -- (.6,.6) -- (.6,1.2) -- (.6,1.8); \draw[DarkGreen,thick] (-.6,-.6) -- (.6,.6); \filldraw[DarkGreen] (.6,.6) circle (.05cm); \filldraw[DarkGreen] (-.6,-.6) circle (.05cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \roundNbox{unshaded}{(-.6,1)}{.3}{0}{0}{\small{$n_b$}}; \node at (-.6,-1.4) {\scriptsize{$F(Q)$}}; \node at (.6,-1.4) {\scriptsize{$\varphi_b$}}; \node at (1.8,-1.4) {\scriptsize{$G(Q)$}}; \node at (-.6,2) {\scriptsize{$\psi_b$}}; }\,. \] It is clear that ${\sf QSys}(n^\dag)={\sf QSys}(n)^\dag$. The modification coherence axiom is verified by \[ \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (2.1,3); \filldraw[primedregion=\PrColor] (0,0) -- (0,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,2.4) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=green!30] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,1.2) -- (0,0); \filldraw[boxregion=\PrColor] (0,3) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3); \filldraw[boxregion=green!30] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3) -- (2.1,3) -- (2.1,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.8); \draw[snake,thick] (1.2,.8) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,2.4) -- (0,3); \roundNbox{unshaded}{(1.2,.8)}{.3}{.45}{.45}{\scriptsize{${\sf QSys}(n)_Q$}}; \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,3.2) {\scriptsize{$G(X)$}}; \node at (1.2,-.2) {\scriptsize{${\sf QSys}(\varphi)_Q$}}; \node at (0,3.2) {\scriptsize{${\sf QSys}(\psi)_P$}}; } =\tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-3.6,-3) rectangle (3.6,2.4); \filldraw[primedregion=gray!30] (0,0) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4) -- (-3,2.4) -- (-3,-.6) -- (-.6,-.6); \filldraw[boxregion=gray!30] (0,0) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4) -- (-.6,2.4) -- (-.6,1.8) -- (.6,.6); \filldraw[primedregion=gray!55] (1.8,-3) -- (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (0,0) -- (-.6,-.6) -- (.6,-1.8) -- (.6,-3); \filldraw[boxregion=gray!55] (1.8,-3) -- (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (0,0) -- (.6,.6) -- (3,.6) -- (3,-3); \filldraw[primedregion=\PrColor] (-.6,-3) -- (-.6,-.6) -- (-2.4,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,2.4) -- (-3.6,2.4) -- (-3.6,-3); \filldraw[boxregion=\PrColor] (-.6,2.4) -- (-.6,1.8) -- (.6,.6) -- (.6,2.4); \filldraw[primedregion=green!30] (.6,-3) -- (.6,-1.8) -- (-.6,-.6) -- (-.6,-3); \filldraw[boxregion=green!30] (3,-3) -- (3,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-1.2) -- (.6,.6) -- (.6,2.4) -- (3.6,2.4) -- (3.6,-3); \end{scope} \draw[black,thick] (1.8,-3) -- (1.8,-2.4); \draw[snake,thick] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4); \draw[\XColor,thick] (-.6,-3) -- (-.6,-.6) -- (.6,.6) -- (.6,2.4); \draw[\PsColor,thick] (-.6,-.6) -- (-2.4,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. 
(-3,2.4); \draw[\PsColor,thick] (.6,.6) -- (-.6,1.8) -- (-.6,2.4); \draw[\PsColor,thick] (-1.8,.6) -- (-.6,1.8); \draw[DarkGreen,thick] (-.6,-.6) -- (.6,-1.8) -- (.6,-3); \draw[DarkGreen,thick] (3,-3) -- (3,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-1.2) -- (.6,.6); \draw[DarkGreen,thick] (1.8,-.6) -- (.6,-1.8); \filldraw[\XColor] (-.6,-.6) circle (.05cm); \filldraw[\XColor] (.6,.6) circle (.05cm); \filldraw[\PsColor] (-1.8,.6) circle (.05cm); \filldraw[\PsColor] (-.6,1.8) circle (.05cm); \filldraw[DarkGreen] (1.8,-.6) circle (.05cm); \filldraw[DarkGreen] (.6,-1.8) circle (.05cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \filldraw[white] (-1.2,1.2) circle (.1cm); \draw[thick] (-1.2,1.2) circle (.1cm); \filldraw[white] (1.2,-1.2) circle (.1cm); \draw[thick] (1.2,-1.2) circle (.1cm); \roundNbox{unshaded}{(1.8,-2.2)}{.3}{0}{0}{\small{$n_b$}}; \node at (-.6,-3.2) {\scriptsize{$F(X)$}}; \node at (.6,-3.2) {\scriptsize{$F(Q)$}}; \node at (1.8,-3.2) {\scriptsize{$\varphi_b$}}; \node at (3,-3.2) {\scriptsize{$G(Q)$}}; \node at (.6,2.6) {\scriptsize{$G(X)$}}; \node at (-.6,2.6) {\scriptsize{$G(P)$}}; \node at (-1.8,2.6) {\scriptsize{$\psi_a$}}; \node at (-3,2.6) {\scriptsize{$F(P)$}}; } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-3.6,-2.4) rectangle (3.6,3); \filldraw[primedregion=gray!30] (0,0) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4) -- (-1.8,3) -- (-3,3) -- (-3,-.6) -- (-.6,-.6); \filldraw[boxregion=gray!30] (0,0) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4) -- (-1.8,3) -- (-.6,3) -- (-.6,1.8) -- (.6,.6); \filldraw[primedregion=gray!55] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (0,0) -- (-.6,-.6) -- (.6,-1.8) -- (.6,-2.4); \filldraw[boxregion=gray!55] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(1.2,-1.2) -- (0,0) -- (.6,.6) -- (3,.6) -- (3,-2.4); \filldraw[primedregion=\PrColor] (-.6,-2.4) -- (-.6,-.6) -- (-2.4,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,2.4) -- (-3,3) -- (-3.6,3) -- (-3.6,-2.4); \filldraw[boxregion=\PrColor] (-.6,3) -- (-.6,1.8) -- (.6,.6) -- (.6,3); \filldraw[primedregion=green!30] (.6,-2.4) -- (.6,-1.8) -- (-.6,-.6) -- (-.6,-2.4); \filldraw[boxregion=green!30] (3,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-1.2) -- (.6,.6) -- (.6,3) -- (3.6,3) -- (3.6,-2.4); \end{scope} \draw[black,thick] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4); \draw[snake,thick] (-1.8,2.4) -- (-1.8,3); \draw[\XColor,thick] (-.6,-2.4) -- (-.6,-.6) -- (.6,.6) -- (.6,3); \draw[\PsColor,thick] (-.6,-.6) -- (-2.4,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,2.4) -- (-3,3); \draw[\PsColor,thick] (.6,.6) -- (-.6,1.8) -- (-.6,2.4) -- (-.6,3); \draw[\PsColor,thick] (-1.8,.6) -- (-.6,1.8); \draw[DarkGreen,thick] (-.6,-.6) -- (.6,-1.8) -- (.6,-2.4); \draw[DarkGreen,thick] (3,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(2.4,-1.2) -- (.6,.6); \draw[DarkGreen,thick] (1.8,-.6) -- (.6,-1.8); \filldraw[\XColor] (-.6,-.6) circle (.05cm); \filldraw[\XColor] (.6,.6) circle (.05cm); \filldraw[\PsColor] (-1.8,.6) circle (.05cm); \filldraw[\PsColor] (-.6,1.8) circle (.05cm); \filldraw[DarkGreen] (1.8,-.6) circle (.05cm); \filldraw[DarkGreen] (.6,-1.8) circle (.05cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \filldraw[white] (-1.2,1.2) circle (.1cm); \draw[thick] (-1.2,1.2) circle (.1cm); \filldraw[white] (1.2,-1.2) circle (.1cm); \draw[thick] (1.2,-1.2) circle (.1cm); \roundNbox{unshaded}{(-1.8,2.2)}{.3}{0}{0}{\small{$n_a$}}; \node at (-.6,-2.6) {\scriptsize{$F(X)$}}; \node at (.6,-2.6) {\scriptsize{$F(Q)$}}; \node at (1.8,-2.6) {\scriptsize{$\varphi_b$}}; \node at (3,-2.6) {\scriptsize{$G(Q)$}}; \node at (.6,3.2) {\scriptsize{$G(X)$}}; \node at (-.6,3.2) {\scriptsize{$G(P)$}}; \node at (-1.8,3.2) {\scriptsize{$\psi_a$}}; \node at (-3,3.2) {\scriptsize{$F(P)$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.9,0) rectangle (1.8,3); \filldraw[primedregion=\PrColor] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.9,3) -- (-.9,0); \filldraw[primedregion=green!30] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=\PrColor] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=green!30] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.6) .. 
controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.8) -- (0,2.2); \draw[snake,thick] (0,2.2) -- (0,3); \roundNbox{unshaded}{(0,2.2)}{.3}{.45}{.45}{\scriptsize{${\sf QSys}(n)_P$}}; \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,3.2) {\scriptsize{$G(X)$}}; \node at (1.2,-.2) {\scriptsize{${\sf QSys}(\varphi)_Q$}}; \node at (0,3.2) {\scriptsize{${\sf QSys}(\psi)_P$}}; }\,. \] By our construction, it is clear that when $n:\varphi\Rrightarrow \psi$ is invertible, ${\sf QSys}(n):{\sf QSys}(\varphi)\Rrightarrow{\sf QSys}(\psi)$ is also invertible. \end{construction} \begin{construction} \label{construction:QSys-otimes} Given $\cA,\cB\in 2{\mathsf{Cat}}$, $F,G,H:\cA\to\cB$, and $\varphi:F\Rightarrow G$, $\psi:G\Rightarrow H$, we construct ${\sf QSys}^{\otimes}_{\varphi,\psi}:{\sf QSys}(\varphi)\otimes{\sf QSys}(\psi)\Rrightarrow{\sf QSys}(\varphi\otimes\psi)$ by \[ \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.7,.3) rectangle (.7,1.7); \filldraw[primedregion=white] (-.7,0) rectangle (.7,2); \filldraw[boxregion=white] (.2,0) -- (.2,1) -- (.05,1) -- (.05,2) -- (-.05,2) -- (-.05,1) -- (-.2,1) -- (-.2,0); \filldraw[plusregion=white] (.2,0) -- (.2,1) -- (.05,1) -- (.05,2) -- (.7,2) -- (.7,0); \end{scope} \draw[black,thick] (-.2,.3) -- (-.2,1); \draw[snake,thick] (.2,.3) -- (.2,1); \draw[black,thick] (-.05,1) -- (-.05,1.7); \draw[snake,thick] (.05,1) -- (.05,1.7); \roundNbox{unshaded}{(0,1)}{.3}{.25}{.25}{\tiny{${\sf QSys}^\otimes_{\varphi,\psi}$}}; \draw[thin, dotted, rounded corners = 5pt] (-.7,.3) rectangle (.7,1.7); } : \qquad \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners=5pt] (-1.2,0) rectangle (1.2,2); \filldraw[primedregion=green!30] (-1.2,0) rectangle (1,2); \filldraw[boxregion=green!30] (.6,0) -- (.6,1) -- (.05,1) -- (.05,2) -- (-.05,2) -- (-.05,1) -- (-.6,1) -- (-.6,0); \filldraw[plusregion=green!30] (.6,0) -- (.6,1) -- (.05,1) -- (.05,2) -- (1.2,2) -- 
(1.2,0); \end{scope} \draw[black,thick] (-.6,0) -- (-.6,1); \draw[snake,thick] (.6,0) -- (.6,1); \draw[black,thick] (-.05,1) -- (-.05,2); \draw[snake,thick] (.05,1) -- (.05,2); \roundNbox{unshaded}{(0,1)}{.33}{.6}{.6}{\scriptsize{$\left({\sf QSys}^\otimes_{\varphi,\psi}\right)_Q$}}; } := \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-4.8,-1.2) rectangle (2.4,1.2); \filldraw[primedregion=green!30] (-4.8,-1.2) rectangle (2.4,1.2); \filldraw[primedregion=gray!55] (-4.2,-1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-3.6,0) -- (-3,.6) -- (-3,1.2) -- (-1.2,1.2) -- (-1.2,-1.2); \filldraw[boxregion=gray!55] (-3,-1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-2.4,0) .. controls ++(45:.2cm) and ++(270:.4cm) .. (-1.8,1.2) -- (.6,1.2) -- (.6,-1.2); \filldraw[boxregion=green!30] (-1.8,-.6) rectangle (-.6,-1.2); \filldraw[plusregion=gray!55] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2) -- (2.4,1.2) -- (2.4,-1.2); \filldraw[plusregion=green!30] (1.8,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,0) -- (.6,.6) -- (.6,1.2) -- (2.4,1.2) -- (2.4,-1.2); \end{scope} \draw[black,thick] (-3,-1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-2.4,0) .. controls ++(45:.2cm) and ++(270:.4cm) .. (-1.8,1.2); \draw[snake,thick] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2); \draw[DarkGreen,thick] (-4.2,-1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-3.6,0) -- (-3,.6); \draw[DarkGreen,thick] (1.8,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) ..
(1.2,0) -- (.6,.6); \draw[DarkGreen,thick] (-.6,-1.2) -- (-.6,-.6) -- (.6,.6) -- (.6,1.2); \draw[DarkGreen,thick] (-1.8,-1.2) -- (-1.8,-.6) -- (-3,.6) -- (-3,1.2); \draw[DarkGreen,thick] (-1.8,-.6) -- (-.6,-.6); \filldraw[DarkGreen] (.6,.6) circle (.07cm); \filldraw[DarkGreen] (-.6,-.6) circle (.07cm); \filldraw[DarkGreen] (-3,.6) circle (.07cm); \filldraw[DarkGreen] (-1.8,-.6) circle (.07cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \filldraw[white] (-2.4,0) circle (.1cm); \draw[thick] (-2.4,0) circle (.1cm); \node at (-4.2,-1.4) {\small{$F(Q)$}}; \node at (-3,-1.4) {\small{$\varphi_b$}}; \node at (-.6,-1.4) {\small{$G(Q)$}}; \node at (.6,-1.4) {\small{$\psi_b$}}; \node at (1.8,-1.4) {\small{$H(Q)$}}; } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-2.1,-1) rectangle (2.1,1); \filldraw[primedregion=green!30] (-2.1,-1.2) rectangle (-1.5,1.2); \filldraw[primedregion=gray!55] (-1.5,-1.2) rectangle (-.9,1.2); \filldraw[boxregion=gray!55] (-.9,-1.2) rectangle (.9,1.2); \filldraw[boxregion=green!30] (-.3,-1.2) rectangle (.3,0); \filldraw[plusregion=gray!55] (.9,-1.2) rectangle (1.5,1.2); \filldraw[plusregion=green!30] (1.5,-1.2) rectangle (2.1,1.2); \end{scope} \draw[black,thick] (-.9,-1) -- (-.9,1); \draw[snake,thick] (.9,-1) -- (.9,1); \draw[DarkGreen,thick] (-1.5,-1) -- (-1.5,1); \draw[DarkGreen,thick] (1.5,-1) -- (1.5,1); \draw[DarkGreen,thick] (.3,-1) -- (.3,0); \draw[DarkGreen,thick] (-.3,-1) -- (-.3,0); \draw[DarkGreen,thick] (-1.5,0) -- (1.5,0); \filldraw[white] (-.9,0) circle (.08cm); \draw[thick] (-.9,0) circle (.08cm); \filldraw[white] (.9,0) circle (.08cm); \draw[thick] (.9,0) circle (.08cm); }\,. \] It is straightforward to verify $({\sf QSys}^\otimes_{\varphi,\psi})_Q$ is unitary. 
The following calculation shows ${\sf QSys}^\otimes_{\varphi,\psi}$ is a modification: \[ \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-1.2,-1.9) rectangle (1.8,1); \filldraw[primedregion=\PrColor] (-.6,-1.9) -- (-.6,-.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (0,0) -- (.05,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.5,.6) -- (-.5,1) -- (-1.2,1) -- (-1.2,-1.9); \filldraw[primedregion=green!30] (-.6,-1.9) -- (-.6,-.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (0,0) -- (.05,0) .. controls ++(-45:.2cm) and ++(90:.4cm) .. (.6,-.6) -- (.6,-1) -- (-.05,-1) -- (-.05,-1.9); \filldraw[boxregion=green!30] (-.05,-1.9) rectangle (1.15,-1); \filldraw[plusregion=\PrColor] (-.5,1) -- (-.5,.6) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.05,0) -- (0,0) .. controls ++(45:.2cm) and ++(270:.4cm) .. (.6,.6) -- (.6,1); \filldraw[plusregion=green!30] (1.15,-1.9) -- (1.15,-1) -- (.6,-1) -- (.6,-.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.05,0) -- (0,0) .. controls ++(45:.2cm) and ++(270:.4cm) .. (.6,.6) -- (.6,1) -- (1.8,1) -- (1.8,-1.9); \end{scope} \draw[\XColor,thick] (-.6,-1.9) -- (-.6,-.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (.6,.6) -- (.6,1); \draw[black,thick] (.5,-1) -- (.5,-.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (-.6,.6) -- (-.6,1); \draw[snake,thick] (.6,-1) -- (.6,-.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (-.5,.6) -- (-.5,1); \draw[black,thick] (-.05,-1) -- (-.05,-1.9); \draw[snake,thick] (1.15,-1) -- (1.15,-1.9); \roundNbox{unshaded}{(.55,-1)}{.33}{.6}{.6}{\scriptsize{$\left({\sf QSys}^\otimes_{\varphi,\psi}\right)_Q$}}; \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); } = \tikzmath[scale=.45, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-4.5,-5.1) rectangle (5.7,2.7); \filldraw[plusregion=green!30] (5.1,-5.1) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (4.5,-3.9) -- (3.9,-3.3) -- (3.9,-2.7) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(3.3,-1.5) -- (.9,.9) -- (.9,2.7) -- (5.7,2.7) -- (5.7,-5.1); \filldraw[plusregion=gray!55] (5.1,-5.1) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (4.5,-3.9) -- (3.9,-3.3) -- (3.9,-2.7) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (3.3,-1.5) -- (.9,.9) -- (-.9,-.9) -- (-.9,-5.1); \filldraw[boxregion=gray!55] (3.9,-5.1) .. controls ++(90:.4cm) and ++(45:.2cm) .. (3.3,-3.9) .. controls ++(135:.2cm) and ++(270:.4cm) .. (2.7,-2.7) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.1,-1.5) -- (.3,.3) -- (-.9,-.9) -- (-.9,-5.1); \filldraw[boxregion=green!30] (1.5,-5.1) rectangle (2.7,-4.5); \filldraw[primedregion=gray!55] (.3,-5.1) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.9,-3.9) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.5,-2.7) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.9,-1.5) -- (-.3,-.3) -- (-.9,-.9) -- (-.9,-5.1); \filldraw[primedregion=green!30] (-.9,-5.1) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-.3,-3.9) -- (.3,-3.3) -- (.3,-2.1) -- (-.9,-.9) -- (-2.1,-.9) -- (-2.1,-5.1); \filldraw[primedregion=\PrColor] (-2.1,-5.1) -- (-2.1,-2.7) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-1.5,-1.5) -- (-.9,-.9) -- (-3.3,1.5) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3.9,2.7) -- (-4.5,2.7) -- (-4.5,-5.1); \filldraw[primedregion=gray!30] (-.9,-.9) -- (-3.3,1.5) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3.9,2.7) -- (.9,2.7) -- (.9,.9); \filldraw[boxregion=gray!30] (-.3,-.3) -- (-2.1,1.5) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-2.7,2.7) -- (.9,2.7) -- (.9,.9); \filldraw[plusregion=gray!30] (.3,.3) -- (-.9,1.5) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.5,2.7) -- (.9,2.7) -- (.9,.9); \filldraw[plusregion=\PrColor] (.9,.9) -- (-.3,2.1) -- (-.3,2.7) -- (.9,2.7) -- (.9,.9); \end{scope} \draw[black,thick] (1.5,-2.7) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.9,-1.5) -- (-2.1,1.5) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-2.7,2.7); \draw[snake,thick] (2.7,-2.7) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.1,-1.5) -- (-.9,1.5) .. 
controls ++(135:.2cm) and ++(270:.4cm) .. (-1.5,2.7); \draw[\XColor,thick] (-2.1,-5.1) -- (-2.1,-2.7) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-1.5,-1.5) -- (-.9,-.9) -- (.9,.9) -- (.9,2.7); \draw[\PsColor,thick] (-2.1,.3) -- (-.3,2.1); \draw[\PsColor,thick] (.9,.9) -- (-.3,2.1) -- (-.3,2.7); \draw[\PsColor,thick] (-.9,-.9) -- (-3.3,1.5) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3.9,2.7); \draw[DarkGreen,thick] (.3,-2.1) -- (2.1,-.3); \draw[DarkGreen,thick] (-.9,-.9) -- (.3,-2.1) -- (.3,-2.7); \draw[DarkGreen,thick] (3.9,-2.7) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (3.3,-1.5) -- (.9,.9); \filldraw[white] (-.3,-.3) circle (.1cm); \draw[thick] (-.3,-.3) circle (.1cm); \filldraw[white] (.3,.3) circle (.1cm); \draw[thick] (.3,.3) circle (.1cm); \filldraw[white] (-1.5,.9) circle (.1cm); \draw[thick] (-1.5,.9) circle (.1cm); \filldraw[white] (.9,-1.5) circle (.1cm); \draw[thick] (.9,-1.5) circle (.1cm); \filldraw[white] (1.5,-.9) circle (.1cm); \draw[thick] (1.5,-.9) circle (.1cm); \filldraw[white] (-.9,1.5) circle (.1cm); \draw[thick] (-.9,1.5) circle (.1cm); \draw[dashed,rounded corners = 5] (-1.2,-5.1) rectangle (5.4,-2.7); \draw[black,thick] (.3,-5.1) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.9,-3.9) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.5,-2.7); \draw[snake,thick] (3.9,-5.1) .. controls ++(90:.4cm) and ++(45:.2cm) .. (3.3,-3.9) .. controls ++(135:.2cm) and ++(270:.4cm) .. (2.7,-2.7) -- (2.7,-2.6); \draw[DarkGreen,thick] (-.9,-5.1) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-.3,-3.9) -- (.3,-3.3) -- (.3,-2.7); \draw[DarkGreen,thick] (5.1,-5.1) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(4.5,-3.9) -- (3.9,-3.3) -- (3.9,-2.7); \draw[DarkGreen,thick] (.3,-3.3) -- (1.5,-4.5) -- (1.5,-5.1); \draw[DarkGreen,thick] (3.9,-3.3) -- (2.7,-4.5) -- (2.7,-5.1); \draw[DarkGreen,thick] (1.5,-4.5) -- (2.7,-4.5); \filldraw[white] (.9,-3.9) circle (.1cm); \draw[thick] (.9,-3.9) circle (.1cm); \filldraw[white] (3.3,-3.9) circle (.1cm); \draw[thick] (3.3,-3.9) circle (.1cm); \filldraw[\XColor] (-.9,-.9) circle (.07cm); \filldraw[\XColor] (.9,.9) circle (.07cm); \filldraw[\PsColor] (-2.1,.3) circle (.07cm); \filldraw[\PsColor] (-.3,2.1) circle (.07cm); \filldraw[DarkGreen] (.3,-2.1) circle (.07cm); \filldraw[DarkGreen] (2.1,-.3) circle (.07cm); \filldraw[DarkGreen] (.3,-3.3) circle (.07cm); \filldraw[DarkGreen] (1.5,-4.5) circle (.07cm); \filldraw[DarkGreen] (2.7,-4.5) circle (.07cm); \filldraw[DarkGreen] (3.9,-3.3) circle (.07cm); } = \tikzmath[scale=.4, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-6.3,-3.3) rectangle (6.3,5.7); \filldraw[primedregion=gray!30] (-1.5,-1.5) -- (-.9,-.9) -- (-3.9,2.1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-4.5,3.3) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-3.9,4.5) .. controls ++(45:.2cm) and ++(270:.4cm) .. (-3.3,5.7) -- (-5.7,5.7) -- (-5.7,-1.5); \filldraw[primedregion=\PrColor] (-1.5,-3.3) -- (-1.5,-1.5) -- (-5.1,2.1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-5.7,3.3) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-5.1,4.5) -- (-4.5,5.1) -- (-4.5,5.7) -- (-6.3,5.7) -- (-6.3,-3.3); \filldraw[boxregion=gray!30] (-.9,-.9) -- (-3.9,2.1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-4.5,3.3).. controls ++(90:.4cm) and ++(-135:.2cm) .. (-3.9,4.5) .. controls ++(45:.2cm) and ++(270:.4cm) .. (-3.3,5.7) -- (-.9,5.7) -- (1.5,5.7) -- (1.5,1.5); \filldraw[boxregion=\PrColor] (-.3,-.3) -- (-2.7,2.1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3.3,3.3) -- (-3.3,3.9) -- (-2.1,3.9) -- (.3,.3); \filldraw[boxregion=gray!30] (.3,.3) -- (-1.5,2.1) .. controls ++(135:.2cm) and ++(270:.4cm) .. 
(-2.1,3.3) -- (-2.1,3.9) -- (-1.5,4.5) -- (1.5,1.5); \filldraw[plusregion=gray!30] (.9,.9) -- (-.3,2.1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.9,3.3) .. controls ++(90:.4cm) and ++(45:.2cm) .. (-1.5,4.5) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-2.1,5.7) -- (-.9,5.7) -- (1.5,5.7) -- (1.5,1.5); \filldraw[plusregion=\PrColor] (.3,2.7) -- (.3,3.3) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (-.3,4.5) -- (-.9,5.1) -- (-.9,5.7) -- (1.5,5.7) -- (1.5,1.5); \filldraw[boxregion=gray!55] (4.5,-3.3) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (3.9,-2.1) -- (.9,.9) -- (-1.5,-1.5) -- (-1.5,-3.3); \filldraw[boxregion=green!30] (3.3,-3.3) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.7,-2.1) -- (.3,.3) -- (-1.5,-1.5) -- (-1.5,-3.3); \filldraw[boxregion=gray!55] (2.1,-3.3) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.5,-2.1) -- (-.3,-.3) -- (-1.5,-1.5) -- (-1.5,-3.3); \filldraw[primedregion=gray!55] (.9,-3.3) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.3,-2.1) -- (-.9,-.9) -- (-1.5,-1.5) -- (-1.5,-3.3); \filldraw[primedregion=green!30] (-1.5,-3.3) -- (-1.5,-1.5) -- (-.3,-2.7) -- (-.3,-3.3); \filldraw[plusregion=gray!55] (4.5,-3.3) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (3.9,-2.1) -- (.9,.9) -- (1.5,1.5) -- (5.7,1.5) -- (5.7,-3.3); \filldraw[plusregion=green!30] (5.7,-3.3) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (5.1,-2.1) -- (1.5,1.5) -- (1.5,5.7) -- (6.3,5.7) -- (6.3,-3.3); \end{scope} \draw[black,thick] (.9,-3.3) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.3,-2.1) -- (-3.9,2.1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-4.5,3.3); \draw[snake,thick] (4.5,-3.3) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (3.9,-2.1) -- (-.3,2.1) .. controls ++(135:.2cm) and ++(270:.4cm) .. 
(-.9,3.3); \draw[\XColor,thick] (-1.5,-3.3) -- (-1.5,-1.5) -- (1.5,1.5) -- (1.5,5.7); \draw[\PsColor,thick] (-2.7,-.3) -- (-1.5,.9); \draw[\PsColor,thick] (-.9,1.5) -- (.3,2.7); \draw[\PsColor,thick] (1.5,1.5) -- (.3,2.7) -- (.3,3.3); \draw[\PsColor,thick] (-1.5,-1.5) -- (-5.1,2.1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-5.7,3.3); \draw[\PsColor,thick] (-.3,-.3) -- (-2.7,2.1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3.3,3.3); \draw[\PsColor,thick] (.3,.3) -- (-1.5,2.1) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-2.1,3.3); \draw[DarkGreen,thick] (-.3,-2.7) -- (.9,-1.5); \draw[DarkGreen,thick] (1.5,-.9) -- (2.7,.3); \draw[DarkGreen,thick] (-1.5,-1.5) -- (-.3,-2.7) -- (-.3,-3.3); \draw[DarkGreen,thick] (2.1,-3.3) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.5,-2.1) -- (-.3,-.3); \draw[DarkGreen,thick] (3.3,-3.3) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.7,-2.1) -- (.3,.3); \draw[DarkGreen,thick] (5.7,-3.3) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (5.1,-2.1) -- (1.5,1.5); \filldraw[white] (-.9,-.9) circle (.1cm); \draw[thick] (-.9,-.9) circle (.1cm); \filldraw[white] (.9,.9) circle (.1cm); \draw[thick] (.9,.9) circle (.1cm); \filldraw[white] (-2.1,.3) circle (.1cm); \draw[thick] (-2.1,.3) circle (.1cm); \filldraw[white] (.3,-2.1) circle (.1cm); \draw[thick] (.3,-2.1) circle (.1cm); \filldraw[white] (2.1,-.3) circle (.1cm); \draw[thick] (2.1,-.3) circle (.1cm); \filldraw[white] (-.3,2.1) circle (.1cm); \draw[thick] (-.3,2.1) circle (.1cm); \draw[dashed,rounded corners = 5] (-6,3.3) rectangle (.6,5.7); \draw[black,thick] (-4.5,3.3) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-3.9,4.5) .. controls ++(45:.2cm) and ++(270:.4cm) .. (-3.3,5.7); \draw[snake,thick] (-.9,3.3) .. controls ++(90:.4cm) and ++(45:.2cm) .. (-1.5,4.5) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-2.1,5.7); \draw[\PsColor,thick] (-5.7,3.3) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-5.1,4.5) -- (-4.5,5.1); \draw[\PsColor,thick] (.3,3.3) .. 
controls ++(90:.4cm) and ++(-45:.2cm) .. (-.3,4.5) -- (-.9,5.1); \draw[\PsColor,thick] (-2.1,3.3) -- (-2.1,3.9) -- (-.9,5.1) -- (-.9,5.7); \draw[\PsColor,thick] (-3.3,3.3) -- (-3.3,3.9) -- (-4.5,5.1) -- (-4.5,5.7); \draw[\PsColor,thick] (-2.1,3.9) -- (-3.3,3.9); \filldraw[white] (-1.5,4.5) circle (.1cm); \draw[thick] (-1.5,4.5) circle (.1cm); \filldraw[white] (-3.9,4.5) circle (.1cm); \draw[thick] (-3.9,4.5) circle (.1cm); \filldraw[\XColor] (-1.5,-1.5) circle (.07cm); \filldraw[\XColor] (-.3,-.3) circle (.07cm); \filldraw[\XColor] (.3,.3) circle (.07cm); \filldraw[\XColor] (1.5,1.5) circle (.07cm); \filldraw[\PsColor] (-2.7,-.3) circle (.07cm); \filldraw[\PsColor] (-1.5,.9) circle (.07cm); \filldraw[\PsColor] (-.9,1.5) circle (.07cm); \filldraw[\PsColor] (.3,2.7) circle (.07cm); \filldraw[\PsColor] (-4.5,5.1) circle (.07cm); \filldraw[\PsColor] (-3.3,3.9) circle (.07cm); \filldraw[\PsColor] (-2.1,3.9) circle (.07cm); \filldraw[\PsColor] (-.9,5.1) circle (.07cm); \filldraw[DarkGreen] (-.3,-2.7) circle (.07cm); \filldraw[DarkGreen] (.9,-1.5) circle (.07cm); \filldraw[DarkGreen] (1.5,-.9) circle (.07cm); \filldraw[DarkGreen] (2.7,.3) circle (.07cm); } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-1.8,-1.8) rectangle (1.8,2.7); \filldraw[primedregion=\PrColor] (-1.2,-1.8) -- (-1.2,-1.4) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (-.6,-.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.2,-.2) -- (-1.2,1.8) -- (-.55,1.8) -- (-.55,2.7) -- (-1.8,2.7) -- (-1.8,-1.8); \filldraw[primedregion=green!30](0,-1.8) -- (0,-1.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (-.6,-.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (-1.2,-1.4) -- (-1.2,-1.8); \filldraw[boxregion=\PrColor] (-1.2,1.8) -- (-1.2,-.2) .. controls ++(270:.4cm) and ++(135:.2cm) .. (-.6,-.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (0,-.2) -- (0,.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. 
(0,1.4) -- (0,1.8); \filldraw[boxregion=green!30] (0,-1.8) -- (0,-1.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (-.6,-.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (0,-.2) -- (0,.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,.8) .. controls ++(-45:.2cm) and ++(90:.4cm) .. (1.2,.2) -- (1.2,-1.8); \filldraw[plusregion=\PrColor] (-.55,2.7) -- (-.55,1.8) -- (0,1.8) -- (0,1.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.4) -- (1.2,2.7); \filldraw[plusregion=green!30] (1.2,-1.8) -- (1.2,.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.4) -- (1.2,2.7) -- (1.8,2.7) -- (1.8,-1.8); \end{scope} \draw[\XColor,thick] (-1.2,-1.8) -- (-1.2,-1.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,-.2) -- (0,.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.4) -- (1.2,2.7); \draw[black,thick] (0,-1.8) -- (0,-1.4) .. controls ++(90:.6cm) and ++(270:.6cm) .. (-1.2,-.2) -- (-1.2,1.8); \draw[snake,thick] (1.2,-1.8) -- (1.2,.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.4) -- (0,1.8); \draw[black,thick] (-.65,1.8) -- (-.65,2.7); \draw[snake,thick] (-.55,1.8) -- (-.55,2.7); \roundNbox{unshaded}{(-.6,1.8)}{.33}{.6}{.6}{\scriptsize{$\left({\sf QSys}^\otimes_{\varphi,\psi}\right)_P$}}; \filldraw[white] (-.6,-.8) circle (.1cm); \draw[thick] (-.6,-.8) circle (.1cm); \filldraw[white] (.6,.8) circle (.1cm); \draw[thick] (.6,.8) circle (.1cm); }\,. 
\] Finally, we check the monoidality coherence axiom for ${\sf QSys}^\otimes_{\bullet,\bullet}$, and we leave ${\sf QSys}^\otimes_{\bullet}$ to the reader: \[ \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners=5pt] (-1.9,.6) rectangle (1.7,7.4); \filldraw[primedregion=green!30] (-1.9,0) rectangle (1.7,8); \filldraw[plusregion=green!30] (1,0) rectangle (-.4,4); \filldraw[boxregion=green!30] (-1,0) rectangle (0,2); \filldraw[boxregion=green!30] (-.6,2) rectangle (-.4,4); \filldraw[boxregion=green!30] (-.2,4) rectangle (0,8); \filldraw[plusregion=green!30] (0,4) rectangle (.2,8); \filldraw[starregion=green!30] (1,0) -- (1,4) -- (.2,4) -- (.2,8) -- (1.7,8) -- (1.7,0); \end{scope} \draw[black,thick] (-1,.6) -- (-1,2); \draw[snake,thick] (0,.6) -- (0,2); \draw[black,thick] (-.6,2) -- (-.6,4); \draw[snake,thick] (-.4,2) -- (-.4,4); \draw[saw,thick] (1,.6) -- (1,4); \draw[black,thick] (-.2,4) -- (-.2,7.4); \draw[snake,thick] (0,4) -- (0,7.4); \draw[saw,thick] (.2,4) -- (.2,7.4); \roundNbox{unshaded}{(0,6)}{.6}{.95}{.75}{\normalsize{$\left({\sf QSys}(\alpha^\otimes_{\varphi,\psi,\gamma})\right)_Q$}}; \roundNbox{unshaded}{(0,4)}{.6}{.95}{.75}{\normalsize{$\left({\sf QSys}^\otimes_{\varphi\otimes\psi,\gamma}\right)_Q$}}; \roundNbox{unshaded}{(-.5,2)}{.6}{.45}{.45}{\normalsize{$\left({\sf QSys}^\otimes_{\varphi,\psi}\right)_Q$}}; \draw[thin, dotted, rounded corners = 5pt] (-1.9,.6) rectangle (1.7,7.4); } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-2.2,-2.9) rectangle (2.2,1.2); \filldraw[primedregion=green!30] (-2.2,-2.9) rectangle (-1.6,1.2); \filldraw[primedregion=gray!55] (-1.6,-2.9) rectangle (-1.2,1.2); \filldraw[boxregion=gray!55] (-1.2,-2.9) rectangle (0,1.2); \filldraw[boxregion=green!30] (-.8,-2.9) rectangle (-.4,-2.3); \filldraw[plusregion=gray!55] (0,-2.9) rectangle (1.2,1.2); \filldraw[plusregion=green!30] (.4,-2.9) rectangle (.8,-1.5); \filldraw[starregion=gray!55] (1.2,-2.9) rectangle (1.6,1.2); 
\filldraw[starregion=green!30] (1.6,-2.9) rectangle (2.2,1.2); \end{scope} \draw[black,thick] (-1.2,-2.9) -- (-1.2,1.2); \draw[snake,thick] (0,-2.9) -- (0,1.2); \draw[saw,thick] (1.2,-2.9) -- (1.2,1.2); \draw[DarkGreen,thick] (-1.6,-2.9) -- (-1.6,1.2); \draw[DarkGreen,thick] (1.6,-2.9) -- (1.6,1.2); \draw[DarkGreen,thick] (.4,-2.9) -- (.4,-1.5); \draw[DarkGreen,thick] (.8,-2.9) -- (.8,-1.5); \draw[DarkGreen,thick] (-.4,-2.9) -- (-.4,-2.3); \draw[DarkGreen,thick] (-.8,-2.9) -- (-.8,-2.3); \draw[DarkGreen,thick] (-1.6,.7) -- (1.6,.7); \draw[DarkGreen,thick] (-1.6,-.7) -- (1.6,-.7); \draw[DarkGreen,thick] (-1.6,-1.5) -- (1.6,-1.5); \draw[DarkGreen,thick] (-1.6,-2.3) -- (.4,-2.3); \filldraw[white] (-1.2,.7) circle (.1cm); \draw[thick] (-1.2,.7) circle (.1cm); \filldraw[white] (-1.2,-.7) circle (.1cm); \draw[thick] (-1.2,-.7) circle (.1cm); \filldraw[white] (-1.2,-1.5) circle (.1cm); \draw[thick] (-1.2,-1.5) circle (.1cm); \filldraw[white] (-1.2,-2.3) circle (.1cm); \draw[thick] (-1.2,-2.3) circle (.1cm); \filldraw[white] (0,.7) circle (.1cm); \draw[thick] (0,.7) circle (.1cm); \filldraw[white] (0,-.7) circle (.1cm); \draw[thick] (0,-.7) circle (.1cm); \filldraw[white] (0,-1.5) circle (.1cm); \draw[thick] (0,-1.5) circle (.1cm); \filldraw[white] (0,-2.3) circle (.1cm); \draw[thick] (0,-2.3) circle (.1cm); \filldraw[white] (1.2,.7) circle (.1cm); \draw[thick] (1.2,.7) circle (.1cm); \filldraw[white] (1.2,-.7) circle (.1cm); \draw[thick] (1.2,-.7) circle (.1cm); \filldraw[white] (1.2,-1.5) circle (.1cm); \draw[thick] (1.2,-1.5) circle (.1cm); \roundNbox{unshaded}{(0,0)}{.33}{1.1}{1.1}{\scriptsize{$\left(\alpha^\otimes_{\varphi,\psi,\gamma}\right)_b$}}; } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-2.2,-1.2) rectangle (2.2,1.2); \filldraw[primedregion=green!30] (-2.2,-1.2) rectangle (-1.6,1.2); \filldraw[primedregion=gray!55] (-1.6,-1.2) rectangle (-1.2,1.2); \filldraw[boxregion=gray!55] (-1.2,-1.2) rectangle (0,1.2); 
\filldraw[boxregion=green!30] (-.8,-1.2) rectangle (-.4,-.7); \filldraw[plusregion=gray!55] (0,-1.2) rectangle (1.2,1.2); \filldraw[plusregion=green!30] (.4,-1.2) rectangle (.8,-.7); \filldraw[starregion=gray!55] (1.2,-1.2) rectangle (1.6,1.2); \filldraw[starregion=green!30] (1.6,-1.2) rectangle (2.2,1.2); \end{scope} \draw[black,thick] (-1.2,-1.2) -- (-1.2,1.2); \draw[snake,thick] (0,-1.2) -- (0,1.2); \draw[saw,thick] (1.2,-1.2) -- (1.2,1.2); \draw[DarkGreen,thick] (-1.6,-1.2) -- (-1.6,1.2); \draw[DarkGreen,thick] (1.6,-1.2) -- (1.6,1.2); \draw[DarkGreen,thick] (-.4,-1.2) -- (-.4,-.7); \draw[DarkGreen,thick] (-.8,-1.2) -- (-.8,-.7); \draw[DarkGreen,thick] (.4,-1.2) -- (.4,-.7); \draw[DarkGreen,thick] (.8,-1.2) -- (.8,-.7); \draw[DarkGreen,thick] (-1.6,.7) -- (1.6,.7); \draw[DarkGreen,thick] (-1.6,-.7) -- (1.6,-.7); \filldraw[white] (-1.2,.7) circle (.1cm); \draw[thick] (-1.2,.7) circle (.1cm); \filldraw[white] (-1.2,-.7) circle (.1cm); \draw[thick] (-1.2,-.7) circle (.1cm); \filldraw[white] (0,.7) circle (.1cm); \draw[thick] (0,.7) circle (.1cm); \filldraw[white] (0,-.7) circle (.1cm); \draw[thick] (0,-.7) circle (.1cm); \filldraw[white] (1.2,.7) circle (.1cm); \draw[thick] (1.2,.7) circle (.1cm); \filldraw[white] (1.2,-.7) circle (.1cm); \draw[thick] (1.2,-.7) circle (.1cm); \roundNbox{unshaded}{(0,0)}{.33}{1.1}{1.1}{\scriptsize{$\left(\alpha^\otimes_{\varphi,\psi,\gamma}\right)_b$}}; } = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-2.2,-1.2) rectangle (2.2,2.9); \filldraw[primedregion=green!30] (-2.2,-1.2) rectangle (-1.6,2.9); \filldraw[primedregion=gray!55] (-1.6,-1.2) rectangle (-1.2,2.9); \filldraw[boxregion=gray!55] (-1.2,-1.2) rectangle (0,2.9); \filldraw[boxregion=green!30] (-.8,-1.2) rectangle (-.4,-.7); \filldraw[boxregion=green!30] (-.8,.7) rectangle (-.4,2.3); \filldraw[plusregion=gray!55] (0,-1.2) rectangle (1.2,2.9); \filldraw[plusregion=green!30] (.4,-1.2) rectangle (.8,-.7); \filldraw[plusregion=green!30] 
(.4,.7) rectangle (.8,1.5); \filldraw[starregion=gray!55] (1.2,-1.2) rectangle (1.6,2.9); \filldraw[starregion=green!30] (1.6,-1.2) rectangle (2.2,2.9); \end{scope} \draw[black,thick] (-1.2,-1.2) -- (-1.2,2.9); \draw[snake,thick] (0,-1.2) -- (0,2.9); \draw[saw,thick] (1.2,-1.2) -- (1.2,2.9); \draw[DarkGreen,thick] (-1.6,-1.2) -- (-1.6,2.9); \draw[DarkGreen,thick] (1.6,-1.2) -- (1.6,2.9); \draw[DarkGreen,thick] (-.4,-1.2) -- (-.4,-.7); \draw[DarkGreen,thick] (-.8,-1.2) -- (-.8,-.7); \draw[DarkGreen,thick] (.4,-1.2) -- (.4,-.7); \draw[DarkGreen,thick] (.8,-1.2) -- (.8,-.7); \draw[DarkGreen,thick] (-.4,.7) -- (-.4,2.3); \draw[DarkGreen,thick] (-.8,.7) -- (-.8,2.3); \draw[DarkGreen,thick] (.4,.7) -- (.4,1.5); \draw[DarkGreen,thick] (.8,.7) -- (.8,1.5); \draw[DarkGreen,thick] (-1.6,.7) -- (1.6,.7); \draw[DarkGreen,thick] (-1.6,-.7) -- (1.6,-.7); \draw[DarkGreen,thick] (-1.6,2.3) -- (1.6,2.3); \draw[DarkGreen,thick] (-.4,1.5) -- (1.6,1.5); \filldraw[white] (-1.2,.7) circle (.1cm); \draw[thick] (-1.2,.7) circle (.1cm); \filldraw[white] (-1.2,-.7) circle (.1cm); \draw[thick] (-1.2,-.7) circle (.1cm); \filldraw[white] (-1.2,2.3) circle (.1cm); \draw[thick] (-1.2,2.3) circle (.1cm); \filldraw[white] (0,.7) circle (.1cm); \draw[thick] (0,.7) circle (.1cm); \filldraw[white] (0,-.7) circle (.1cm); \draw[thick] (0,-.7) circle (.1cm); \filldraw[white] (0,1.5) circle (.1cm); \draw[thick] (0,1.5) circle (.1cm); \filldraw[white] (0,2.3) circle (.1cm); \draw[thick] (0,2.3) circle (.1cm); \filldraw[white] (1.2,.7) circle (.1cm); \draw[thick] (1.2,.7) circle (.1cm); \filldraw[white] (1.2,-.7) circle (.1cm); \draw[thick] (1.2,-.7) circle (.1cm); \filldraw[white] (1.2,1.5) circle (.1cm); \draw[thick] (1.2,1.5) circle (.1cm); \filldraw[white] (1.2,2.3) circle (.1cm); \draw[thick] (1.2,2.3) circle (.1cm); \roundNbox{unshaded}{(0,0)}{.33}{1.1}{1.1}{\scriptsize{$\left(\alpha^\otimes_{\varphi,\psi,\gamma}\right)_b$}}; } = \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded 
corners = 5] (-2.3,.6) rectangle (2.5,7.4); \filldraw[primedregion=green!30] (-2.4,0) rectangle (2.6,8); \filldraw[boxregion=green!30] (-1,0) rectangle (.4,6); \filldraw[plusregion=green!30] (0,0) rectangle (1,4); \filldraw[plusregion=green!30] (.6,4) rectangle (.4,6); \filldraw[boxregion=green!30] (-.2,6) rectangle (0,8); \filldraw[plusregion=green!30] (0,6) rectangle (.2,8); \filldraw[starregion=green!30] (1,0) -- (1,4) -- (.6,4) -- (.6,6) -- (.2,6) -- (.2,8) -- (2.6,8) -- (2.6,.6); \end{scope} \draw[black,thick] (-1,.6) -- (-1,6); \draw[snake,thick] (0,.6) -- (0,4); \draw[saw,thick] (1,.6) -- (1,4); \draw[snake,thick] (.4,4) -- (.4,6); \draw[saw,thick] (.6,4) -- (.6,6); \draw[black,thick] (-.2,6) -- (-.2,7.4); \draw[snake,thick] (0,6) -- (0,7.4); \draw[saw,thick] (.2,6) -- (.2,7.4); \roundNbox{unshaded}{(0,6)}{.6}{.75}{.95}{\normalsize{$\left({\sf QSys}^\otimes_{\varphi,\psi\otimes\gamma}\right)_Q$}}; \roundNbox{unshaded}{(.5,4)}{.6}{.45}{.45}{\normalsize{$\left({\sf QSys}^\otimes_{\psi,\gamma}\right)_Q$}}; \roundNbox{unshaded}{(0,2)}{.6}{1.3}{1.5}{\small{$\left(\alpha^\otimes_{{\sf QSys}(\varphi),{\sf QSys}(\psi),{\sf QSys}(\gamma)}\right)_Q$}}; \draw[thin, dotted, rounded corners = 5pt] (-2.3,.6) rectangle (2.5,7.4); } \,. \] \end{construction} Constructions \ref{construction:Qsys(F)}, \ref{construction:QSys(phi)}, \ref{construction:QSys(n)}, and \ref{construction:QSys-otimes} immediately imply the following proposition. \begin{prop} ${\sf QSys}$ as defined above is a 2-functor on every hom 2-category ${\sf Fun}^\dag(\cA\to \cB)$. \end{prop} \begin{lem} \label{lem:Strict1Associator} For $F\in 2{\mathsf{Cat}}(\cA\to\cB)$ and $G\in 2{\mathsf{Cat}}(\cB\to \cC)$, ${\sf QSys}(G)\circ{\sf QSys}(F)={\sf QSys}(G\circ F)$. 
\end{lem} \begin{proof} By Constructions \ref{const:1CompositionIn2Cat} and \ref{construction:Qsys(F)}, for a 0-cell $Q\in{\sf QSys}(\cA)$, $$ {\sf QSys}(G\circ F)(Q)=G(F(Q))={\sf QSys}(G)({\sf QSys}(F)(Q))=[{\sf QSys}(G)\circ {\sf QSys}(F)](Q), $$ for a 1-cell $X\in{\sf QSys}(\cA)(P\to Q)$, $$ {\sf QSys}(G\circ F)(X)=G(F(X))=[{\sf QSys}(G)\circ {\sf QSys}(F)](X), $$ and for a 2-cell $f\in{\sf QSys}(\cA)(X\Rightarrow Y)$, $$ {\sf QSys}(G\circ F)(f)=G(F(f))=[{\sf QSys}(G)\circ {\sf QSys}(F)](f). $$ For a 0-cell $Q\in{\sf QSys}(\cA)$, ${\sf QSys}(F)^1_Q=\id$, so ${\sf QSys}(G\circ F)^1_Q=\id=({\sf QSys}(G)\circ{\sf QSys}(F))^1_Q$. For 1-cells $X\in{\sf QSys}(\cA)(P\to Q)$ and $Y\in{\sf QSys}(\cA)(Q\to R)$, we have \begin{small} \begin{align*} ({\sf QSys}(G)&\circ{\sf QSys}(F))^2_{X,Y} \\ &= {\sf QSys}(G)({\sf QSys}(F)^2_{X,Y})\star {\sf QSys}(G)^2_{{\sf QSys}(F)(X),{\sf QSys}(F)(Y)} \\ & = {\sf QSys}(G)(F(u^Q_{X,Y})\star F^2_{X,Y}\star (u^{F(Q)}_{F(X),F(Y)})^\dag)\star {\sf QSys}(G)^2_{F(X),F(Y)} \\ & = G(F(u^Q_{X,Y}))\star G(F^2_{X,Y})\star G({(u^{F(Q)}_{F(X),F(Y)})}^\dag)\star G(u^{F(Q)}_{F(X),F(Y)}) \star G^2_{F(X),F(Y)}\star {(u^{G(F(Q))}_{G(F(X)),G(F(Y))})}^\dag \\ & = G(F(u^Q_{X,Y}))\star G(F^2_{X,Y})\star G^2_{F(X),F(Y)}\star {(u^{G(F(Q))}_{G(F(X)),G(F(Y))})}^\dag \\ & = (G\circ F)(u^Q_{X,Y})\star (G\circ F)^2_{X,Y}\star {(u^{(G\circ F)(Q)}_{(G\circ F)(X),(G\circ F)(Y)})}^\dag \\ & = {\sf QSys}(G\circ F)^2_{X,Y}. \end{align*} \end{small} Hence ${\sf QSys}(G)\circ{\sf QSys}(F)={\sf QSys}(G\circ F)$ as claimed. \end{proof} By Lemma \ref{lem:Strict1Associator}, we may define each ${\sf QSys}^\circ_{G,F}:{\sf QSys}(G)\circ{\sf QSys}(F)\Rightarrow {\sf QSys}(G\circ F)$ to be the identity transformation, and we may define each 1-associator modification $\omega^\circ_{H,G,F}$ to be the identity modification, as well as each unitor modification $\ell^\circ_F$ and $r^\circ_G$. Theorem \ref{thm:QSys3Functor} follows immediately, i.e., ${\sf QSys}$ is a $\dag$ 3-functor. 
\qed \begin{rem} Since 1-composition is strict in $2{\mathsf{Cat}}$, 2-categories and 2-functors form a 1-category where we forget all transformations and modifications. (Observe we have \emph{not} truncated, as this would identify equivalent 2-functors.) Lemma \ref{lem:Strict1Associator} shows that ${\sf QSys}$ is a functor on this 1-category. \end{rem} \begin{rem} It was pointed out to us by Thibault D\'ecoppet and David Reutter that our $\dag$ 3-endofunctor ${\sf QSys}$ on $\rm C^*/W^*$ $2{\mathsf{Cat}}$ should be left 3-adjoint to the inclusion of the full 3-subcategory on the Q-system complete $\rm C^*/W^*$ 2-categories. We will not prove this here as it would take us too far afield. We note, however, that this would endow ${\sf QSys}$ with the structure of a symmetric lax monoidal $\dag$ 3-functor. \end{rem} \section{Universal property of Q-system completion} \label{sec:UniversalProperty} In this section, we give the strongest possible universal property which is satisfied by Q-system completion. Namely, we prove Theorem \ref{thm:UniqueLift}, which states that the \emph{lift 2-category} of a $\dag$ 2-functor $F: \cC\to \cD$ from a $\rm C^*/W^*$ 2-category $\cC$ into a Q-system complete $\rm C^*/W^*$ 2-category $\cD$ is \emph{$(-2)$-truncated}, i.e., equivalent to a point. We now define the necessary terms to prove this theorem, and we explain the proof strategy from \cite[\S3.1]{1910.03178}. \subsection{Lift categories and homotopy fibers} \begin{defn} \label{defn:Lift2Category} Suppose $\cC,\cD,\cE$ are $\rm C^*/W^*$ 2-categories and $F: \cC\to \cD$ and $G: \cC \to \cE$ are $\dag$ 2-functors. The \emph{lift 2-category} of $F$ along $G$ is the \emph{homotopy fiber} 2-category of the pre-composition 2-functor $-\circ G : {\sf Fun}^\dag(\cE\to \cD) \to {\sf Fun}^\dag(\cC\to \cD)$ at $F\in {\sf Fun}^\dag(\cC\to \cD)$. We remind the reader that the definition of $-\circ-$ in $2{\mathsf{Cat}}$ is detailed in Construction \ref{const:1CompositionIn2Cat} above. 
\end{defn} \begin{rem} We now further unpack Defintion \ref{defn:Lift2Category}. The lift 2-category of $F$ along $G$ has: \begin{itemize} \item \underline{objects:} pairs $(A,\alpha)$, where $A:\cE\to \cD$ is a $\dag$ 2-functor and $\alpha:F\Rightarrow A\circ G$ is a unitary 2-transformation. \[ \begin{tikzcd}[row sep=1.2em] \cC\arrow[rr, "G"] \arrow[ddrr, swap, "F"] && \cE\arrow[dd,"A"] \arrow[dl,Leftarrow,shorten <= 1em, shorten >= 1em, "\alpha"] \\ &\mbox{}& \\ && \cD. \end{tikzcd} \] \item \underline{1-morphisms:} pairs $(\varphi,m):(A,\alpha)\to(B,\beta)$, where $\varphi:A\Rightarrow B$ is a $\dag$ 2-transformation and $m:\beta\Rrightarrow \alpha\otimes (\varphi\circ G)$ is a unitary 2-modification: \[ \begin{tikzcd}[row sep=1.2em] \cC\arrow[rr, "G"] \arrow[ddrr, swap, "F"] && \cE\arrow[dd,"B"] \arrow[dl,Leftarrow,shorten <= 1em, shorten >= 1em, "\beta"] \\ &\mbox{}& \\ && \cD. \end{tikzcd} \qquad \overset{m}{\Rrightarrow} \qquad \begin{tikzcd}[row sep=1.2em] \cC\arrow[rr, "G"] \arrow[ddrr, swap, "F"] && \cE\arrow[dd,swap,"A"] \arrow[dl,Leftarrow,shorten <= 1em, shorten >= 1em, "\alpha"] \arrow[dd,bend left = 90, "B"] \\ &\mbox{}& \arrow[r,Rightarrow, shorten >= 1.5em, "\!\!\!\!\!\!\!\!\!\!\!\varphi"] &\mbox{} \\ && \cD \end{tikzcd} \] \item \underline{2-morphisms:} $p:(\varphi,m) \Rightarrow (\psi,n)$, where $p:\varphi\Rrightarrow\psi$ is a $\dag$ 2-modification such that \[ \begin{tikzcd}[row sep=1.2em] F\arrow[rr,Rightarrow, "\alpha"] \arrow[ddrr, Rightarrow,swap, "\beta"] && A\circ G\arrow[dd,Rightarrow, "\psi\circ G"] \\ & \mbox{} \arrow[ur,triplecd,shorten <= 1em, shorten >= 1em, "n"] & \\ && B\circ G. 
\end{tikzcd} \qquad = \qquad \begin{tikzcd}[row sep=1.2em] F\arrow[rr,Rightarrow, "\alpha"] \arrow[ddrr, Rightarrow,swap, "\beta"] && A\circ G\arrow[dd,swap,Rightarrow, "\varphi\circ G"] \arrow[dd,Rightarrow, bend left = 90, "\psi \circ G"] \\ & \arrow[ur,triplecd,shorten <= 1em, shorten >= 1em, "m"] \mbox{} & \arrow[r,triplecd,shorten >=1.5em, "\!\!\!\!\!\!\!\!\!\!\!p \circ G"] &\mbox{} \\ && B\circ G \end{tikzcd} \] \end{itemize} \end{rem} Recall that for a 2-category $\cC$, its \emph{core} is the 2-subcategory $\core(\cC)$ with only invertible 1-cells and invertible 2-cells. When $\cC$ is $\rm C^*/W^*$, its \emph{unitary core} $\core^\dag(\cC)$ is the 2-subcategory of $\core(\cC)$ with only unitary 2-cells. In a $\rm C^*/W^*$ 2-category, by polar decomposition for invertible 2-cells, there exists an invertible 2-cell $\cC({}_aX_b\Rightarrow {}_aY_b)$ if and only if there exists a unitary 2-cell, so the connectivity of $\core(\cC)$ and $\core^\dag(\cC)$ agree. We pass to cores in order to utilize the notion of $k$-truncated 2-functor between 2-groupoids from \cite[\S3.1]{1910.03178}. \begin{defn}[{cf.~\cite[Def.~3.3]{1910.03178}}] Suppose $\cC, \cD$ are $2$-groupoids and $U: \cC \to \cD$ is a 2-functor. We call $U$ $k$-\emph{truncated} or $(k+1)$-\emph{monic} \cite[\S5.5]{MR2664619} if $U$ is: \begin{itemize} \item $k=2$: (no condition) \item $k=1$: faithful on $2$-cells \item $k=0$: fully faithful on $2$-cells \item $k=-1$: an equivalence on hom-categories \item $k=-2$: an equivalence of 2-categories. \end{itemize} \end{defn} The following proposition connects the notions of a $k$-truncated 2-functor between 2-groupoids and its homotopy fibers. \begin{prop}[{cf.~\cite[Prop.~3.4]{1910.03178}}] \label{prop:TruncatedGroupoids} Suppose $\cC,\cD$ are $2$-groupoids, and $U: \cC \to \cD$ is a 2-functor. 
For every $-2\leq k\leq 2$, $U$ is $k$-truncated if and only if at each object $d\in \cD$, the homotopy fiber $\mathsf{hoFib}_d(U)$ is $k$-truncated as an $2$-groupoid, i.e., a $k$-groupoid.\footnote{We use `negative categorical thinking' \cite{MR2664619} when $k=-2,-1,0$. That is, a $0$-groupoid is a set, a $(-1)$-groupoid is either a point or the empty set, and a $(-2)$-groupoid is a point.} \end{prop} \subsection{Dominance and truncation} \label{sec:Dominance} Observe that $-\circ G: {\sf Fun}^\dag(\cE \to \cD) \to {\sf Fun}^\dag(\cC \to \cD)$ restricts to a $\dag$ 2-functor $-\circ G: \core^\dag({\sf Fun}^\dag(\cE \to \cD)) \to \core^\dag({\sf Fun}^\dag(\cC \to \cD))$. Hence, in order to apply Proposition \ref{prop:TruncatedGroupoids} to the $\dag$ 2-functor $-\circ G$, we need (essential) surjectivity conditions on $-\circ G$. (Being faithful on 2-morphisms is being surjective on equalities between 2-morphisms.) A suitable notion of (essential) surjectivity for a linear 2-functor is \emph{dominance}, which we define via the notion of \emph{condensation} in a 2-category \cite{1905.09566}. \begin{defn} \label{defn:Condensation} Suppose $\cC$ is a 2-category and $a,b\in \cC$ are 0-cells. A \emph{condensation} $X: a\mathrel{\,\hspace{.75ex}\joinrel\rhook\joinrel\hspace{-.75ex}\joinrel\rightarrow} b$ consists of 1-cells ${}_aX_b, {}_bX_a^{\bullet}$ and 2-cells $\varepsilon_X : {}_bX^\bullet\otimes_a X_b\to 1_b$ and $\delta_X: 1_b \to {}_bX^\bullet\otimes_a X_b$ such that $\varepsilon_X\star \delta_X = 1_{1_b}$. 
Graphically, we denote $X: a\mathrel{\,\hspace{.75ex}\joinrel\rhook\joinrel\hspace{-.75ex}\joinrel\rightarrow} b$ by \[ \tikzmath{\filldraw[gray!30, rounded corners=5, very thin, baseline=1cm] (0,0) rectangle (.6,.6);}=a \qquad\qquad \tikzmath{\filldraw[gray!55, rounded corners=5, very thin, baseline=1cm] (0,0) rectangle (.6,.6);}=b \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \fill[gray!30] (0,0) rectangle (-.3,.6); \fill[gray!55] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, \XColor] (0,0) -- (0,.6); } ={}_{a}X_b \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \fill[gray!55] (0,0) rectangle (-.3,.6); \fill[gray!30] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, \XColor] (0,0) -- (0,.6); } ={}_{b}X^\bullet_a \] \[ \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.5,0) rectangle (.5,.6); \filldraw[gray!55] (-.5,0) rectangle (.5,.8); \filldraw[gray!30] (-.2,0) -- (-.2,.2) arc (180:0:.2cm) -- (.2,0); \end{scope} \draw[thick,\XColor] (-.2,0) -- (-.2,.2) arc (180:0:.2cm) -- (.2,0); } =\varepsilon_X \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.5,0) rectangle (.5,.6); \filldraw[gray!55] (-.5,0) rectangle (.5,.8); \filldraw[gray!30] (-.2,.6) -- (-.2,.4) arc (-180:0:.2cm) -- (.2,.6); \end{scope} \draw[thick,\XColor] (-.2,.6) -- (-.2,.4) arc (-180:0:.2cm) -- (.2,.6); } =\delta_X \qquad\text{such that}\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.5,-.5) rectangle (.5,.5); \filldraw[gray!55] (-.5,-.5) rectangle (.5,.5); \filldraw[gray!30] (0,0) circle (.2cm); \end{scope} \draw[thick,\XColor] (0,0) circle (.2cm); } =1_{1_b} \] When $\cC$ is $\rm C^*/W^*$, a condensation $X: a\mathrel{\,\hspace{.75ex}\joinrel\rhook\joinrel\hspace{-.75ex}\joinrel\rightarrow} b$ is called a \emph{dagger condensation} if $\delta_X = \varepsilon_X^\dag$. 
\end{defn} \begin{defn} A 2-functor $G: \cC \to \cE$ is called: \begin{itemize} \item \emph{0-dominant} if for all $e\in\cE$, there is a condensation $G(c)\mathrel{\,\hspace{.75ex}\joinrel\rhook\joinrel\hspace{-.75ex}\joinrel\rightarrow} e$ for some $c\in \cC$, \item \emph{locally dominant} if every hom functor $G_{a\to b}: \cC(a\to b) \to \cE(G(a)\to G(b))$ is dominant as a linear functor, and \item \emph{dominant} if $G$ is both 0-dominant and locally dominant. \end{itemize} When $G$ is a $\dag$ 2-functor between $\rm C^*/W^*$ 2-categories, we call $G$ \begin{itemize} \item \emph{orthogonally} 0-dominant if for all $e\in \cE$, there is a dagger condensation $G(c) \mathrel{\,\hspace{.75ex}\joinrel\rhook\joinrel\hspace{-.75ex}\joinrel\rightarrow} e$ for some $c\in \cC$, \item locally \emph{orthogonally} dominant if every hom functor $G_{a\to b}$ is orthogonally dominant as a linear $\dag$-functor, i.e., every 1-cell ${}_{G(a)}Y_{G(b)}\in \cE$ is unitarily isomorphic to an orthogonal direct summand of some ${}_{G(a)}G(X)_{G(b)}$, and \item \emph{orthogonally} dominant if $G$ is both orthogonally 0-dominant and locally dominant. \end{itemize} \end{defn} \begin{rem} There is an analogous notion of $k$-dominance for an $n$-functor $G$ between $n$-categories for $0\leq k\leq n-1$: every $k$-morphism in the target should admit a condensation from a source in the image of $G$. \end{rem} For the propositions in this section, we work with algebraic 2-categories and 2-functors, and we make particular comments about the $\rm C^*/W^*$ setting. \begin{prop} If a 2-functor $G:\cC\to \cE$ is 0-dominant, then $-\circ G:{\sf Fun}(\cC\to \cE)\to {\sf Fun}(\cC\to \cD)$ is faithful on 2-morphisms. In the $\rm C^*/W^*$ setting, if $G: \cC\to \cE$ is orthogonally 0-dominant, then $-\circ G: {\sf Fun}^\dag(\cC\to \cE) \to {\sf Fun}^\dag(\cC\to \cD)$ is faithful on 2-morphisms. \end{prop} \begin{proof} Let $A,B\in{\sf Fun}(\cE\to\cD)$ and $\varphi,\psi:A\Rightarrow B$. 
Suppose $m,n:\varphi\Rrightarrow\psi$ and $m\circ G=n\circ G$. We show $m=n$. For each $e\in\cE$, there exists a 0-cell $c\in\cC$ and a condensation $X:G(c)\mathrel{\,\hspace{.75ex}\joinrel\rhook\joinrel\hspace{-.75ex}\joinrel\rightarrow} e$. We denote $G(c),e\in \cD$, ${}_{G(c)}X_e\in \cD(G(c)\to e)$, and the functors $A,B$ graphically by \begin{equation} \label{eq:GraphicalCalculusFaithfulOn2Mor} \tikzmath{\filldraw[gray!30, rounded corners=5, very thin, baseline=1cm] (0,0) rectangle (.6,.6);}=G(c) \qquad\qquad \tikzmath{\filldraw[gray!55, rounded corners=5, very thin, baseline=1cm] (0,0) rectangle (.6,.6);}=e \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \fill[gray!30] (0,0) rectangle (-.3,.6); \fill[gray!55] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, \XColor] (0,0) -- (0,.6); } ={}_{G(c)}X_e \qquad\qquad \tikzmath{ \filldraw[primedregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } = A \qquad\qquad \tikzmath{ \filldraw[boxregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } = B. \end{equation} The modification axiom implies the following: \[ \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \filldraw[gray!30] (-.1,1) rectangle (.29,1.4); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.8) -- (0,2.2); \draw[snake,thick] (0,2.2) -- (0,3); \roundNbox{unshaded}{(0,2.2)}{.3}{.1}{.1}{\scriptsize{$n_{G(c)}$}}; \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$A(X)$}}; \node at (1.2,3.2) {\scriptsize{$B(X)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_e$}}; \node at (0,3.2) {\scriptsize{$\psi_{G(c)}$}}; \node at (.1,1.2) {\scriptsize{$\varphi_{X}$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,2.4) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,1.2) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3); \filldraw[boxregion=gray!55] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3) -- (1.8,3) -- (1.8,0); \filldraw[gray!30] (-.1,1.6) rectangle (.29,2); \end{scope} \draw[\XColor,thick] (0,0) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.8); \draw[snake,thick] (1.2,.8) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,2.4) -- (0,3); \roundNbox{unshaded}{(1.2,.8)}{.3}{0}{0}{\scriptsize{$n_e$}}; \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$A(X)$}}; \node at (1.2,3.2) {\scriptsize{$B(X)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_e$}}; \node at (0,3.2) {\scriptsize{$\psi_{G(c)}$}}; \node at (.1,1.8) {\scriptsize{$\psi_X$}}; } \qquad\qquad \Longrightarrow \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.6,-.9) rectangle (.6,.9); \filldraw[primedregion=gray!55] (-.6,-.9) rectangle (0,.9); \filldraw[boxregion=gray!55] (0,-.9) rectangle (.6,.9); \end{scope} \draw[black,thick] (0,-.9) -- (0,0); \draw[snake,thick] (0,.9) -- (0,0); \roundNbox{unshaded}{(0,0)}{.3}{0}{0}{\scriptsize{$n_e$}}; \node at (0,1.1) {\scriptsize{$\psi_{e}$}}; \node at (0,-1.1) {\scriptsize{$\varphi_e$}}; } = \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.9,-.9) rectangle (.9,.9); \filldraw[primedregion=gray!55] (-.9,-.9) rectangle (.9,.9); \filldraw[boxregion=gray!55] (.6,-.9) -- (.6,-.74) .. controls ++(90:.2cm) and ++(270:.2cm) .. (0,-.3) -- (0,.3) .. controls ++(90:.2cm) and ++(270:.2cm) .. (-.6,.74) -- (-.6,.9) -- (.9,.9) -- (.9,-.9); \filldraw[primedregion=gray!30] (0,0) circle (.6cm); \filldraw[boxregion=gray!30] (.3,-.52) .. controls ++(135:.0667cm) and ++(270:.1333cm) .. (0,-.3) -- (0,.3) .. controls ++(90:.1333cm) and ++(-45:.0667cm) .. (-.3,.52) arc (120:-60:.6cm); \end{scope} \draw[\XColor,thick] (0,0) circle (.6cm); \draw[black,thick] (0,-.3) .. controls ++(270:.2cm) and ++(90:.2cm) .. (.6,-.74) -- (.6,-.9); \draw[snake,thick] (0,.3) .. controls ++(90:.2cm) and ++(270:.2cm) .. 
(-.6,.74) -- (-.6,.9); \roundNbox{unshaded}{(0,0)}{.3}{.1}{.1}{\scriptsize{$n_{G(c)}$}}; \filldraw[white] (.3,-.52) circle (.07cm); \draw[thick] (.3,-.52) circle (.07cm); \filldraw[white] (-.3,.52) circle (.07cm); \draw[thick] (-.3,.52) circle (.07cm); \node at (.6,-1.1) {\scriptsize{$\varphi_e$}}; \node at (-.6,1.1) {\scriptsize{$\psi_{e}$}}; } = \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.9,-.9) rectangle (.9,.9); \filldraw[primedregion=gray!55] (-.9,-.9) rectangle (.9,.9); \filldraw[boxregion=gray!55] (.6,-.9) -- (.6,-.74) .. controls ++(90:.2cm) and ++(270:.2cm) .. (0,-.3) -- (0,.3) .. controls ++(90:.2cm) and ++(270:.2cm) .. (-.6,.74) -- (-.6,.9) -- (.9,.9) -- (.9,-.9); \filldraw[primedregion=gray!30] (0,0) circle (.6cm); \filldraw[boxregion=gray!30] (.3,-.52) .. controls ++(135:.0667cm) and ++(270:.1333cm) .. (0,-.3) -- (0,.3) .. controls ++(90:.1333cm) and ++(-45:.0667cm) .. (-.3,.52) arc (120:-60:.6cm); \end{scope} \draw[\XColor,thick] (0,0) circle (.6cm); \draw[black,thick] (0,-.3) .. controls ++(270:.2cm) and ++(90:.2cm) .. (.6,-.74) -- (.6,-.9); \draw[snake,thick] (0,.3) .. controls ++(90:.2cm) and ++(270:.2cm) .. (-.6,.74) -- (-.6,.9); \roundNbox{unshaded}{(0,0)}{.3}{.13}{.13}{\scriptsize{$m_{G(c)}$}}; \filldraw[white] (.3,-.52) circle (.07cm); \draw[thick] (.3,-.52) circle (.07cm); \filldraw[white] (-.3,.52) circle (.07cm); \draw[thick] (-.3,.52) circle (.07cm); \node at (.6,-1.1) {\scriptsize{$\varphi_e$}}; \node at (-.6,1.1) {\scriptsize{$\psi_{e}$}}; } = \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.6,-.9) rectangle (.6,.9); \filldraw[primedregion=gray!55] (-.6,-.9) rectangle (0,.9); \filldraw[boxregion=gray!55] (0,-.9) rectangle (.6,.9); \end{scope} \draw[black,thick] (0,-.9) -- (0,0); \draw[snake,thick] (0,.9) -- (0,0); \roundNbox{unshaded}{(0,0)}{.3}{0}{0}{\scriptsize{$m_e$}}; \node at (0,1.1) {\scriptsize{$\psi_{e}$}}; \node at (0,-1.1) {\scriptsize{$\varphi_e$}}; }\,. \] Hence $m=n$, as claimed. 
\end{proof} \begin{prop} \label{prop:DominantToFullyFaithful} If a 2-functor $G:\cC\to \cE$ is dominant, then $-\circ G:{\sf Fun}(\cC\to \cE)\to {\sf Fun}(\cC\to \cD)$ is fully faithful on 2-morphisms. An analogous statement holds in the $\rm C^*/W^*$ setting. \end{prop} \begin{proof} It suffices to show $-\circ G$ is full on 2-morphisms. Suppose $A,B\in{\sf Fun}(\cE\to\cD)$, $\varphi,\psi:A\Rightarrow B$, and $p:\varphi\circ G\Rrightarrow \psi\circ G$. We show there exists $n:\varphi\Rrightarrow\psi$ such that $p=n\circ G$. First, for each 1-cell $X\in\cE(G(c)\to G(c'))$, there exists a 1-cell $Y\in\cC(c\to c')$ such that $G(Y)\mathrel{\mathop{\rightleftarrows}\limits^{r}_{s}}X$ is a retract, i.e., $rs=1_X$. Since $p:\varphi\circ G\Rrightarrow\psi\circ G$ is a 2-modification, building on our graphical conventions \eqref{eq:GraphicalCalculusFaithfulOn2Mor}, \[ \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!75] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!75] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \filldraw[gray!30] (-.6,1) rectangle (.25,1.4); \end{scope} \draw[orange,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,1.8) -- (0,2.2); \draw[snake,thick] (0,2.2) -- (0,3); \roundNbox{unshaded}{(0,2.2)}{.3}{0}{0}{\scriptsize{$p_c$}}; \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$AG(Y)$}}; \node at (1.2,3.2) {\scriptsize{$BG(Y)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_{G(c')}$}}; \node at (0,3.2) {\scriptsize{$\psi_{G(c)}$}}; \node at (-.1,1.2) {\scriptsize{$\varphi_{G(Y)}$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,2.4) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!75] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,1.2) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3); \filldraw[boxregion=gray!75] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3) -- (1.8,3) -- (1.8,0); \filldraw[gray!30] (-.6,1.6) rectangle (.25,2); \end{scope} \draw[orange,thick] (0,0) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.8); \draw[snake,thick] (1.2,.8) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,2.4) -- (0,3); \roundNbox{unshaded}{(1.2,.8)}{.3}{0}{0}{\scriptsize{$p_{c'}$}}; \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$AG(Y)$}}; \node at (1.2,3.2) {\scriptsize{$BG(Y)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_{G(c')}$}}; \node at (0,3.2) {\scriptsize{$\psi_{G(c)}$}}; \node at (-.1,1.8) {\scriptsize{$\psi_{G(Y)}$}}; } \qquad\qquad \text{where} \qquad\qquad \tikzmath{\filldraw[gray!75, rounded corners=5, very thin, baseline=1cm] (0,0) rectangle (.6,.6);}=G(c') \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \fill[gray!30] (0,0) rectangle (-.3,.6); \fill[gray!75] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, orange] (0,0) -- (0,.6); } ={}_{G(c)}G(Y)_{G(c')}. \] This implies that for \emph{any} $X\in \cE(G(c)\to G(c'))$ (and not just 1-cells in the image of $G$!), \begin{equation} \label{eq:ModificationForAll1Cells} \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!75] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!75] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \filldraw[gray!30] (-.1,1) rectangle (.3,1.4); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.6) .. 
controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.8) -- (0,2.2); \draw[snake,thick] (0,2.2) -- (0,3); \roundNbox{unshaded}{(0,2.2)}{.3}{0}{0}{\scriptsize{$p_c$}}; \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$A(X)$}}; \node at (1.2,3.2) {\scriptsize{$B(X)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_{G(c')}$}}; \node at (0,3.2) {\scriptsize{$\psi_{G(c)}$}}; \node at (.1,1.2) {\scriptsize{$\varphi_{X}$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!75] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!75] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \filldraw[gray!30] (-.1,1) rectangle (.3,1.4); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,1.8) -- (0,2.2); \draw[snake,thick] (0,2.2) -- (0,3); \roundNbox{unshaded}{(0,2.2)}{.3}{0}{0}{\scriptsize{$p_c$}}; \roundNbox{unshaded}{(1.2,2.2)}{.3}{.1}{.1}{\scriptsize{$B(rs)$}}; \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$A(X)$}}; \node at (1.2,3.2) {\scriptsize{$B(X)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_{G(c')}$}}; \node at (0,3.2) {\scriptsize{$\psi_{G(c)}$}}; \node at (.1,1.2) {\scriptsize{$\varphi_{X}$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,-.6) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,-.6) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,-.6); \filldraw[primedregion=gray!75] (1.2,-.6) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,-.6); \filldraw[boxregion=gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!75] (1.2,-.6) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,-.6); \filldraw[gray!30] (-.6,1) rectangle (.25,1.4); \end{scope} \draw[\XColor,thick] (0,-.6) -- (0,.2); \draw[\XColor,thick] (1.2,2.2) -- (1.2,3); \draw[orange,thick] (0,.2) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,2.2); \draw[black,thick] (1.2,-.6) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,1.8) -- (0,2.2); \draw[snake,thick] (0,2.2) -- (0,3); \roundNbox{unshaded}{(0,2.2)}{.3}{0}{0}{\scriptsize{$p_c$}}; \roundNbox{unshaded}{(1.2,2.2)}{.3}{.05}{.05}{\scriptsize{$B(r)$}}; \roundNbox{unshaded}{(0,.2)}{.3}{.05}{.05}{\scriptsize{$A(s)$}}; \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.8) {\scriptsize{$A(X)$}}; \node at (1.2,3.2) {\scriptsize{$B(X)$}}; \node at (1.2,-.8) {\scriptsize{$\varphi_{G(c')}$}}; \node at (0,3.2) {\scriptsize{$\psi_{G(c)}$}}; \node at (-.1,1.2) {\scriptsize{$\varphi_{G(Y)}$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3.6); \filldraw[primedregion=gray!30] (0,0) -- (0,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,2.4) -- (0,3.6) -- (-.6,3.6) -- (-.6,0); \filldraw[primedregion=gray!75] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,1.2) -- (0,0); \filldraw[boxregion=gray!30] (0,3.6) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3.6); \filldraw[boxregion=gray!75] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3.6) -- (1.8,3.6) -- (1.8,0); \filldraw[gray!30] (-.6,1.6) rectangle (.25,2); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.8); \draw[\XColor,thick] (1.2,2.8) -- (1.2,3.6); \draw[orange,thick] (0,.8) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,2.8); \draw[black,thick] (1.2,0) -- (1.2,.8); \draw[snake,thick] (1.2,.8) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,2.4) -- (0,3.6); \roundNbox{unshaded}{(1.2,.8)}{.3}{0}{0}{\scriptsize{$p_{c'}$}}; \roundNbox{unshaded}{(1.2,2.8)}{.3}{.05}{.05}{\scriptsize{$B(r)$}}; \roundNbox{unshaded}{(0,.8)}{.3}{.05}{.05}{\scriptsize{$A(s)$}}; \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$A(X)$}}; \node at (1.2,3.8) {\scriptsize{$B(X)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_{G(c')}$}}; \node at (0,3.8) {\scriptsize{$\psi_{G(c)}$}}; \node at (-.1,1.8) {\scriptsize{$\psi_{G(Y)}$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,2.4) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!75] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,1.2) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3); \filldraw[boxregion=gray!75] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3) -- (1.8,3) -- (1.8,0); \filldraw[gray!30] (-.1,1.6) rectangle (.29,2); \end{scope} \draw[\XColor,thick] (0,0) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.8); \draw[snake,thick] (1.2,.8) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,2.4) -- (0,3); \roundNbox{unshaded}{(1.2,.8)}{.3}{0}{0}{\scriptsize{$p_{c'}$}}; \roundNbox{unshaded}{(0,.8)}{.3}{.1}{.1}{\scriptsize{$A(rs)$}}; \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$A(X)$}}; \node at (1.2,3.2) {\scriptsize{$B(X)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_{G(c')}$}}; \node at (0,3.2) {\scriptsize{$\psi_{G(c)}$}}; \node at (.1,1.8) {\scriptsize{$\psi_{X}$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!30] (0,0) -- (0,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,2.4) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!75] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,1.2) -- (0,0); \filldraw[boxregion=gray!30] (0,3) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3); \filldraw[boxregion=gray!75] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3) -- (1.8,3) -- (1.8,0); \filldraw[gray!30] (-.1,1.6) rectangle (.29,2); \end{scope} \draw[\XColor,thick] (0,0) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.8); \draw[snake,thick] (1.2,.8) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,2.4) -- (0,3); \roundNbox{unshaded}{(1.2,.8)}{.3}{0}{0}{\scriptsize{$p_{c'}$}}; \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$A(X)$}}; \node at (1.2,3.2) {\scriptsize{$B(X)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_{G(c')}$}}; \node at (0,3.2) {\scriptsize{$\psi_{G(c)}$}}; \node at (.1,1.8) {\scriptsize{$\psi_{X}$}}; }\,. 
\end{equation} Next we construct $n:\varphi\Rrightarrow \psi$ such that $p=n\circ G$. For each $c\in \cC$, we define $n_{G(c)}:=p_c$ so that $p_c=(n\circ G)_c$, and $p=n\circ G$, provided we can extend $n$ to a modification. For each $e\in \cE$, there exists a 0-cell $c\in\cC$ and a condensation $X:G(c)\mathrel{\,\hspace{.75ex}\joinrel\rhook\joinrel\hspace{-.75ex}\joinrel\rightarrow} e$. We define $n_e$ as follows \[ \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.6,-.9) rectangle (.6,.9); \filldraw[primedregion=gray!55] (-.6,-.9) rectangle (0,.9); \filldraw[boxregion=gray!55] (0,-.9) rectangle (.6,.9); \end{scope} \draw[black,thick] (0,-.9) -- (0,0); \draw[snake,thick] (0,.9) -- (0,0); \roundNbox{unshaded}{(0,0)}{.3}{0}{0}{\scriptsize{$n_e$}}; \node at (0,1.1) {\scriptsize{$\psi_{e}$}}; \node at (0,-1.1) {\scriptsize{$\varphi_e$}}; } := \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.9,-.9) rectangle (.9,.9); \filldraw[primedregion=gray!55] (-.9,-.9) rectangle (.9,.9); \filldraw[boxregion=gray!55] (.6,-.9) -- (.6,-.74) .. controls ++(90:.2cm) and ++(270:.2cm) .. (0,-.3) -- (0,.3) .. controls ++(90:.2cm) and ++(270:.2cm) .. (-.6,.74) -- (-.6,.9) -- (.9,.9) -- (.9,-.9); \filldraw[primedregion=gray!30] (0,0) circle (.6cm); \filldraw[boxregion=gray!30] (.3,-.52) .. controls ++(135:.0667cm) and ++(270:.1333cm) .. (0,-.3) -- (0,.3) .. controls ++(90:.1333cm) and ++(-45:.0667cm) .. (-.3,.52) arc (120:-60:.6cm); \end{scope} \draw[\XColor,thick] (0,0) circle (.6cm); \draw[black,thick] (0,-.3) .. controls ++(270:.2cm) and ++(90:.2cm) .. (.6,-.74) -- (.6,-.9); \draw[snake,thick] (0,.3) .. controls ++(90:.2cm) and ++(270:.2cm) .. 
(-.6,.74) -- (-.6,.9); \roundNbox{unshaded}{(0,0)}{.3}{.1}{.1}{\scriptsize{$n_{G(c)}$}}; \filldraw[white] (.3,-.52) circle (.07cm); \draw[thick] (.3,-.52) circle (.07cm); \filldraw[white] (-.3,.52) circle (.07cm); \draw[thick] (-.3,.52) circle (.07cm); \node at (.6,-1.1) {\scriptsize{$\varphi_e$}}; \node at (-.6,1.1) {\scriptsize{$\psi_{e}$}}; }\,. \] We prove $n$ is a 2-modification $\varphi\Rrightarrow\psi$. Suppose $e'\in\cE$ is a 0-cell and $Z\in\cE(e\to e')$ is a 1-cell. Let $X':G(c')\mathrel{\,\hspace{.75ex}\joinrel\rhook\joinrel\hspace{-.75ex}\joinrel\rightarrow} e'$ be a condensation for some 0-cell $c'\in\cC$. Using the graphical conventions \[ \tikzmath{\filldraw[gray!75, rounded corners=5, very thin, baseline=1cm] (0,0) rectangle (.6,.6);}=G(c') \qquad\qquad \tikzmath{\filldraw[gray!95, rounded corners=5, very thin, baseline=1cm] (0,0) rectangle (.6,.6);}=e' \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \fill[gray!75] (0,0) rectangle (-.3,.6); \fill[gray!95] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, blue] (0,0) -- (0,.6); }=X' \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.6,-.9) rectangle (.6,.9); \filldraw[primedregion=gray!95] (-.6,-.9) rectangle (0,.9); \filldraw[boxregion=gray!95] (0,-.9) rectangle (.6,.9); \end{scope} \draw[black,thick] (0,-.9) -- (0,0); \draw[snake,thick] (0,.9) -- (0,0); \roundNbox{unshaded}{(0,0)}{.3}{0}{0}{\scriptsize{$n_{e'}$}}; \node at (0,1.1) {\scriptsize{$\psi_{e'}$}}; \node at (0,-1.1) {\scriptsize{$\varphi_{e'}$}}; } := \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.9,-.9) rectangle (.9,.9); \filldraw[primedregion=gray!95] (-.9,-.9) rectangle (.9,.9); \filldraw[boxregion=gray!95] (.6,-.9) -- (.6,-.74) .. controls ++(90:.2cm) and ++(270:.2cm) .. (0,-.3) -- (0,.3) .. controls ++(90:.2cm) and ++(270:.2cm) .. 
(-.6,.74) -- (-.6,.9) -- (.9,.9) -- (.9,-.9); \filldraw[primedregion=gray!75] (0,0) circle (.6cm); \filldraw[boxregion=gray!75] (.3,-.52) .. controls ++(135:.0667cm) and ++(270:.1333cm) .. (0,-.3) -- (0,.3) .. controls ++(90:.1333cm) and ++(-45:.0667cm) .. (-.3,.52) arc (120:-60:.6cm); \end{scope} \draw[blue,thick] (0,0) circle (.6cm); \draw[black,thick] (0,-.3) .. controls ++(270:.2cm) and ++(90:.2cm) .. (.6,-.74) -- (.6,-.9); \draw[snake,thick] (0,.3) .. controls ++(90:.2cm) and ++(270:.2cm) .. (-.6,.74) -- (-.6,.9); \roundNbox{unshaded}{(0,0)}{.3}{.13}{.13}{\scriptsize{$n_{G(c')}$}}; \filldraw[white] (.3,-.52) circle (.07cm); \draw[thick] (.3,-.52) circle (.07cm); \filldraw[white] (-.3,.52) circle (.07cm); \draw[thick] (-.3,.52) circle (.07cm); \node at (.6,-1.1) {\scriptsize{$\varphi_{e'}$}}; \node at (-.6,1.1) {\scriptsize{$\psi_{e'}$}}; } \qquad\qquad \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \fill[gray!55] (0,0) rectangle (-.3,.6); \fill[gray!95] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, violet] (0,0) -- (0,.6); }=Z, \] we see that \[ \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!55] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!95] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[boxregion=gray!55] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!95] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[violet,thick] (0,0) -- (0,.6) .. 
controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.8) -- (0,2.2); \draw[snake,thick] (0,2.2) -- (0,3); \roundNbox{unshaded}{(0,2.2)}{.3}{0}{0}{\scriptsize{$n_e$}}; \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$A(Z)$}}; \node at (1.2,3.2) {\scriptsize{$B(Z)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_{e'}$}}; \node at (0,3.2) {\scriptsize{$\psi_{e}$}}; } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.9,0) rectangle (1.8,3); \filldraw[primedregion=gray!55] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3) -- (-.9,3) -- (-.9,0); \filldraw[primedregion=gray!95] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \filldraw[boxregion=gray!55] (-.6,3) -- (-.6,2.94) .. controls ++(270:.2cm) and ++(90:.2cm) .. (0,2.5) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!95] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \filldraw[gray!30] (0,2.2) circle (.6cm); \filldraw[primedregion=gray!30] (.04,1.6) -- (0,2.5) .. controls ++(90:.1333cm) and ++(-45:.0667cm) .. (-.3,2.72) arc (120:-86:.6cm); \end{scope} \draw[violet,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.8) -- (0,2.2); \draw[snake,thick] (0,2.5) .. controls ++(90:.2cm) and ++(270:.2cm) .. 
(-.6,2.94) -- (-.6,3); \draw[thick,\XColor] (0,2.2) circle (.6cm); \roundNbox{unshaded}{(0,2.2)}{.3}{.1}{.1}{\scriptsize{$n_{G(c)}$}}; % \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \filldraw[white] (.04,1.6) circle (.1cm); \draw[thick] (.04,1.6) circle (.1cm); \filldraw[white] (-.3,2.72) circle (.1cm); \draw[thick] (-.3,2.72) circle (.1cm); } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.9,-.6) rectangle (2.1,3); \filldraw[primedregion=gray!55] (0,-.6) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3) -- (-.9,3) -- (-.9,-.6); \filldraw[primedregion=gray!95] (0,-.6) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3) -- (2.1,3) -- (2.1,-.6); \filldraw[boxregion=gray!55] (-.6,3) -- (-.6,2.94) .. controls ++(270:.2cm) and ++(90:.2cm) .. (0,2.5) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[boxregion=gray!95] (1.8,-.6) -- (1.8,-.54) .. controls ++(90:.2cm) and ++(270:.2cm) .. (1.2,-.1) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (2.1,3) -- (2.1,0); \filldraw[primedregion=gray!30] (0,2.2) circle (.6cm); \filldraw[boxregion=gray!30] (.04,1.6) -- (0,2.5) .. controls ++(90:.1333cm) and ++(-45:.0667cm) .. (-.3,2.72) arc (120:-86:.6cm); \filldraw[primedregion=gray!75] (1.2,.2) circle (.6cm); \filldraw[boxregion=gray!75] (1.5,-.32) .. controls ++(135:.0667cm) and ++(270:.1333cm) .. (1.2,-.1) -- (1.16,.8) arc (94:-60:.6cm); \end{scope} \draw[violet,thick] (0,-.6) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,3); \draw[black,thick] (1.2,-.1) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,1.8) -- (0,2.2); \draw[black,thick] (1.2,-.1) .. controls ++(270:.2cm) and ++(90:.2cm) .. (1.8,-.54) -- (1.8,-.6); \draw[snake,thick] (0,2.5) .. 
controls ++(90:.2cm) and ++(270:.2cm) .. (-.6,2.94) -- (-.6,3); \draw[thick,\XColor] (0,2.2) circle (.6cm); \draw[thick,blue] (1.2,.2) circle (.6cm); \roundNbox{unshaded}{(0,2.2)}{.3}{.1}{.1}{\scriptsize{$n_{G(c)}$}}; % \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \filldraw[white] (.04,1.61) circle (.1cm); \draw[thick] (.04,1.61) circle (.1cm); \filldraw[white] (-.3,2.72) circle (.1cm); \draw[thick] (-.3,2.72) circle (.1cm); \filldraw[white] (1.16,.8) circle (.1cm); \draw[thick] (1.16,.8) circle (.1cm); \filldraw[white] (1.5,-.32) circle (.1cm); \draw[thick] (1.5,-.32) circle (.1cm); } \underset{\text{\eqref{eq:ModificationForAll1Cells}}}{=} \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.9,0) rectangle (2.1,3.6); \filldraw[primedregion=gray!55] (0,0) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3.6) -- (-.9,3.6) -- (-.9,0); \filldraw[primedregion=gray!95] (0,0) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3.6) -- (2.1,3.6) -- (2.1,0); \filldraw[boxregion=gray!55] (-.6,3.6) -- (-.6,3.54) .. controls ++(270:.2cm) and ++(90:.2cm) .. (0,3.1) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3.6); \filldraw[boxregion=gray!95] (1.8,0) -- (1.8,.06) .. controls ++(90:.2cm) and ++(270:.2cm) .. (1.2,.5) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3.6) -- (2.1,3.6) -- (2.1,0); \filldraw[primedregion=gray!30] (0,2.8) circle (.6cm); \filldraw[boxregion=gray!30] (.04,2.2) -- (0,3.1) .. controls ++(90:.1333cm) and ++(-45:.0667cm) .. (-.3,3.32) arc (120:-86:.6cm); \filldraw[primedregion=gray!75] (1.2,.8) circle (.6cm); \filldraw[boxregion=gray!75] (1.5,.28) .. controls ++(135:.0667cm) and ++(270:.1333cm) .. (1.2,.5) -- (1.16,1.4) arc (94:-60:.6cm); \end{scope} \draw[violet,thick] (0,0) -- (0,1.2) .. 
controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3.6); \draw[black,thick] (1.2,.5) .. controls ++(270:.2cm) and ++(90:.2cm) .. (1.8,.06) -- (1.8,0); \draw[snake,thick] (1.2,.8) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,2.4) -- (0,3.1) .. controls ++(90:.2cm) and ++(270:.2cm) .. (-.6,3.54) -- (-.6,3.6); \draw[thick,\XColor] (0,2.8) circle (.6cm); \draw[thick,blue] (1.2,.8) circle (.6cm); \roundNbox{unshaded}{(1.2,.8)}{.3}{.13}{.13}{\scriptsize{$n_{G(c')}$}}; \filldraw[white] (1.5,.28) circle (.1cm); \draw[thick] (1.5,.28) circle (.1cm); \filldraw[white] (1.16,1.4) circle (.1cm); \draw[thick] (1.16,1.4) circle (.1cm); \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \filldraw[white] (.04,2.21) circle (.1cm); \draw[thick] (.04,2.21) circle (.1cm); \filldraw[white] (-.3,3.32) circle (.1cm); \draw[thick] (-.3,3.32) circle (.1cm); } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (2.1,3); \filldraw[primedregion=gray!55] (0,0) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!95] (0,0) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3) -- (2.1,3) -- (2.1,0); \filldraw[boxregion=gray!55] (0,3) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3); \filldraw[boxregion=gray!95] (1.8,0) -- (1.8,.06) .. controls ++(90:.2cm) and ++(270:.2cm) .. (1.2,.5) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3) -- (2.1,3) -- (2.1,0); \filldraw[primedregion=gray!75] (1.2,.8) circle (.6cm); \filldraw[boxregion=gray!75] (1.5,.28) .. controls ++(135:.0667cm) and ++(270:.1333cm) .. (1.2,.5) -- (1.16,1.4) arc (94:-60:.6cm); \end{scope} \draw[violet,thick] (0,0) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(1.2,2.4) -- (1.2,3); \draw[black,thick] (1.2,.5) .. controls ++(270:.2cm) and ++(90:.2cm) .. (1.8,.06) -- (1.8,0); \draw[snake,thick] (1.2,.8) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,2.4) -- (0,3); \draw[thick,blue] (1.2,.8) circle (.6cm); \roundNbox{unshaded}{(1.2,.8)}{.3}{.13}{.13}{\scriptsize{$n_{G(c')}$}}; \filldraw[white] (1.5,.28) circle (.1cm); \draw[thick] (1.5,.28) circle (.1cm); \filldraw[white] (1.16,1.4) circle (.1cm); \draw[thick] (1.16,1.4) circle (.1cm); \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); } = \tikzmath[scale=.75, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,3); \filldraw[primedregion=gray!55] (0,0) -- (0,1.2) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.8) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,2.4) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!95] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,1.2) -- (0,0); \filldraw[boxregion=gray!55] (0,3) -- (0,2.4) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3); \filldraw[boxregion=gray!95] (1.2,0) -- (1.2,1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.8) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,2.4) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[violet,thick] (0,0) -- (0,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,2.4) -- (1.2,3); \draw[black,thick] (1.2,0) -- (1.2,.8); \draw[snake,thick] (1.2,.8) -- (1.2,1.2) .. controls ++(90:.6cm) and ++(270:.6cm) .. (0,2.4) -- (0,3); \roundNbox{unshaded}{(1.2,.8)}{.3}{0}{0}{\scriptsize{$n_{e'}$}}; \filldraw[white] (.6,1.8) circle (.1cm); \draw[thick] (.6,1.8) circle (.1cm); \node at (0,-.2) {\scriptsize{$A(Z)$}}; \node at (1.2,3.2) {\scriptsize{$B(Z)$}}; \node at (1.2,-.2) {\scriptsize{$\varphi_{e'}$}}; \node at (0,3.2) {\scriptsize{$\psi_{e}$}}; }\,. 
\] In the third equality above, we used the fact that $X\otimes_e Z\otimes_{e'}(X')^\bullet \in \cE(G(c)\to G(c'))$ to apply \eqref{eq:ModificationForAll1Cells}. This completes the proof. \end{proof} \subsection{Proof of Theorem \ref{thm:UniqueLift}} In this section, we prove Theorem \ref{thm:UniqueLift}. We begin by recalling the construction of the canonical inclusion $\iota_\cC : \cC \hookrightarrow {\sf QSys}(\cC)$. \begin{construction}[{\cite[Const.~3.24]{2105.12010}}] \label{const:iota} For each $\cA\in 2{\mathsf{Cat}}$, there is a canonical inclusion strict $\dag$ 2-functor $\iota_\cA: \cA \to {\sf QSys}(\cA)$ defined as follows: \begin{itemize} \item For $a\in \cA$, $a\mapsto 1_a$, the trivial Q-system. \item For ${}_aX_b\in \cA(a\to b)$, $X$ is a separable $1_a-1_b$ bimodule, so $X$ maps to itself. \item For $f\in \cA(X\Rightarrow Y)$, $f$ is automatically $1_a-1_b$ bimodular, so $f$ maps to itself. \end{itemize} \end{construction} \begin{construction} \label{construction:LiftExists} Suppose $F\in {\sf Fun}^\dag(\cA\to \cB)$. We construct an invertible transformation $\psi^F:\iota_\cB \circ F \Rightarrow {\sf QSys}(F)\circ \iota_\cA$. By Constructions \ref{construction:Qsys(F)} and \ref{const:iota}, for a 0-cell $b\in\cA$, we have $$ (\iota_\cB\circ F)(b) = \iota_\cB(F(b)) = 1_{F(b)} \qquad\text{and}\qquad ({\sf QSys}(F)\circ \iota_\cA)(b) = {\sf QSys}(F)(1_b)=F(1_b). $$ For a 1-cell $X\in{\sf QSys}(\cA)(P\to Q)$, we have an equality $$ (\iota_\cB\circ F)(X) = \iota_\cB(F(X)) = F(X) = {\sf QSys}(F)(X) = ({\sf QSys}(F)\circ \iota_\cA)(X), $$ as well as for a 2-cell $f\in{\sf QSys}(\cA)(X\Rightarrow X')$: $$ (\iota_\cB\circ F)(f) = \iota_\cB(F(f)) = F(f) = {\sf QSys}(F)(f) = ({\sf QSys}(F)\circ \iota_\cA)(f). 
$$ Now $F(1_b)$ is equivalent to the trivial Q-system $1_{F(b)}$, and thus for every $X\in\cA(a\to b)$, $$ u^{F(1_b)}_{F(X),1_{F(b)}}: F(X)\otimes_{1_{F(b)}}1_{F(b)}\Rightarrow F(X)\otimes_{F(1_b)} 1_{F(b)} $$ from \eqref{nota:QSys(C)andOther} is unitary; similarly, $u^{F(1_a)}_{1_{F(a)},F(X)}$ is a unitary. We define: \begin{itemize} \item For 0-cells $a,b\in\cA$ and 1-cell $X\in\cA(a\to b)$, we define $\psi^F_b: = 1_{F(b)}$ as an $F(1_b)-1_{F(b)}$ bimodule, which is clearly invertible. \item For ${}_aX_b\in \cA$, we define \[ \psi^F_X : = \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-.6,0) rectangle (1.8,2.4); \filldraw[primedregion=gray!30] (0,0) -- (0,.6) .. controls ++(90:.4cm) and ++(-135:.2cm) .. (.6,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (0,1.8) -- (0,3) -- (-.6,3) -- (-.6,0); \filldraw[primedregion=gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(-135:.2cm) and ++(90:.4cm) .. (0,.6) -- (0,0); \filldraw[gray!30] (0,3) -- (0,1.8) .. controls ++(270:.4cm) and ++(135:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3); \filldraw[gray!55] (1.2,0) -- (1.2,.6) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (.6,1.2) .. controls ++(45:.2cm) and ++(270:.4cm) .. (1.2,1.8) -- (1.2,3) -- (1.8,3) -- (1.8,0); \end{scope} \draw[\XColor,thick] (0,0) -- (0,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. (1.2,1.8) -- (1.2,2.4); \draw[black,thick,dotted] (1.2,0) -- (1.2,.6) .. controls ++(90:.6cm) and ++(270:.6cm) .. 
(0,1.8) -- (0,2.4); \filldraw[white] (.6,1.2) circle (.1cm); \draw[thick] (.6,1.2) circle (.1cm); \node at (0,-.2) {\scriptsize{$F(X)$}}; \node at (1.2,2.6) {\scriptsize{$F(X)$}}; \node at (1.2,-.2) {\scriptsize{$1_{F(b)}$}}; \node at (0,2.6) {\scriptsize{$1_{F(a)}$}}; } := \tikzmath{ \begin{scope} \clip[rounded corners = 5] (-.9,-.8) rectangle (.9,1); \filldraw[primedregion=gray!30] (-.9,-.8) rectangle (0,1); \filldraw[gray!30] (-.4,.6) rectangle (0,1); \filldraw[gray!55] (0,-.8) rectangle (.9,1); \filldraw[primedregion=gray!55] (.4,-.8) -- (.4,-.5) arc (0:90:.4cm) -- (0,-.8); \end{scope} \draw[\XColor,thick] (0,-.8) -- (0,1); \draw[thick,dotted] (.4,-.8) -- (.4,-.5) arc (0:90:.4cm); \draw[thick,dotted] (-.4,1) -- (-.4,.6) arc (180:270:.4cm); \draw[thick] (-.4,.6) -- (0,.6); \filldraw[\XColor] (0,-.1) circle (.05cm); \filldraw[\XColor] (0,.2) circle (.05cm); } \qquad\qquad \begin{aligned} \tikzmath{ \filldraw[primedregion=gray!30, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= F(1_a) & \tikzmath{ \filldraw[primedregion=gray!55, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= F(1_b) \\ \tikzmath{ \filldraw[gray!30, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= 1_{F(a)} & \tikzmath{ \filldraw[gray!55, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= 1_{F(b)} \end{aligned} \qquad \tikzmath{ \begin{scope} \clip[rounded corners = 5pt] (-.5,-.5) rectangle (.5,.5); \filldraw[gray!30] (-.2,0) rectangle (.2,.5); \filldraw[primedregion=gray!30] (-.2,-.5) rectangle (.2,0); \end{scope} \draw[thick, dotted] (-.2,-.5) -- (-.2,.5); \draw[thick] (-.2,0) -- (.2,0); \draw[thick, \XColor] (.2,-.5) -- (.2,.5); } =\left(u^{F(1_a)}_{1_{F(a)},F(X)}\right)^\dag \] Clearly $\psi^F_X$ is unitary. \end{itemize} We leave the verification that $\psi^F$ is a 2-transformation to the reader. \end{construction} \begin{rem} We expect that ${\sf QSys}$ is actually a symmetric lax monoidal 3-functor on the symmetric monoidal 3-category $2{\mathsf{Cat}}$. 
Indeed, for each $\cA,\cB\in 2{\mathsf{Cat}}$, there is a canonical 2-functor ${\sf QSys}(\cA) \boxtimes {\sf QSys}(\cB)\Rightarrow {\sf QSys}(\cA\boxtimes \cB)$ which satisfies various coherences. At this time, we are unaware of a definition of a symmetric monoidal structure on an algebraic tricategory, as well as a definition of a symmetric lax monoidal 3-functor on an algebraic tricategory. We leave this exploration to the interested reader. A more tractable goal would be to produce a symmetric lax monoidal functor on the 1-category of 2-categories and equivalence classes of 2-functors, which is equivalent to the localization of the 1-category ${\mathsf{Gray}}$ of strict 2-categories and strict 2-functors with the ${\mathsf{Gray}}$ tensor product at the weak equivalences in the sense of a model category structure \cite{MR2138540}. \end{rem} Suppose now $\cC,\cD$ are $\rm C^*/W^*$ 2-categories with $\cD$ Q-system complete. We apply the propositions from \S\ref{sec:Dominance} in the case that $\cE={\sf QSys}(\cC)$ and $G=\iota_\cC$. \begin{lem} \label{lem:IotaDominant} $\iota_\cC$ is dominant. 
\end{lem} \begin{proof} For each 0-cell/Q-system ${}_bQ_b\in{\sf QSys}(\cC)$ where $b\in\cC$, $Q:\iota_\cC(b)=1_b\mathrel{\,\hspace{.75ex}\joinrel\rhook\joinrel\hspace{-.75ex}\joinrel\rightarrow} Q$ is a dagger condensation when equipped with the 1-cells ${}_bQ_Q = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \fill[gray!55] (0,0) rectangle (-.3,.6); \fill[green!30] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, DarkGreen] (0,0) -- (0,.6); }$\,, ${}_QQ_b^\bullet:={}_QQ_b = \tikzmath{ \begin{scope} \clip[rounded corners=5pt] (-.3,0) rectangle (.3,.6); \fill[green!30] (0,0) rectangle (-.3,.6); \fill[gray!55] (0,0) rectangle (.3,.6); \end{scope} \draw[thick, DarkGreen] (0,0) -- (0,.6); } $\,, and the 2-cells \[ \tikzmath{ \fill[green!30, rounded corners=5pt] (-.3,0) rectangle (.9,.6); \filldraw[gray!55] (0,0) arc (180:0:.3cm); \draw[DarkGreen,thick] (0,0) arc (180:0:.3cm); \draw[DarkGreen,thick] (.3,.3) -- (.3,.6); \filldraw[DarkGreen] (.3,.3) circle (.05cm); }=\varepsilon_Q \qquad\qquad \tikzmath{ \fill[green!30, rounded corners=5pt] (-.3,0) rectangle (.9,-.6); \filldraw[gray!55] (0,0) arc (-180:0:.3cm); \draw[DarkGreen,thick] (0,0) arc (-180:0:.3cm); \draw[DarkGreen,thick] (.3,-.3) -- (.3,-.6); \filldraw[DarkGreen] (.3,-.3) circle (.05cm); }=\delta_Q=\varepsilon_Q^\dag \qquad\quad\underset{\text{\ref{Q:separable}}}{\Longrightarrow} \qquad\quad \varepsilon_Q\varepsilon_Q^\dag = \tikzmath{ \fill[green!30, rounded corners=5pt] (-.3,0) rectangle (.9,1.2); \filldraw[gray!55] (0,.6) arc (180:-180:.3cm); \draw[DarkGreen,thick] (0,.6) arc (180:-180:.3cm); \draw[DarkGreen,thick] (.3,1.2) -- (.3,.9); \draw[DarkGreen,thick] (.3,0) -- (.3,.3); \filldraw[DarkGreen] (.3,.3) circle (.05cm); \filldraw[DarkGreen] (.3,.9) circle (.05cm); } = \tikzmath{ \fill[green!30, rounded corners=5pt ] (0,0) rectangle (.6,1.2); \draw[DarkGreen,thick] (.3,0) -- (.3,1.2); }=\id_{{}_QQ_Q}\,. 
\] The result now follows as $\iota_\cC$ is a local equivalence on hom categories by definition. \end{proof} \begin{prop} \label{prop:PrecompositionEquivalenceOnHomCats} $-\circ\iota_\cC: {\sf Fun}^\dag({\sf QSys}(\cC)\to \cD)\to {\sf Fun}^\dag(\cC\to \cD)$ is a dagger equivalence on hom categories. \end{prop} \begin{proof} By Lemma \ref{lem:IotaDominant}, $\iota_\cC$ is dominant, so by Proposition \ref{prop:DominantToFullyFaithful}, $-\circ \iota_\cC$ is fully faithful on 2-morphism. To prove $-\circ \iota_\cC$ is a dagger equivalence on hom categories, it remains to prove $-\circ \iota_\cC$ is unitarily essentially surjective on 1-morphisms, i.e., for all $A,B\in {\sf Fun}^\dag({\sf QSys}(\cC) \to \cD)$ and each 1-morphism $\gamma:A\circ \iota_\cC\Rightarrow B\circ \iota_\cC$, there exists $\varphi:A\Rightarrow B$ such that $\gamma\cong\varphi\circ \iota_\cC$. For 0-cells/Q-systems ${}_aP_a,{}_bQ_b\in {\sf QSys}(\cC)$ and a 1-cell ${}_PX_Q\in{\sf QSys}(\cC)(P\to Q)$, we define $\varphi_Q\in \cD(A(Q)\to B(Q))$ and $\varphi_X\in \cD({}_{A(P)}{A(X)\otimes_{A(Q)}\varphi_Q}_{B(Q)}\Rightarrow {}_{A(P)}{\varphi_P\otimes_{B(P)}B(X)}_{B(Q)})$ by \[ \varphi_Q := \tikzmath[scale=.7, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-2.4,-1.2) rectangle (2.4,1.2); \filldraw[primedregion=gray!55] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2) -- (-2.4,1.2) -- (-2.4,-1.2); \filldraw[boxregion=gray!55] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2) -- (2.4,1.2) -- (2.4,-1.2); \filldraw[primedregion=green!30] (-.6,-1.2) -- (-.6,-.6) -- (-1.2,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,1.2) -- (-2.4,1.2) -- (-2.4,-1.2); \filldraw[boxregion=green!30] (1.8,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(1.2,0) -- (.6,.6) -- (.6,1.2) -- (2.4,1.2) -- (2.4,-1.2); \filldraw[gray!55] (-.6,-.2) rectangle (-.2,.2); \end{scope} \draw[black,thick] (.6,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (0,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-.6,1.2); \draw[DarkGreen,thick] (-.6,-1.2) -- (-.6,-.6) -- (-1.2,0) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,1.2); \draw[DarkGreen,thick] (1.8,-1.2) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,0) -- (.6,.6) -- (.6,1.2); \draw[DarkGreen,thick] (-.6,-.6) -- (.6,.6); \filldraw[DarkGreen] (.6,.6) circle (.05cm); \filldraw[DarkGreen] (-.6,-.6) circle (.05cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \node at (-.6,-1.4) {\scriptsize{$A(Q)$}}; \node at (.6,-1.4) {\scriptsize{$\gamma_b$}}; \node at (1.8,-1.4) {\scriptsize{$B(Q)$}}; \node at (-.4,0) {\scriptsize{$\gamma_Q$}}; } \qquad\qquad \varphi_X := \tikzmath[scale=.5, transform shape]{ \begin{scope} \clip[rounded corners = 5] (-3.6,-2.4) rectangle (3.6,2.4); \filldraw[primedregion=gray!30] (0,0) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4) -- (-3,2.4) -- (-3,-.6) -- (-.6,-.6); \filldraw[boxregion=gray!30] (0,0) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4) -- (-.6,2.4) -- (-.6,1.8) -- (.6,.6); \filldraw[primedregion=gray!55] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (0,0) -- (-.6,-.6) -- (.6,-1.8) -- (.6,-2.4); \filldraw[boxregion=gray!55] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (0,0) -- (.6,.6) -- (3,.6) -- (3,-2.4); \filldraw[primedregion=\PrColor] (-.6,-2.4) -- (-.6,-.6) -- (-2.4,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,2.4) -- (-3.6,2.4) -- (-3.6,-2.4); \filldraw[boxregion=\PrColor] (-.6,2.4) -- (-.6,1.8) -- (.6,.6) -- (.6,2.4); \filldraw[primedregion=green!30] (.6,-2.4) -- (.6,-1.8) -- (-.6,-.6) -- (-.6,-2.4); \filldraw[boxregion=green!30] (3,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. 
(2.4,-1.2) -- (.6,.6) -- (.6,2.4) -- (3.6,2.4) -- (3.6,-2.4); \filldraw[gray!30] (-1.6,1.2) circle (.22cm); \filldraw[gray!30] (-.4,0) circle (.22cm); \filldraw[gray!55] (.8,-1.2) circle (.22cm); \end{scope} \draw[black,thick] (1.8,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (1.2,-1.2) -- (-1.2,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-1.8,2.4); \draw[\XColor,thick] (-.6,-2.4) -- (-.6,-.6) -- (.6,.6) -- (.6,2.4); \draw[\PsColor,thick] (-.6,-.6) -- (-2.4,1.2) .. controls ++(135:.2cm) and ++(270:.4cm) .. (-3,2.4); \draw[\PsColor,thick] (.6,.6) -- (-.6,1.8) -- (-.6,2.4); \draw[\PsColor,thick] (-1.8,.6) -- (-.6,1.8); \draw[DarkGreen,thick] (-.6,-.6) -- (.6,-1.8) -- (.6,-2.4); \draw[DarkGreen,thick] (3,-2.4) .. controls ++(90:.4cm) and ++(-45:.2cm) .. (2.4,-1.2) -- (.6,.6); \draw[DarkGreen,thick] (1.8,-.6) -- (.6,-1.8); \filldraw[\XColor] (-.6,-.6) circle (.05cm); \filldraw[\XColor] (.6,.6) circle (.05cm); \filldraw[\PsColor] (-1.8,.6) circle (.05cm); \filldraw[\PsColor] (-.6,1.8) circle (.05cm); \filldraw[DarkGreen] (1.8,-.6) circle (.05cm); \filldraw[DarkGreen] (.6,-1.8) circle (.05cm); \filldraw[white] (0,0) circle (.1cm); \draw[thick] (0,0) circle (.1cm); \filldraw[white] (-1.2,1.2) circle (.1cm); \draw[thick] (-1.2,1.2) circle (.1cm); \filldraw[white] (1.2,-1.2) circle (.1cm); \draw[thick] (1.2,-1.2) circle (.1cm); \node at (-.6,-2.6) {\normalsize{$A(X)$}}; \node at (.6,-2.6) {\normalsize{$A(Q)$}}; \node at (1.8,-2.6) {\normalsize{$\gamma_b$}}; \node at (3,-2.6) {\normalsize{$B(Q)$}}; \node at (.6,2.6) {\normalsize{$B(X)$}}; \node at (-.6,2.6) {\normalsize{$B(P)$}}; \node at (-1.8,2.6) {\normalsize{$\gamma_a$}}; \node at (-3,2.6) {\normalsize{$A(P)$}}; \node at (-.4,0) {\normalsize{$\gamma_X$}}; \node at (-1.6,1.2) {\normalsize{$\gamma_P$}}; \node at (.8,-1.2) {\normalsize{$\gamma_Q$}}; } \qquad\qquad \begin{aligned} \tikzmath{ \filldraw[gray!30, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= 1_a & \tikzmath{ \filldraw[gray!55, rounded corners 
= 5pt] (0,0) rectangle (.6,.6); } &= 1_b \\ \tikzmath{ \filldraw[\PrColor, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= P & \tikzmath{ \filldraw[green!30, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= Q \\ \tikzmath{ \filldraw[primedregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= A & \tikzmath{ \filldraw[boxregion=white, rounded corners = 5pt] (0,0) rectangle (.6,.6); \draw[thin, dotted, rounded corners = 5pt] (0,0) rectangle (.6,.6); } &= B \end{aligned} \] Then for each 0-cell $b\in \cC$, and 1-cell ${}_aX_b\in\cC(a\to b)$, by Construction \ref{const:1CompositionIn2Cat}, $$ (\varphi\circ \iota_\cC)_b = \varphi_{\iota_\cC(b)} = \varphi_{1_b} = \gamma_b \qquad \text{and} \qquad (\varphi\circ \iota_\cC)_X = \varphi_{\iota_\cC(X)} = \varphi_{X} = \gamma_X $$ where the latter is viewed as $1_a-1_b$ bimodular. Therefore $\varphi\circ \iota_\cC\cong \gamma$ as desired, so $-\circ \iota_\cC$ is gives a dagger functor on hom 1-categories whose underlying functor is an equivalence. Since ${\sf Fun}^\dag(\cC\to \cD)$ is $\rm C^*$, $-\circ \iota_\cC$ is a dagger equivalence on hom 1-categories by polar decomposition as discussed in Remark \ref{rem:Underlying2FunctorEquivalence}. \end{proof} By Propositions \ref{prop:TruncatedGroupoids} and \ref{prop:PrecompositionEquivalenceOnHomCats}, $-\circ \iota_\cC$ is $(-1)$-truncated when restricted to unitary cores, i.e., the homotopy fiber at each $F\in \core^\dag({\sf Fun}^\dag(\cC\to \cD))$ is either empty or equivalent to a point. By Constructions \ref{construction:Qsys(F)} and \ref{construction:LiftExists}, the homotopy fiber of $-\circ \iota_\cC$ is non-empty at each $F\in {\sf Fun}^\dag(\cC\to \cD)$. 
Indeed, since $\cD$ is Q-system complete, $\iota_\cD$ is invertible, so there exists a $\dag$ 2-functor $\iota^{-1}_\cD:{\sf QSys}(\cD)\to\cD$ together with an invertible $\dag$ 2-transformation $\theta_\cD:1_\cD\Rightarrow \iota_\cD^{-1}\circ\iota_\cD$. Thus $\iota_\cD^{-1}\circ {\sf QSys}(F)$ provides the desired lift together with the composite invertible transformation \begin{equation} \label{eq:LiftExists} \begin{tikzcd} {\sf QSys}(\cC) \arrow[rr, "{\sf QSys}(F)"] && {\sf QSys}(\cD) \arrow[dr, "\iota_\cD^{-1}"] \\ \cC \arrow[r,swap, "F"] \arrow[u, "\iota_\cC"] & \cD \arrow[rr,swap,"1_\cD"] \arrow[ur, "\iota_\cD"] \arrow[ul,Rightarrow,shorten <= 1em, shorten >= 1em,"\cong","\psi^F"'] & \arrow[u,Rightarrow,shorten <= .5em, shorten >= .5em, "\cong","\theta"'] & \cD. \end{tikzcd} \end{equation} Thus the homotopy fiber of $-\circ \iota_\cC$ at each $F\in \core^\dag({\sf Fun}^\dag(\cC\to \cD))$ is equivalent to a point. By Proposition \ref{prop:TruncatedGroupoids}, $-\circ\iota_\cC$ is (-2)-truncated when restricted to unitary cores. This implies $-\circ\iota_\cC : {\sf Fun}^\dag(\cE\to \cD) \to {\sf Fun}^\dag(\cC\to \cD)$ is essentially surjective on objects. Again by Remark \ref{rem:Underlying2FunctorEquivalence}, Proposition \ref{prop:PrecompositionEquivalenceOnHomCats}, and \cite[Thm.~7.4.1]{2002.06055}, $-\circ\iota_\cC : {\sf Fun}^\dag(\cE\to \cD) \to {\sf Fun}^\dag(\cC\to \cD)$ is a $\dag$-equivalence of $\rm C^*/W^*$ 2-categories. \qed \begin{rem} Observe that we did not really need to pass to (unitary) cores, nor use Proposition \ref{prop:TruncatedGroupoids}. Indeed, $-\circ \iota_\cC$ is an equivalence on hom categories by Proposition \ref{prop:PrecompositionEquivalenceOnHomCats} and essentially surjective on objects by \eqref{eq:LiftExists}, and thus an equivalence by \cite[Thm.~7.4.1]{2002.06055} and Remark \ref{rem:Underlying2FunctorEquivalence}. \end{rem} \bibliographystyle{alpha} {\footnotesize{
{ "redpajama_set_name": "RedPajamaArXiv" }
2,119
\section{Hierarchy of voids} Voids were defined as low density regions or, alternatively, as regions completely devoid of a certain type of object. Mean void diameters listed in Table~1 demonstrate the dependence of the void size on the type of object used in the (second) void definition. Both definitions imply that voids are not completely empty. Thus, the question is meaningful whether the distribution of galaxies in voids is homogeneous or reveals any structure. For example, it was concluded that Blue Compact Galaxies (BCG) from the Second Byurakan Survey (SBS) or other peculiar galaxies occur isolated within voids (Pustil'nik {\it et al.} 1995). Such questions are very relevant concerning scenarios of large scale structure and galaxy formation, but they are not conclusively answered up to now. \vskip 0.3truecm \noindent{\bf Table~1} \ {\small Mean diameters of voids surrounded by different types of object} \vskip -0.4truecm {{$$\vbox {\tabskip=0.05truecm \halign to \hsize { \hfil# & \hfil# & \hfill# & \hfil#\cr \noalign {\smallskip} \noalign{\hrule} \noalign{\smallskip} type of object \qquad & \qquad mean void diameter \cr \noalign{\smallskip} \noalign{\hrule} \noalign {\medskip} rich clusters (Abell/ACO--Catalogue) & 100 $h^{-1}$~{\rm Mpc}\qquad \cr poor clusters (Zwicky--Catalogue) & 37 $h^{-1}$~{\rm Mpc}\qquad \cr \qquad bright ($M \le -20.3$) elliptical galaxies & 30 $h^{-1}$~{\rm Mpc}\qquad \cr galaxies brighter than $M = -20.3$ & 23 $h^{-1}$~{\rm Mpc}\qquad \cr galaxies brighter than $M = -19.7$ & 16 $h^{-1}$~{\rm Mpc}\qquad \cr galaxies brighter than $M = -18.8$ & 13 $h^{-1}$~{\rm Mpc}\qquad \cr \noalign{\medskip} \noalign{\hrule} }}$$}} \vskip -0.3truecm Using the second void definition we have studied the properties of voids surrounded by galaxies from three different luminosity (absolute magnitude $M$) limited samples. Three void catalogues have been compiled. 
Comparisons of voids from different catalogues revealed that voids form a hierarchical system (cf. Lindner {\it et al.} 1995, A\&A 301, 329) as it is visualized in Fig.~1a). In this hierarchical concept apparently isolated galaxies in voids may have faint close neighbors which are not detected because of selection effects as it is shown in Fig.~1b). \begin{figure} \epsfysize=8.5cm \vskip -2.7truecm {\epsffile{voids_fig1ab.ps}} \vskip -0.5truecm \caption{Wedge diagrams of a slice of the Universe 6000 km s$^{-1}$\ deep and bordered by about $9^h < \alpha < 15^h$ and $49^\circ < \delta < 57^\circ$. {\bf a)} The three circles indicate an example of hierarchically interlaced voids defined by galaxies of different luminosity limit. {\bf b)} Additionally BCGs from SBS are shown (crosses). The circle indicates the distance to the nearest bright ($M < -19.7$) neighboring galaxy.} \end{figure} \section{Conclusions} By now the concept of void hierarchy is established only for galaxies brighter than $M = -18.8$ in the nearby Universe (up to distance $60 h^{-1}$Mpc). The study of the radial distribution of fainter galaxies in voids along with nearest neighbor tests (Lindner {\it et al.}\ 1996) suggests that this hierarchy continues to fainter magnitudes and therefore contradicts a homogeneous distribution of dwarf ga\-laxies in voids claimed by some theories of galaxy formation (e.g. Dekel \& Silk 1986). With second generation instruments attached to the VLT (e.g. VIRMOS) it will be possible to confirm the hierarchy of voids towards fainter luminosity limits and for more distant regions of the Universe. The void hierarchy itself will be helpful to devise new concepts for the study of the large scale structure in the Universe.
{ "redpajama_set_name": "RedPajamaArXiv" }
3,204
{"url":"https:\/\/crypto.stackexchange.com\/questions\/31979\/proof-that-this-is-not-a-secure-pseudorandom-function\/53971#53971","text":"# Proof that this is not a secure pseudorandom function?\n\n$$p$$ is a large prime number. Consider the following function $$F:\\mathbb Z^*_p \\times \\mathbb D\\rightarrow\\mathbb Z^*_p$$ where $$\\mathbb D=2,....,p-1$$.\n\n$$F_k(x)=x^k \\bmod p$$\n\nProof that it's not a secure pseudorandom function.\n\nWhat I\u2018ve tried:\n\nI give a value to $$x_1$$ and obtain $$y_1$$, then I set $$x_2 =y_1+p$$. At the end $$y_1$$ is equal to $$y_2$$. Could this be right or is it totally wrong?\n\n## 1 Answer\n\nHint: multiplicative property.\n\nWhat you have tried does not work because $y_1=y_2$ does not hold in general when $x_2=y_1+p$ (further, adding $p$ is identity in $Z_p$ ).\n\nYou want to build a distinguisher for $F_k$. Arguably, $F_k(1)=1$ is enough for that, but you can build a more general distinguisher from $F_k(x_1\\cdot x_2\\bmod p)=F_k(x_1)\\cdot F_k(x_2)\\bmod p$.","date":"2021-10-20 00:45:43","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 9, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.824865996837616, \"perplexity\": 336.71435352437504}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": false}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": 
\"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2021-43\/segments\/1634323585290.83\/warc\/CC-MAIN-20211019233130-20211020023130-00283.warc.gz\"}"}
null
null
Home / Camdaki Kız / The Girl in the Window Episode 5 Camdaki Kız (The Girl in the Window): Trailer And Summary By Naila Annisa 29 Apr, 2021 Post a Comment Watch the 5th episode trailer of Camdaki Kız (The Girl in the Window) in full! In the end episode; Nalan and her family are confronting Cana! What happens in the episode on May 6th? Despite the efforts of the Koroglu family to prevent it, the photo of Sadat and Cana is published in the newspaper. When photos of Sadat and Cana were leaked to the press, a major earthquake occurred in the Koroglu Family. Feride rips up all the photos. Cana pressures Sadat to deny this love. What will Muzo do if he thinks about not saving Sadat this time? Rafet Koroglu invites Nalan and her family to his house, and it would be a bad surprise for Nalan to have Cana there. What's Nalan going to do in front of Cana? Nalan in the tender spot! Cana doesn't know the border... One by one, the truth that is kept secret in Kanal D's flowery series project, Camdaki Kız (The Girl in the Window), is revealed. In the end episode; Sadat's father, who sees the footage on camera, has a nervous breakdown. Faced with the shocking truth just on the eve of the marriage, Nalan's experiences are not enough, and she undergoes a great test. The trailer for Episode 5 of Camdaki Kız (The Girl in the Window), which started its Kanal D screen adventure with the production of OGM Pictures, written by Seda Altaylı Tuğutlu and directed by Nadim Güç'ün, has been released. Despite the efforts of the Koroglu family to prevent it, the photo of Sadat and Cana is published in the newspaper. The bombshell of the story will mobilize all journalists. Everyone is curious about Nalan's reaction to this news. Or is this magical love story over before it even begins? With the publication of the news, an apocalypse will break out at Koroglu Manor.
For the dignity of their families and the future of the company's shares, they need to fix this situation as soon as possible and prevent Nalan from throwing away the ring. Nalan is very disappointed to learn from the newspapers that Sadat is seeing another woman. Feride, who has never trusted the Koroglu family from the beginning, goes crazy and furiously waits for the Koroglu family to make a statement to them. However, Feride's expected explanation does not come because the Koroglu family has a completely different plan to solve this crisis. Camdaki Kız Episode 5 Trailer Camdaki Kız Episode 5 Summary: Nalan can't tell her mother the truth because she's afraid of Feride's reaction. While the Koroglu family has yet to solve the crisis created by the photo in the press, Nalan's last-minute throwing away the ring comes as a shock to both families. While no one knew the reason for this sudden decision, Muzo, who had watched what was happening on camera, understood everything. Nalan can't tell her mother the truth because she is afraid of Feride's reaction. Feride, on the other hand, does not know the bracelet that Sadat bought For Cana, so she tries to turn Nalan away from the decision to leave by force. While Nalan is in great pain of love, she is trying to understand what is real and what is a lie. After Nalan threw away the ring, Koroglu Manor returned to the scene of the fire. Rafet can't stand it in the end, he says he'll disown Sadat. This would be the last straw for Sadat, and Sadat would leave the house. But this anger will cost him dearly, and perhaps an accident will change the course of everything. Camdaki Kız (The Girl in the Window) Episode 5 on Kanal D on Thursday, May 6,2021 at 08 P.M! Read Also All Episode of Camdaki Kız (The Girl in the Window) Post a Comment for "Episode 5 Camdaki Kız (The Girl in the Window): Trailer And Summary"
{ "redpajama_set_name": "RedPajamaCommonCrawl" }
8,448
\section{Open system initial correlations} An open quantum system is a quantum system `S' that interacts with some environment `E' whose degrees of freedom have been coarse grained (colloquially, `traced out' or `integrated over'). The unitary evolution of the combined system + environment, `C' (for combined or closed), is generated by the Hamiltonian \begin{equation} \mathbf{H}_\mathrm{C} \equiv \mathbf{H}_\mathrm{S} + \mathbf{H}_\mathrm{E} + \mathbf{H}_\mathrm{I} \, , \end{equation} where $\mathbf{H}_\mathrm{I}$ denotes the system-environment interaction. Specifying the initial state of the combined system is necessary to determine the open-system dynamics. The most common choice is to assume factorized initial states for the system and environment% \footnote{Even after resolving issues of renormalization, the system will typically be displaced by a finite amount of the order of the induced damping within a very short time of the order of the inverse UV cutoff of the environment.} \begin{eqnarray} \boldsymbol{\rho}_\mathrm{C}(0) &=& \boldsymbol{\rho}_\mathrm{S}(0) \otimes \boldsymbol{\rho}_\mathrm{E}(0) \, , \\ \boldsymbol{\rho}_\mathrm{E}(0) &=& \frac{1}{Z_\mathrm{E}(\beta)} e^{-\beta \, \mathbf{H}_\mathrm{E}} \, . \end{eqnarray} where $Z_\mathrm{E}(\beta)$ denotes the partition function of the free (noninteracting) environment and $T=1/\beta$ is the temperature of the environment, which acts here as a thermal reservoir. When considering environments with a large number of high-frequency modes and characterized by a UV frequency cutoff $\Lambda$, such a factorized initial state (chosen for mathematical simplicity) unfortunately engenders unphysical behavior such as a sudden jolt in physical quantities near the initial time (this was analyzed in some detail in Ref.~\cite{HPZ92}) or spurious cutoff sensitivity of certain system correlators (see Ref.~\cite{HRV04} and references therein). 
This kind of initial conditions assumes that an uncorrelated system and environment are instantaneously coupled with non-vanishing strength. The pathological behavior arises because the factorized initial state contains a number of highly excited energy states of the full Hamiltonian (including the interacting Hamiltonian), even when the initial reduced states of the system and environment are not highly excited in the free theory, and it is a reflection of the high-frequency modes of the environment quickly becoming correlated with the system within a time of order $1/\Lambda$. The next most common choice of initial state (see Ref.~\cite{Grabert88}) has been to consider system deformations or measurements of the global equilibrium state of the combined system `C', with density matrix \begin{equation} \boldsymbol{\rho}_\mathrm{C}(0) = \sum_n \mathbf{O}'_n \, \frac{1}{Z_\mathrm{C}(\beta)} e^{-\beta \, \mathbf{H}_\mathrm{C}} \, \mathbf{O}_n \, , \label{eq:PrepP} \end{equation} where the $\mathbf{O}$ and $\mathbf{O'}$ operators are restricted to act on the system. However, this method still gives rise to jolts for sufficiently general deformations or measurements \cite{Romero97}, which can be understood as a consequence of altering the state of the system instantaneously \cite{Anglin97}. To cure or tame these drastic effects, especially in the context of linear systems, the following procedure has been suggested: a) force the system by a constant amount, b) wait for it to relax into the displaced equilibrium state, and then c) release the force \cite{Grabert88}. Alternatively and in order to generate interesting coherent superposition states for the system, one can start with the equilibrium state of the combined system and act on the system, but for a non-vanishing time \cite{Anglin97}. Essentially we view the problem as an imbalance between initial correlations and initial coupling strength; the imbalance can be countered on either side. 
We also believe that the most natural resolution should be a dynamical preparation which relies upon equilibration \cite{Grabert88,Breuer01b} followed by an additional preparation of the system for a finite time \cite{Anglin97}. Our key contribution is showing that this can be achieved while still taking advantage of the simpler analytical results obtained when deriving the master equation for a factorized initial state, without the need to introduce inhomogeneous terms and an affine master equation \cite{Breuer01b}. In the next section we will briefly discuss the perturbative open-system master equation which we will use to approach these issues. Then in Secs.~\ref{sec:couple} and \ref{sec:Hamiltonian} we will provide resolutions based, respectively, on balancing the coupling and the correlations. \section{Open-system dynamics} In the time-local representation (also called the convolutionless or Markovian representation), the dynamics of the reduced density matrix of the system $\boldsymbol{\rho}$ can be expressed with a quantum Liouville equation \begin{eqnarray} \frac{d}{dt} \boldsymbol{\rho}(t) &=& \boldsymbol{\mathcal{L}}(t) \, \boldsymbol{\rho}(t) \, \label{eq:TCME}, \end{eqnarray} for any factorized initial condition. As a perturbative approximation, $\boldsymbol{\mathcal{L}}(t)$ is expanded in powers of the system-environment interaction $\mathbf{H}_\mathrm{I}(t)$ and truncated to some order. (We momentarily consider full time dependence in this section as the more general relations will be necessary for our techniques.) Such perturbative master equations can be derived in a variety of ways \cite{Kampen97,Breuer01,Strunz04} and find application in many branches of physics and chemistry \cite{Pollard97,Carmichael99,Breuer02,Kampen07}. 
The expansion of $\boldsymbol{\mathcal{L}}(t)$ will then take the form \begin{eqnarray} \boldsymbol{\mathcal{L}}(t) &=& \sum_{k=0}^\infty \boldsymbol{\mathcal{L}}_{2k}(t) \, ,\label{eq:PerturbExp} \\ \boldsymbol{\mathcal{L}}_{0} \, \boldsymbol{\rho} &=& \left[ -\imath \, \mathbf{H}_\mathrm{S}(t) , \boldsymbol{\rho} \right] \, , \end{eqnarray} where $ \boldsymbol{\mathcal{L}}_{2k} = \mathcal{O}(\mathbf{H}_\mathrm{I}^{2k})$ and to zeroth-order the system is driven in a unitary manner by its Hamiltonian $\mathbf{H}_\mathrm{S}(t)$. We take the expansion to be even as we only consider Gaussian noise, which is symmetric. A Gaussian noise distributional is necessary for higher-order perturbation theory to be non-secular in the late-time limit \cite{QOS}, though at second order one is effectively truncating the noise cumulants in a manner consistent with Gaussian noise. The linear master equation is derived under the general assumptions of a factorized initial state and an expansion of the interaction as a sum of separable operators: \begin{equation} \mathbf{H}_\mathrm{I}(t) = \sum_n \mathbf{L}_n(t) \otimes \mathbf{l}_n(t) \, , \label{eq:Hint} \end{equation} where $\mathbf{L}_n(t)$ and $\mathbf{l}_n(t)$ are system and environment operators respectively. The environment coupling operators $\mathbf{l}_n(t)$ will typically be collective observables of the environment, with dependence upon very many modes. Using the notation of Ref.~\cite{QOS}, the second-order master equation can be expressed as \begin{equation} \boldsymbol{\mathcal{L}}_2 \{ \boldsymbol{\rho} \} \equiv \sum_{nm} \left[ \mathbf{L}_n, \boldsymbol{\rho} \, (\mathbf{A}_{nm}\! \diamond \mathbf{L}_m)^\dagger - (\mathbf{A}_{nm}\! \diamond \mathbf{L}_m) \, \boldsymbol{\rho} \right] \, , \label{eq:WCGME} \end{equation} where the $\mathbf{A}$ operators and $\diamond$ product define the second-order operator \begin{equation} (\mathbf{A}_{nm}\! \diamond \mathbf{L}_m)(t) \equiv \int_0^t \!\! 
d\tau \, \alpha_{nm}(t,\tau) \, \left\{ \mathbf{G}_0(t,\tau) \, \mathbf{L}_m(\tau) \right\} \, . \label{eq:WCOG} \end{equation} Here $\mathbf{G}_0(t,\tau): \boldsymbol{\rho}(\tau) \to \boldsymbol{\rho}(t)$ is the free-system propagator, which for a constant Hamiltonian $\mathbf{H}$ is given by \begin{equation} \mathbf{G}_0(t,\tau) \, \boldsymbol{\rho}= e^{-\imath(t-\tau) \mathbf{H}} \, \boldsymbol{\rho} \, e^{+\imath(t-\tau) \mathbf{H}} \, , \end{equation} while $\alpha_{nm}(t,\tau)$ are the environment correlation functions defined by \begin{equation} \alpha_{nm}(t,\tau) = \left\langle \underline{\mathbf{l}}_n\!(t) \, \underline{\mathbf{l}}_m\!(\tau) \right\rangle_\mathrm{E} \, , \label{eq:alpha} \end{equation} where $\underline{\mathbf{l}}_n\!(t)$ represents the time-evolving $\mathbf{l}_n$ in the interaction (Dirac) picture. In general the correlation function is Hermitian and positive definite. For constant coupling to any stationary environment, the correlation function will also be stationary, $\boldsymbol{\alpha}(t,\tau)=\boldsymbol{\alpha}(t\!-\!\tau)$. Furthermore, for a thermal environment the correlation function will satisfy the KMS relation \cite{Kubo57,Martin59}: \begin{equation} \tilde{\boldsymbol{\alpha}}(\omega) = \tilde{\boldsymbol{\alpha}}^*(-\omega) \, e^{-\beta \omega} \, , \end{equation} where $\tilde{\boldsymbol{\alpha}}(\omega) = \int_{-\infty}^{+\infty} dt \, \boldsymbol{\alpha}(t) \, e^{-\imath \omega t}$ denotes the Fourier transform. From this perspective, the mathematical cause of the initial jolt becomes clear. For constant Hamiltonians and an initially stationary environment, the second-order operator obeys the relation \begin{equation} \frac{d}{dt}(\mathbf{A}_{nm}\! 
\diamond \mathbf{L}_m)(t) = \alpha_{nm}(t) \, \left\{ \mathbf{G}_0(t) \, \mathbf{L}_m \right\} \, , \end{equation} which can be extremely large near the initial time when considering an environment with a sufficient amount of high frequency modes (such as low-temperature ohmic and supra-ohmic environments) since $\boldsymbol{\alpha}(t)$ is typically a very localized distribution in those cases. For a finite but large cutoff $\Lambda$, the correlation function becomes of order $\Lambda$ for a time of order $1/\Lambda$. \section{Coupling switch-on} \label{sec:couple} One method for balancing the initial coupling between the system and environment with their initial lack of correlation, is to turn on the coupling slowly with a time-dependent interaction such as \begin{equation} \mathbf{H}_\mathrm{I} = \theta_\mathrm{s}(t) \sum_n \mathbf{L}_n \otimes \mathbf{l}_n \, , \label{eq:Hint} \end{equation} where $\theta_\mathrm{s}(t) : [0,\infty) \to [0,1)$ is a smooth switch-on function with a characteristic timescale $\tau_\mathrm{s}$, which vanishes at the initial time and becomes (effectively) one for times longer than $\tau_\mathrm{s}$. To some extent, this was considered for linear systems in Ref.~\cite{QBM}. Such a time-dependent interaction is equivalent to employing the second-order operator \begin{equation} (\mathbf{A}_{nm}\! \diamond \mathbf{L}_m)(t) = \theta_\mathrm{s}(t) \! \int_0^t \!\!\! d\tau \, \theta_\mathrm{s}(t\!-\!\tau) \, \alpha_{nm}(\tau) \left\{ \mathbf{G}_0(\tau) \, \mathbf{L}_m \right\} \, , \label{eq:WCOG} \end{equation} for otherwise constant couplings and Hamiltonians. Therefore, any initial jolt due to the localized nature of $\boldsymbol{\alpha}(t)$ will be suppressed by $\theta_\mathrm{s}(t)$ as long as $\tau_\mathrm{s} \gg 1/\Lambda$. 
\begin{figure}[h] \centering \includegraphics[width=7cm]{switched.pdf} \caption{Zero-temperature, ohmic decay rate for the \textcolor{red}{$\bullet$ instantaneously coupled} and \textcolor{blue}{$\cdot$ gradually coupled} initial states of a two-level system with exponential cutoff frequency $\Lambda = 100 \, \Omega$. In this case the switch-on function is exponential, $\theta_\mathrm{s}(t) = 1-e^{-t/\tau_\mathrm{s}}$, and the switch-on times $\tau_\mathrm{s}$ are chosen to take the values $1/\Lambda$, $2/\Lambda$, $4/\Lambda$, $8/\Lambda$, $16/\Lambda$. } \label{fig:switched} \end{figure} As can be seen in Fig.~\ref{fig:switched}, the cutoff-frequency jolts are essentially replaced by jolts of frequency $\mathrm{min}[\Lambda,1/\tau_\mathrm{s}]$ and amplitude proportional to the same value. This approach provides a useful way of generating initial system-environment correlations when $\tau_\mathrm{s}$ is much larger than $1/\Lambda$ but smaller than any other relevant timescales (such as the system frequencies). Furthermore, even if a mild jolt is still present, the important point is that it is cutoff insensitive (for fixed $\tau_\mathrm{s}$ and sufficiently large $\Lambda$). \section{Dynamically prepared initial states} \label{sec:Hamiltonian} Alternatively, in order to balance the initial correlations with an initially non-vanishing interaction strength, we will consider here initial states with suitable correlations to the environment. Such states will be obtained via an auxiliary construction which involves evolving an initially uncorrelated state for a sufficiently long time (a similar procedure was used in Ref.~\cite{PerezNadal08,PerezNadal08b} within the context of semiclassical gravity). The system-environment correlations are then dynamically generated through the environmental interaction itself. 
Our first examples of \emph{equilibrium preparation} will be the simplest mathematically, while the final examples of \emph{non-equilibrium preparation} will be closer to actual laboratory experiments. In all cases we will take the system and environment to be uncorrelated not at $t=0$ but in the infinite past. \begin{equation} \boldsymbol{\rho}_\mathrm{C}(-\infty) = \boldsymbol{\rho}_\mathrm{S}(-\infty) \otimes \boldsymbol{\rho}_\mathrm{E}(-\infty) \, , \label{eq:PreHistory} \end{equation} for some (possibly unimportant) system state $\boldsymbol{\rho}_\mathrm{S}(-\infty)$ and thermal $\boldsymbol{\rho}_\mathrm{E}(-\infty)$. We then define the system Hamiltonian piecewise in time \begin{equation} \mathbf{H}_\mathrm{S}(t) = \left\{ \begin{array}{l@{\;\;\;\;}rcl} \mathbf{H}_+(t) & 0 & < & t \\ \mathbf{H}_- & t & < & 0 \end{array} \right. \, , \end{equation} such that in the past the system is allowed to equilibrate with the environment for an infinite time, which determines the correlated initial state at $t=0$. The second-order master equation is then determined by \begin{equation} \left( \mathbf{A}_{nm}\! \diamond \mathbf{L}_m \right)\!(t) = \int_{\!-\infty}^t \!\!\! d\tau \, \alpha_{nm}(t,\tau) \, \left\{ \mathbf{G}_\mathrm{S}(t,\tau) \, \mathbf{L}_m(\tau) \right\} \, . \end{equation} To analyze the coefficients associated with the initially-correlated state, we will reduce them to a sum of coefficients for the auxiliary initially-uncorrelated state involving various time ranges. First, we split the integration into two parts \begin{equation} \int_{\!-\infty}^t \!\!\! d\tau = \int_{0}^t \!\! d\tau + \int_{\!-\infty}^0 \!\!\! d\tau \, , \end{equation} with the first integral depending only upon $\mathbf{G}_+(t,\tau)$ and corresponding to the uncorrelated coefficients. Inserting the product $\mathbf{G}_-(0,t) \, \mathbf{G}_-(t,0)$, which equals the identity, the second integral can be written as \begin{equation} \boldsymbol{\mathcal{M}}(t) \int_{\!-\infty}^0 \!\!\! 
d\tau \, \alpha_{nm}(t,\tau) \, \left\{ \mathbf{G}_-(t,\tau) \, \mathbf{L}_m(\tau) \right\} \, , \label{eq:Int2} \end{equation} given the operator \begin{equation} \boldsymbol{\mathcal{M}}(t) \equiv \mathbf{G}_+(t,0) \, \mathbf{G}_-(0,t) \, . \end{equation} The integral in Eq.~\eqref{eq:Int2} is then broken up into two parts \begin{equation} \int_{\!-\infty}^0 \!\!\! d\tau = \int_{\!-\infty}^t \!\!\! d\tau - \int_{0}^t \!\! d\tau \, , \end{equation} corresponding to the asymptotic and finite-time coefficients for an initially uncorrelated system driven by the time-independent preparation Hamiltonian $\mathbf{H}_-$. Finally, our correlated coefficients can be expressed in terms of the uncorrelated coefficients as \begin{align} & \underbrace{\left( \mathbf{A}_{nm}\! \diamond \mathbf{L}_m \right)\!(t)}_\mathrm{correlated} = \underbrace{\left( \mathbf{A}_{nm}\! \diamond \mathbf{L}_m \right)_+\!(t)}_\mathrm{uncorrelated} \label{eq:Aprep} \\ & - \boldsymbol{\mathcal{M}}(t) \Bigl\{ \underbrace{\left( \mathbf{A}_{nm}\! \diamond \mathbf{L}_m \right)_-\!(t)}_\mathrm{jolt \; suppression} - \underbrace{\left( \mathbf{A}_{nm}\! \diamond \mathbf{L}_m \right)_-\!(\infty)}_\mathrm{preparation \; eraser} \Bigr\} \, , \nonumber \end{align} where the subscripted $\left( \mathbf{A} \diamond \mathbf{L} \right)_\pm$ coefficients are defined as \begin{equation} (\mathbf{A}_{nm}\! \diamond \mathbf{L}_m)_\pm(t) \equiv \int_0^t \!\! d\tau \, \alpha_{nm}(t,\tau) \, \left\{ \mathbf{G}_\pm(t,\tau) \, \mathbf{L}_m(\tau) \right\} \, . \end{equation} If the system frequencies are always small as compared to the cutoff, we can inspect the early-time behavior (and jolts) by letting $\mathbf{G}_\pm(t) \approx \mathbf{1}$. Then one can see that the first two terms of Eq.~\eqref{eq:Aprep} will precisely cancel in the early-time regime. Therefore, the correlated initial states are jolt-free given sufficiently small system frequencies as compared to the cutoff: $\Omega \ll \Lambda$. 
The final term in turn is such that in the late-time limit it precisely cancels the second term and erases all memory of $\mathbf{H}_-$. Finally note that, quite trivially, if we choose $\mathbf{H}_+(t) = \mathbf{H}_-$, then the first two terms cancel and we recover the equilibrium coefficients at any finite time. \subsection{Equilibrium preparation} To prepare an initial state in this approach, we choose the past Hamiltonian $\mathbf{H}_-$ such that its dynamics along with the environment interaction relaxes our system to the desired initial state: \begin{eqnarray} \lim_{t \to \infty} e^{t \, \boldsymbol{\mathcal{L}}_-(\infty)} \, \boldsymbol{\rho}_0 &=& \boldsymbol{\rho}_0 \, , \\ \boldsymbol{\mathcal{L}}_-(\infty) \, \boldsymbol{\rho}_0 &=& \mathbf{0} \, , \end{eqnarray} where $\boldsymbol{\mathcal{L}}_-(\infty)$ is the stationary limit of the Liouvillian for a system with the past Hamiltonian as well as the coupling to the environment. Our target state $\boldsymbol{\rho}_0$ will only be specified to zeroth order in the system-environment interaction. This is because for sufficiently long times (and in particular for the asymptotic equilibrium state) the diagonal elements of the reduced density matrix in the energy basis cannot be determined beyond zeroth order anyway when using the second-order perturbative master equation \cite{Accuracy}. Due to unavoidable degeneracy present in all open-system dynamics, one actually requires components of the fourth-order master equation to calculate the full second-order solutions. The second-order master equation provides for all second-order dynamical quantities, such as frequency shifts, dissipation, diffusion and decoherence rates. We are concerned here with the induced jolts, which are dynamical quantities, and so this subtle point does not raise any additional problems for us. 
\subsubsection{Preparation by decoherence} For $\mathbf{L}_n$ all commuting with each other, one can force a general environment into $\ell$-state preparation via decoherence. If the past Hamiltonian is deactivated, or more generally taken to commute with $\mathbf{L}_n$, then since all system operators commute with each other, the master equation and its solutions will trivially result in a system which decoheres in the $\ell$-basis associated with the $\mathbf{L}_n$. Thus, coefficients prepared in this manner are consistent with any initial state which is a completely incoherent mixture of $\ell$-states. [Note that if $\boldsymbol{\rho}_\mathrm{S}(-\infty)$ corresponds to a pure eigenstate of the set $\{ \mathbf{L}_n \}$, this procedure simply adjusts the state of the environment, while system and environment remain unentangled.] \subsubsection{Preparation by equilibration} A finite-temperature environment allows mixed state preparation by equilibration. Essentially one chooses the past Hamiltonian so that its thermal state (or some other steady state) is the desired initial state. For a positive-temperature environment, at zeroth order one can prepare a (sufficiently) mixed state $\boldsymbol{\rho}_0$ with the past Hamiltonian $\mathbf{H}_- = - T \, \log(\boldsymbol{\rho}_0)$. However, one must be careful that past system frequencies are small as compared to the high frequency jolts, otherwise this preparation will fail to remedy jolting. One can work out that the \emph{adiabatic preparation} regime is given by \begin{eqnarray} \frac{p_\mathrm{max}}{p_\mathrm{min}} & \ll & e^{\beta \, \Lambda} \, , \end{eqnarray} where $\Lambda$ is the jolt frequency and $p$ are the initial state probabilities of preparation energy levels connected by $\mathbf{L}_n$. (Clearly, for this method to work there can only be a finite number of such energy levels.) 
\subsubsection{Preparation by freezing} To prepare an initially pure state via equilibration at the order that we are working, one requires a zero-temperature environment for preparation by freezing. Then one can choose any $\mathbf{H}_-$ with ground state $\boldsymbol{\rho}_0$. It is important to emphasize that the reduced density matrix of the system corresponding to the ground state of the combined system will not be a pure state in general due to the entanglement between the system and the environment: the free ground state of the system is a pure state, but the reduced density matrix of the open system is in general a mixed state beyond zeroth order in the system-environment coupling. However, this point becomes irrelevant at the order that we are working since, as explained above, when using the second-order perturbative master equation to prepare the initial state by equilibration, one cannot meaningfully specify $\boldsymbol{\rho}_0$ beyond zeroth order. \begin{figure}[h] \centering \includegraphics[width=7cm]{prepared.pdf} \caption{Zero-temperature, ohmic decay rate for the \textcolor{red}{$\bullet$ unprepared} and \textcolor{blue}{$\cdot$ prepared} initial states of a two-level system with exponential cutoff frequency $\Lambda = 100 \, \Omega$. In this case preparation by freezing was used to create an initially excited state.} \label{fig:prepared} \end{figure} \subsection{Non-equilibrium preparation} In order to consider situations closer to actual laboratory experiments, here we will first allow the system to equilibrate with the environment (as described in the previous subsection) and then choose some preparation Hamiltonian $\mathbf{H}_\mathrm{P}(t)$, which would (in the absence of coupling to the environment) generate the desired initial state in some finite time $\tau_\mathrm{P}$. 
One simply applies the master-equation coefficients in Eq.~\eqref{eq:Aprep} with future Hamiltonian \begin{equation} \mathbf{H}_+(t) = \left\{ \begin{array}{l@{\;\;\;\;}rcl} \mathbf{H}_0(t) & \tau_\mathrm{P} & < & t \\ \mathbf{H}_\mathrm{P}(t) & t & < & \tau_\mathrm{P} \end{array} \right. \, , \end{equation} where $\mathbf{H}_0(t)$ is the desired post-preparation Hamiltonian. All jolts will be avoided if $1/\tau_\mathrm{P} \ll \Lambda$: the introduction of a non-vanishing preparation frequency serves to tame the jolts and eliminate their high-cutoff sensitivity. \subsubsection{State flipping} A possible preparation Hamiltonian, which could model, as a particular case, Rabi oscillations induced by an appropriate laser field acting on a two-level system, is the following: \begin{equation} \mathbf{H}_\mathrm{P} = \frac{\pi}{2 \tau_\mathrm{P}} \left( \ket{\psi_0}\!\!\bra{0} + \ket{0}\!\!\bra{\psi_0} \right) \, . \end{equation} Assuming that one has a zero-temperature environment and that the system is already equilibrated, driving the system with this Hamiltonian for a time $\tau_\mathrm{P}$ provides a relatively easy way of preparing an initial pure state $\ket{\psi_0}$. As discussed above, the reduced density matrix of the system will actually be a mixed state in general, because of the system-environment entanglement of the equilibrium state as well as the interaction to the environment while evolving the combined system during this additional finite preparation time. In fact, the preparation time $\tau_\mathrm{P}$ cannot be too long if we want the state of the system to be more or less close to $\ket{\psi_0}$. \subsubsection{State swapping} Let us consider a system which is initially equilibrated, without making any assumption as to the temperature of the environment. We couple the system to an ancillary and analog system (equivalent Hilbert spaces) that is already prepared in the desired initial state. 
The system of interest and ancilla are temporarily coupled in such a way that they swap states, for instance by means of the following block-matrix preparation Hamiltonian: \begin{equation} \mathbf{H}_\mathrm{P} = \frac{\pi}{2 \tau_\mathrm{P}} \left[ \begin{array}{cc} \mathbf{0} & \mathbf{1} \\ \mathbf{1} & \mathbf{0} \end{array} \right] \, . \end{equation} In the absence of coupling to the environment this would exactly swap the system and ancilla states in a time $\tau_\mathrm{P}$. The same remarks as for state flipping concerning the purity and accuracy of the prepared state when taking into account the coupling to the environment also apply in this case. \subsubsection{Other possibilities} Within the second-order perturbative approach, generation of equilibrium correlations in a laboratory setting can always be calculated using Eq.~\eqref{eq:Aprep}. One only needs to make sure that any additional state preparation does not rely upon large system energies as compared to the bath cutoff. For instance, one can consider the preparation of Ref.~\cite{Anglin97}, which relies on ancillary degrees of freedom to drive the equilibrium state into a coherent superposition. In fact, one could simply apply their own time-dependent Hamiltonian to our formulas as $\mathbf{H}_+(t)$ and obtain results consistent with theirs. \acknowledgments This work is supported partially by NSF Grants PHY-0426696, PHY-0801368, DARPA grant DARPAHR0011-09-1-0008 and the Laboratory of Physical Sciences.
{ "redpajama_set_name": "RedPajamaArXiv" }
673
{"url":"https:\/\/nebusresearch.wordpress.com\/tag\/infinite-series\/","text":"## From my Sixth A-to-Z: Taylor\u00a0Series\n\nBy the time of 2019 and my sixth A-to-Z series , I had some standard narrative tricks I could deploy. My insistence that everything is polynomials, for example. Anecdotes from my slight academic career. A prose style that emphasizes what we do with the idea of something rather than instructions. That last comes from the idea that if you wanted to know how to compute a Taylor series you\u2019d just look it up on Mathworld or Wikipedia or whatnot. The thing a pop mathematics blog can do is give some reason that you\u2019d want to know how to compute a Taylor series. I regret talking about functions that break Taylor series, though. I have to treat these essays as introducing the idea of a Taylor series to someone who doesn\u2019t know anything about them. And it\u2019s bad form to teach how stuff doesn\u2019t work too close to teaching how it does work. Readers tend to blur what works and what doesn\u2019t together. Still, $f(x) = \\exp(-\\frac{1}{x^2})$ is a really neat weird function and it\u2019d be a shame to let it go completely unmentioned.\n\nToday\u2019s A To Z term was nominated by APMA, author of the Everybody Makes DATA blog. It was a topic that delighted me to realize I could explain. Then it started to torment me as I realized there is a lot to explain here, and I had to pick something. So here\u2019s where things ended up.\n\n# Taylor Series.\n\nIn the mid-2000s I was teaching at a department being closed down. In its last semester I had to teach Computational Quantum Mechanics. The person who\u2019d normally taught it had transferred to another department. But a few last majors wanted the old department\u2019s version of the course, and this pressed me into the role. Teaching a course you don\u2019t really know is a rush. 
It\u2019s a semester of learning, and trying to think deeply enough that you can convey something to students. This while all the regular demands of the semester eat your time and working energy. And this in the leap of faith that the syllabus you made up, before you truly knew the subject, will be nearly enough right. And that you have not committed to teaching something you do not understand.\n\nSo around mid-course I realized I needed to explain finding the wave function for a hydrogen atom with two electrons. The wave function is this probability distribution. You use it to find things like the probability a particle is in a certain area, or has a certain momentum. Things like that. A proton with one electron is as much as I\u2019d ever done, as a physics major. We treat the proton as the center of the universe, immobile, and the electron hovers around that somewhere. Two electrons, though? A thing repelling your electron, and repelled by your electron, and neither of those having fixed positions? What the mathematics of that must look like terrified me. When I couldn\u2019t procrastinate it farther I accepted my doom and read exactly what it was I should do.\n\nIt turned out I had known what I needed for nearly twenty years already. Got it in high school.\n\nOf course I\u2019m discussing Taylor Series. The equations were loaded down with symbols, yes. But at its core, the important stuff, was this old and trusted friend.\n\nThe premise behind a Taylor Series is even older than that. It\u2019s universal. If you want to do something complicated, try doing the simplest thing that looks at all like it. And then make that a little bit more like you want. And then a bit more. Keep making these little improvements until you\u2019ve got it as right as you truly need. Put that vaguely, the idea describes Taylor series just as well as it describes making a video game or painting a state portrait. 
We can make it more specific, though.\n\nA series, in this context, means the sum of a sequence of things. This can be finitely many things. It can be infinitely many things. If the sum makes sense, we say the series converges. If the sum doesn\u2019t, we say the series diverges. When we first learn about series, the sequences are all numbers. $1 + \\frac{1}{2} + \\frac{1}{3} + \\frac{1}{4} + \\cdots$, for example, which diverges. (It adds to a number bigger than any finite number.) Or $1 + \\frac{1}{2^2} + \\frac{1}{3^2} + \\frac{1}{4^2} + \\cdots$, which converges. (It adds to $\\frac{1}{6}\\pi^2$.)\n\nIn a Taylor Series, the terms are all polynomials. They\u2019re simple polynomials. Let me call the independent variable \u2018x\u2019. Sometimes it\u2019s \u2018z\u2019, for the reasons you would expect. (\u2018x\u2019 usually implies we\u2019re looking at real-valued functions. \u2018z\u2019 usually implies we\u2019re looking at complex-valued functions. \u2018t\u2019 implies it\u2019s a real-valued function with an independent variable that represents time.) Each of these terms is simple. Each term is the distance between x and a reference point, raised to a whole power, and multiplied by some coefficient. The reference point is the same for every term. What makes this potent is that we use, potentially, many terms. Infinitely many terms, if need be.\n\nCall the reference point \u2018a\u2019. Or if you prefer, x0. z0 if you want to work with z\u2019s. You see the pattern. This \u2018a\u2019 is the \u201cpoint of expansion\u201d. The coefficients of each term depend on the original function at the point of expansion. The coefficient for the term that has $(x - a)$ is the first derivative of f, evaluated at a. The coefficient for the term that has $(x - a)^2$ is the second derivative of f, evaluated at a (times a number that\u2019s the same for the squared-term for every Taylor Series). 
The coefficient for the term that has $(x - a)^3$ is the third derivative of f, evaluated at a (times a different number that\u2019s the same for the cubed-term for every Taylor Series).\n\nYou\u2019ll never guess what the coefficient for the term with $(x - a)^{122,743}$ is. Nor will you ever care. The only reason you would wish to is to answer an exam question. The instructor will, in that case, have a function that\u2019s either the sine or the cosine of x. The point of expansion will be 0, $\\frac{\\pi}{2}$, $\\pi$, or $\\frac{3\\pi}{2}$.\n\nOtherwise you will trust that this is one of the terms of $(x - a)^n$, \u2018n\u2019 representing some counting number too great to be interesting. All the interesting work will be done with the Taylor series either truncated to a couple terms, or continued on to infinitely many.\n\nWhat a Taylor series offers is the chance to approximate a function we\u2019re genuinely interested in with a polynomial. This is worth doing, usually, because polynomials are easier to work with. They have nice analytic properties. We can automate taking their derivatives and integrals. We can set a computer to calculate their value at some point, if we need that. We might have no idea how to start calculating the logarithm of 1.3. We certainly have an idea how to start calculating $0.3 - \\frac{1}{2}(0.3^2) + \\frac{1}{3}(0.3^3)$. (Yes, it\u2019s 0.3. I\u2019m using a Taylor series with a = 1 as the point of expansion.)\n\nThe first couple terms tell us interesting things. Especially if we\u2019re looking at a function that represents something physical. The first two terms tell us where an equilibrium might be. The next term tells us whether an equilibrium is stable or not. If it is stable, it tells us how perturbations, points near the equilibrium, behave.\n\nThe first couple terms will describe a line, or a quadratic, or a cubic, some simple function like that. 
Usually adding more terms will make this Taylor series approximation a better fit to the original. There might be a larger region where the polynomial and the original function are close enough. Or the difference between the polynomial and the original function will be closer together on the same old region.\n\nWe would really like that region to eventually grow to the whole domain of the original function. We can\u2019t count on that, though. Roughly, the interval of convergence will stretch from \u2018a\u2019 to wherever the first weird thing happens. Weird things are, like, discontinuities. Vertical asymptotes. Anything you don\u2019t like dealing with in the original function, the Taylor series will refuse to deal with. Outside that interval, the Taylor series diverges and we just can\u2019t use it for anything meaningful. Which is almost supernaturally weird of them. The Taylor series uses information about the original function, but it\u2019s all derivatives at a single point. Somehow the derivatives of, say, the logarithm of x around x = 1 give a hint that the logarithm of 0 is undefinable. And so they won\u2019t help us calculate the logarithm of 3.\n\nThings can be weirder. There are functions that just break Taylor series altogether. Some are obvious. A function needs lots of derivatives at a point to have a good Taylor series approximation. So, many fractal curves won\u2019t have a Taylor series approximation. These curves are all corners, points where they aren\u2019t continuous or where derivatives don\u2019t exist. Some are obviously designed to break Taylor series approximations. We can make a function that follows different rules if x is rational than if x is irrational. There\u2019s no approximating that, and you\u2019d blame the person who made such a function, not the Taylor series. It can be subtle. 
The function defined by the rule $f(x) = \\exp{-\\frac{1}{x^2}}$, with the note that if x is zero then f(x) is 0, seems to satisfy everything we\u2019d look for. It\u2019s a function that\u2019s mostly near 1, that drops down to being near zero around where x = 0. But its Taylor series expansion around a = 0 is a horizontal line always at 0. The interval of convergence can be a single point, challenging our idea of what an interval is.\n\nThat\u2019s all right. If we can trust that we\u2019re avoiding weird parts, Taylor series give us an outstanding new tool. Grant that the Taylor series describes a function with the same rule as our original function. The Taylor series is often easier to work with, especially if we\u2019re working on differential equations. We can automate, or at least find formulas for, taking the derivative of a polynomial. Or adding together derivatives of polynomials. Often we can attack a differential equation too hard to solve otherwise by supposing the answer is a polynomial. This is essentially what that quantum mechanics problem used, and why the tool was so familiar when I was in a strange land.\n\nRoughly. What I was actually doing was treating the function I wanted as a power series. This is, like the Taylor series, the sum of a sequence of terms, all of which are $(x - a)^n$ times some coefficient. What makes it not a Taylor series is that the coefficients weren\u2019t the derivatives of any function I knew to start. But the experience of Taylor series trained me to look at functions as things which could be approximated by polynomials.\n\nThis gives us the hint to look at other series that approximate interesting functions. We get a host of these, with names like Laurent series and Fourier series and Chebyshev series and such. Laurent series look like Taylor series but we allow powers to be negative integers as well as positive ones. Fourier series do away with polynomials. 
They instead use trigonometric functions, sines and cosines. Chebyshev series build on polynomials, but not on pure powers. They\u2019ll use orthogonal polynomials. These behave like perpendicular directions do. That orthogonality makes many numerical techniques behave better.\n\nThe Taylor series is a great introduction to these tools. Its first several terms have good physical interpretations. Its calculation requires tools we learn early on in calculus. The habits of thought it teaches guides us even in unfamiliar territory.\n\nAnd I feel very relieved to be done with this. I often have a few false starts to an essay, but those are mostly before I commit words to text editor. This one had about four branches that now sit in my scrap file. I\u2019m glad to have a deadline forcing me to just publish already.\n\nThank you, though. This and the essays for the Fall 2019 A to Z should be at this link. Next week: the letters U and V. And all past A to Z essays ought to be at this link.\n\n## My 2019 Mathematics A To Z: Wallis\u00a0Products\n\nToday\u2019s A To Z term was suggested by Dina Yagodich, whose YouTube channel features many topics, including calculus and differential equations, statistics, discrete math, and Matlab. Matlab is especially valuable to know as a good quick calculation can answer many questions.\n\n# Wallis Products.\n\nThe Wallis named here is John Wallis, an English clergyman and mathematician and cryptographer. His most tweetable work is how we follow his lead in using the symbol \u221e to represent infinity. But he did much in calculus. And it\u2019s a piece of that which brings us to today. He particularly noticed this:\n\n$\\frac{1}{2}\\pi = \\frac{2}{1}\\cdot \\frac{2}{3}\\cdot \\frac{4}{3}\\cdot \\frac{4}{5}\\cdot \\frac{6}{5}\\cdot \\frac{6}{7}\\cdot \\frac{8}{7}\\cdot \\frac{8}{9}\\cdot \\frac{10}{9}\\cdot \\frac{10}{11}\\cdots$\n\nThis is an infinite product. It\u2019s multiplication\u2019s answer to the infinite series. 
It always amazes me when an infinite product works. There are dangers when you do anything with an infinite number of terms. Even the basics of arithmetic, like that you can change the order in which you calculate but still get the same result, break down. Series, in which you add together infinitely many things, are risky, but I\u2019m comfortable with the rules to know when the sum can be trusted. Infinite products seem more mysterious. Then you learn an infinite product converges if and only if the series made from the logarithms of the terms in it also converges. Then infinite products seem less exciting.\n\nThere are many infinite products that give us \u03c0. Some work quite efficiently, giving us lots of digits for a few terms\u2019 work. Wallis\u2019s formula does not. We need about a thousand terms for it to get us a \u03c0 of about 3.141. This is a bit much to calculate even today. In 1656, when he published it in Arithmetica Infinitorum, a book I have never read? Wallis was able to do mental arithmetic well. His biography at St Andrews says once when having trouble sleeping he calculated the square root of a 53-digit number in his head, and in the morning, remembered it, and was right. Still, this would be a lot of work. How could Wallis possibly do it? And what work could possibly convince anyone else that he was right?\n\nAs it common to striking discoveries it was a mixture of insight and luck and persistence and pattern recognition. He seems to have started with pondering the value of\n\n$\\int_0^1 \\left(1 - x^2\\right)^{\\frac{1}{2}} dx$\n\nHappily, he knew exactly what this was: $\\frac{1}{4}\\pi$. He knew this because of a bit of insight. We can interpret the integral here as asking for the area that\u2019s enclosed, on a Cartesian coordinate system, by the positive x-axis, the positive y-axis, and the set of points which makes true the equation $y = \\left(1 - x^2\\right)^\\frac{1}{2}$. 
This curve is the upper half of a circle with radius 1 and centered on the origin. The area enclosed by all this is one-fourth the area of a circle of radius 1. So that\u2019s how he could know the value of the integral, without doing any symbol manipulation.\n\nThe question, in modern notation, would be whether he could do that integral. And, for this? He couldn\u2019t. But, unable to do the problem he wanted, he tried doing the most similar problem he could and see what that proved. $\\left(1 - x^2\\right)^{\\frac{1}{2}}$ was beyond his power to integrate; but what if he swapped those exponents? Worked on $\\left(1 - x^{\\frac{1}{2}}\\right)^2$instead? This would not \u2014 could not \u2014 give him what he was interested in. But it would give him something he could calculate. So can we:\n\n$\\int_0^1 \\left(1 - x^{\\frac{1}{2}}\\right)^2 dx = \\int_0^1 1 - 2x^{\\frac{1}{2}} + x dx = 1 - 2\\cdot\\frac{2}{3} + \\frac{1}{2} = \\frac{1}{6}$\n\nAnd now here comes persistence. What if it\u2019s not $x^{\\frac{1}{2}}$ inside the parentheses there? If it\u2019s x raised to some other unit fraction instead? What if the parentheses aren\u2019t raised to the second power, but to some other whole number? Might that reveal something useful? Each of these integrals is calculable, and he calculated them. He worked out a table for many values of\n\n$\\int_0^1 \\left(1 - x^{\\frac{1}{p}}\\right)^q dx$\n\nfor different sets of whole numbers p and q. He trusted that if he kept this up, he\u2019d find some interesting pattern. And he does. The integral, for example, always turns out to be a unit fraction. And there\u2019s a deeper pattern. Let me share results for different values of p and q; the integral is the reciprocal of the number inside the table. 
The topmost row is values of q; the leftmost column is values of p.\n\n0 1 2 3 4 5 6 7\n0 1 1 1 1 1 1 1 1\n1 1 2 3 4 5 6 7 8\n2 1 3 6 10 15 21 28 36\n3 1 4 10 20 35 56 84 120\n4 1 5 15 35 70 126 210 330\n5 1 6 21 56 126 252 462 792\n6 1 7 28 84 210 462 924 1716\n7 1 8 36 120 330 792 1716 3432\n\nThere is a deep pattern here, although I\u2019m not sure Wallis noticed that one. Look along the diagonals, running from lower-left to upper-right. These are the coefficients of the binomial expansion. Yang Hui\u2019s triangle, if you prefer. Pascal\u2019s triangle, if you prefer that. Let me call the term in row p, column q of this table $a_{p, q}$. Then\n\n$a_{p, q} = \\frac{(p + q)!}{p! q!}$\n\nGreat material, anyway. The trouble is that it doesn\u2019t help Wallis with the original problem, which \u2014 in this notation \u2014 would have $p = \\frac12$ and $q = \\frac12$. What he really wanted was the Binomial Theorem, but western mathematicians didn\u2019t know it yet. Here a bit of luck comes in. He had noticed there\u2019s a relationship between terms in one column and terms in another, particularly, that\n\n$a_{p, q} = \\frac{p + q}{q} a_{p, q - 1}$\n\nSo why shouldn\u2019t that hold if p and q aren\u2019t whole numbers? \u2026 We would today say why should they hold? But Wallis was working with a different idea of mathematical rigor. He made assumptions that it turned out in this case were correct. Of course, had he been wrong, we wouldn\u2019t have heard of any of this and I would have an essay on some other topic.\n\nWith luck in Wallis\u2019s favor we can go back to making a table. What would the row for $p = \\frac12$ look like? We\u2019ll need both whole and half-integers. $p = \\frac12, q = 1$ is easy; its reciprocal is 1. $p = \\frac12, q = \\frac12$ is also easy; that\u2019s the insight Wallis had to start with. Its reciprocal is $\\frac{4}{\\pi}$. What about the rest? 
Use the equation just up above, relating $a_{p, q}$ to $a_{p, q - 1}$; then we can start to fill in:\n\n0 1\/2 1 3\/2 2 5\/2 3 7\/2\n1\/2 1 $\\frac{4}{\\pi}$ $\\frac{3}{2}$ $\\frac{4}{3}\\frac{4}{\\pi}$ $\\frac{3\\cdot 5}{2\\cdot 4}$ $\\frac{2\\cdot 4}{5}\\frac{4}{\\pi}$ $\\frac{3\\cdot 5\\cdot 7}{2\\cdot 4\\cdot 6}$ $\\frac{2\\cdot 2\\cdot 4\\cdot 4}{5\\cdot 7}\\frac{4}{\\pi}$\n\nAnything we can learn from this? \u2026 Well, sure. For one, as we go left to right, all these entries are increasing. So, like, the second column is less than the third which is less than the fourth. Here\u2019s a triple inequality for you:\n\n$\\frac{4}{\\pi} < \\frac{3}{2} < \\frac{4}{3}\\frac{4}{\\pi}$\n\nMultiply all that through by, oh, $\\frac{\\pi}{2}$. And then divide it all through by $\\frac{3}{2}$. What have we got?\n\n$\\frac{2\\cdot 2}{3} < \\frac{\\pi}{2} < \\frac{2\\cdot 2}{3}\\cdot \\frac{2\\cdot 2}{3}$\n\nI did some rearranging of terms, but, that\u2019s the pattern. One-half \u03c0 has to be between $\\frac{2\\cdot 2}{3}$ and four-thirds that.\n\nMove over a little. Start from the column where $q = \\frac32$. This starts us out with\n\n$\\frac{4}{3}\\frac{4}{\\pi} < \\frac{3\\cdot 5}{2\\cdot 4} < \\frac{2\\cdot 4}{5}\\frac{4}{\\pi}$\n\nMultiply everything by $\\frac{\\pi}{4}$, and divide everything by $\\frac{15}{16}$ and follow with some symbol manipulation. And here\u2019s a tip which would have saved me some frustration working out my notes: $\\frac{\\pi}{4} = \\frac{\\pi}{2}\\cdot\\frac{3}{6}$. Also, 6 equals 2 times 3. Later on, you may want to remember that 8 equals 2 times 4. All this gets us eventually to\n\n$\\frac{2\\cdot 2\\cdot 4\\cdot 4}{3\\cdot 3\\cdot 5} < \\frac{\\pi}{2} < \\frac{2\\cdot 2\\cdot 4\\cdot 4}{3\\cdot 3\\cdot 5}\\cdot \\frac{6}{5}$\n\nMove over to the next terms, starting from $q = \\frac52$. 
This will get us eventually to\n\n$\\frac{2\\cdot 2\\cdot 4\\cdot 4 \\cdot 6 \\cdot 6}{3\\cdot 3\\cdot 5\\cdot 5\\cdot 7} < \\frac{\\pi}{2} < \\frac{2\\cdot 2\\cdot 4\\cdot 4 \\cdot 6 \\cdot 6}{3\\cdot 3\\cdot 5\\cdot 5\\cdot 7}\\cdot \\frac{8}{7}$\n\nYou see the pattern here. Whatever the value of $\\frac{\\pi}{2}$, it\u2019s squeezed between some number, on the left side of this triple inequality, and that same number times \u2026 uh \u2026 something like $\\frac{10}{9}$ or $\\frac{12}{11}$ or $\\frac{14}{13}$ or $\\frac{1,000,000,000,002}{1,000,000,000,001}$. That last one is a number very close to 1. So the conclusion is that $\\frac{\\pi}{2}$ has to equal whatever that pattern is making for the number on the left there.\n\nWe can make this more rigorous. Like, we don\u2019t have to just talk about squeezing the number we want between two nearly-equal values. We can rely on the use of the \u2026 Squeeze Theorem \u2026 to prove this is okay. And there\u2019s much we have to straighten out. Particularly, we really don\u2019t want to write out expressions like\n\n$\\frac{2\\cdot 2 \\cdot 4\\cdot 4\\cdot 6\\cdot 6\\cdot 8\\cdot 8 \\cdot 10\\cdot 10 \\cdots}{3\\cdot 3\\cdot 5\\cdot 5 \\cdot 7\\cdot 7 \\cdot 9\\cdot 9 \\cdot 11\\cdot 11 \\cdots}$\n\nPut that way, it looks like, well, we can divide each 3 in the denominator into a 6 in the numerator to get a 2, each 5 in the denominator to a 10 in the numerator to get a 2, and so on. We get a product that\u2019s infinitely large, instead of anything to do with \u03c0. This is that problem where arithmetic on infinitely long strings of things becomes dangerous. To be rigorous, we need to write this product as the limit of a sequence, with finite numerator and denominator, and be careful about how to compose the numerators and denominators.\n\nBut this is all right. Wallis found a lovely result and in a way that\u2019s common to much work in mathematics. 
It used a combination of insight and persistence, with pattern recognition and luck making a great difference. Often when we first find something the proof of it is rough, and we need considerable work to make it rigorous. The path that got Wallis to these products is one we still walk.\n\nThere\u2019s just three more essays to go this year! I hope to have the letter X published here, Thursday. All the other A-to-Z essays for this year are also at that link. And past A-to-Z essays are at this link. Thanks for reading.\n\n## My 2019 Mathematics A To Z: Taylor\u00a0Series\n\nToday\u2019s A To Z term was nominated by APMA, author of the Everybody Makes DATA blog. It was a topic that delighted me to realize I could explain. Then it started to torment me as I realized there is a lot to explain here, and I had to pick something. So here\u2019s where things ended up.\n\n# Taylor Series.\n\nIn the mid-2000s I was teaching at a department being closed down. In its last semester I had to teach Computational Quantum Mechanics. The person who\u2019d normally taught it had transferred to another department. But a few last majors wanted the old department\u2019s version of the course, and this pressed me into the role. Teaching a course you don\u2019t really know is a rush. It\u2019s a semester of learning, and trying to think deeply enough that you can convey something to students. This while all the regular demands of the semester eat your time and working energy. And this in the leap of faith that the syllabus you made up, before you truly knew the subject, will be nearly enough right. And that you have not committed to teaching something you do not understand.\n\nSo around mid-course I realized I needed to explain finding the wave function for a hydrogen atom with two electrons. The wave function is this probability distribution. You use it to find things like the probability a particle is in a certain area, or has a certain momentum. Things like that. 
A proton with one electron is as much as I\u2019d ever done, as a physics major. We treat the proton as the center of the universe, immobile, and the electron hovers around that somewhere. Two electrons, though? A thing repelling your electron, and repelled by your electron, and neither of those having fixed positions? What the mathematics of that must look like terrified me. When I couldn\u2019t procrastinate it farther I accepted my doom and read exactly what it was I should do.\n\nIt turned out I had known what I needed for nearly twenty years already. Got it in high school.\n\nOf course I\u2019m discussing Taylor Series. The equations were loaded down with symbols, yes. But at its core, the important stuff, was this old and trusted friend.\n\nThe premise behind a Taylor Series is even older than that. It\u2019s universal. If you want to do something complicated, try doing the simplest thing that looks at all like it. And then make that a little bit more like you want. And then a bit more. Keep making these little improvements until you\u2019ve got it as right as you truly need. Put that vaguely, the idea describes Taylor series just as well as it describes making a video game or painting a state portrait. We can make it more specific, though.\n\nA series, in this context, means the sum of a sequence of things. This can be finitely many things. It can be infinitely many things. If the sum makes sense, we say the series converges. If the sum doesn\u2019t, we say the series diverges. When we first learn about series, the sequences are all numbers. $1 + \\frac{1}{2} + \\frac{1}{3} + \\frac{1}{4} + \\cdots$, for example, which diverges. (It adds to a number bigger than any finite number.) Or $1 + \\frac{1}{2^2} + \\frac{1}{3^2} + \\frac{1}{4^2} + \\cdots$, which converges. (It adds to $\\frac{1}{6}\\pi^2$.)\n\nIn a Taylor Series, the terms are all polynomials. They\u2019re simple polynomials. Let me call the independent variable \u2018x\u2019. 
Sometimes it\u2019s \u2018z\u2019, for the reasons you would expect. (\u2018x\u2019 usually implies we\u2019re looking at real-valued functions. \u2018z\u2019 usually implies we\u2019re looking at complex-valued functions. \u2018t\u2019 implies it\u2019s a real-valued function with an independent variable that represents time.) Each of these terms is simple. Each term is the distance between x and a reference point, raised to a whole power, and multiplied by some coefficient. The reference point is the same for every term. What makes this potent is that we use, potentially, many terms. Infinitely many terms, if need be.\n\nCall the reference point \u2018a\u2019. Or if you prefer, x0. z0 if you want to work with z\u2019s. You see the pattern. This \u2018a\u2019 is the \u201cpoint of expansion\u201d. The coefficients of each term depend on the original function at the point of expansion. The coefficient for the term that has $(x - a)$ is the first derivative of f, evaluated at a. The coefficient for the term that has $(x - a)^2$ is the second derivative of f, evaluated at a (times a number that\u2019s the same for the squared-term for every Taylor Series). The coefficient for the term that has $(x - a)^3$ is the third derivative of f, evaluated at a (times a different number that\u2019s the same for the cubed-term for every Taylor Series).\n\nYou\u2019ll never guess what the coefficient for the term with $(x - a)^{122,743}$ is. Nor will you ever care. The only reason you would wish to is to answer an exam question. The instructor will, in that case, have a function that\u2019s either the sine or the cosine of x. The point of expansion will be 0, $\\frac{\\pi}{2}$, $\\pi$, or $\\frac{3\\pi}{2}$.\n\nOtherwise you will trust that this is one of the terms of $(x - a)^n$, \u2018n\u2019 representing some counting number too great to be interesting. 
All the interesting work will be done with the Taylor series either truncated to a couple terms, or continued on to infinitely many.\n\nWhat a Taylor series offers is the chance to approximate a function we\u2019re genuinely interested in with a polynomial. This is worth doing, usually, because polynomials are easier to work with. They have nice analytic properties. We can automate taking their derivatives and integrals. We can set a computer to calculate their value at some point, if we need that. We might have no idea how to start calculating the logarithm of 1.3. We certainly have an idea how to start calculating $0.3 - \\frac{1}{2}(0.3^2) + \\frac{1}{3}(0.3^3)$. (Yes, it\u2019s 0.3. I\u2019m using a Taylor series with a = 1 as the point of expansion.)\n\nThe first couple terms tell us interesting things. Especially if we\u2019re looking at a function that represents something physical. The first two terms tell us where an equilibrium might be. The next term tells us whether an equilibrium is stable or not. If it is stable, it tells us how perturbations, points near the equilibrium, behave.\n\nThe first couple terms will describe a line, or a quadratic, or a cubic, some simple function like that. Usually adding more terms will make this Taylor series approximation a better fit to the original. There might be a larger region where the polynomial and the original function are close enough. Or the difference between the polynomial and the original function will be closer together on the same old region.\n\nWe would really like that region to eventually grow to the whole domain of the original function. We can\u2019t count on that, though. Roughly, the interval of convergence will stretch from \u2018a\u2019 to wherever the first weird thing happens. Weird things are, like, discontinuities. Vertical asymptotes. Anything you don\u2019t like dealing with in the original function, the Taylor series will refuse to deal with. 
Outside that interval, the Taylor series diverges and we just can\u2019t use it for anything meaningful. Which is almost supernaturally weird of them. The Taylor series uses information about the original function, but it\u2019s all derivatives at a single point. Somehow the derivatives of, say, the logarithm of x around x = 1 give a hint that the logarithm of 0 is undefinable. And so they won\u2019t help us calculate the logarithm of 3.\n\nThings can be weirder. There are functions that just break Taylor series altogether. Some are obvious. A function needs lots of derivatives at a point to have a good Taylor series approximation. So, many fractal curves won\u2019t have a Taylor series approximation. These curves are all corners, points where they aren\u2019t continuous or where derivatives don\u2019t exist. Some are obviously designed to break Taylor series approximations. We can make a function that follows different rules if x is rational than if x is irrational. There\u2019s no approximating that, and you\u2019d blame the person who made such a function, not the Taylor series. It can be subtle. The function defined by the rule $f(x) = \\exp{-\\frac{1}{x^2}}$, with the note that if x is zero then f(x) is 0, seems to satisfy everything we\u2019d look for. It\u2019s a function that\u2019s mostly near 1, that drops down to being near zero around where x = 0. But its Taylor series expansion around a = 0 is a horizontal line always at 0. The interval of convergence can be a single point, challenging our idea of what an interval is.\n\nThat\u2019s all right. If we can trust that we\u2019re avoiding weird parts, Taylor series give us an outstanding new tool. Grant that the Taylor series describes a function with the same rule as our original function. The Taylor series is often easier to work with, especially if we\u2019re working on differential equations. We can automate, or at least find formulas for, taking the derivative of a polynomial. 
Or adding together derivatives of polynomials. Often we can attack a differential equation too hard to solve otherwise by supposing the answer is a polynomial. This is essentially what that quantum mechanics problem used, and why the tool was so familiar when I was in a strange land.\n\nRoughly. What I was actually doing was treating the function I wanted as a power series. This is, like the Taylor series, the sum of a sequence of terms, all of which are $(x - a)^n$ times some coefficient. What makes it not a Taylor series is that the coefficients weren\u2019t the derivatives of any function I knew to start. But the experience of Taylor series trained me to look at functions as things which could be approximated by polynomials.\n\nThis gives us the hint to look at other series that approximate interesting functions. We get a host of these, with names like Laurent series and Fourier series and Chebyshev series and such. Laurent series look like Taylor series but we allow powers to be negative integers as well as positive ones. Fourier series do away with polynomials. They instead use trigonometric functions, sines and cosines. Chebyshev series build on polynomials, but not on pure powers. They\u2019ll use orthogonal polynomials. These behave like perpendicular directions do. That orthogonality makes many numerical techniques behave better.\n\nThe Taylor series is a great introduction to these tools. Its first several terms have good physical interpretations. Its calculation requires tools we learn early on in calculus. The habits of thought it teaches guides us even in unfamiliar territory.\n\nAnd I feel very relieved to be done with this. I often have a few false starts to an essay, but those are mostly before I commit words to text editor. This one had about four branches that now sit in my scrap file. I\u2019m glad to have a deadline forcing me to just publish already.\n\nThank you, though. This and the essays for the Fall 2019 A to Z should be at this link. 
Next week: the letters U and V. And all past A to Z essays ought to be at this link.\n\n## Reading the Comics, May 23, 2018: Nice Warm Gymnasium\u00a0Edition\n\nI haven\u2019t got any good ideas for the title for this collection of mathematically-themed comic strips. But I was reading the Complete Peanuts for 1999-2000 and just ran across one where Rerun talked about consoling his basketball by bringing it to a nice warm gymnasium somewhere. So that\u2019s where that pile of words came from.\n\nMark Anderson\u2019s Andertoons for the 21st is the Mark Anderson\u2019s Andertoons for this installment. It has Wavehead suggest a name for the subtraction of fractions. It\u2019s not by itself an absurd idea. Many mathematical operations get specialized names, even though we see them as specific cases of some more general operation. This may reflect the accidents of history. We have different names for addition and subtraction, though we eventually come to see them as the same operation.\n\nIn calculus we get introduced to Maclaurin Series. These are polynomials that approximate more complicated functions. They\u2019re the best possible approximations for a region around 0 in the domain. They\u2019re special cases of the Taylor Series. Those are polynomials that approximate more complicated functions. But you get to pick where in the domain they should be the best approximation. Maclaurin series are nothing but a Taylor series; we keep the names separate anyway, for reasons of history. And slightly baffling ones; James Gregory and Brook Taylor studied Taylor series before Colin Maclaurin did Maclaurin series. But at least Taylor worked on Taylor series, and Maclaurin on Maclaurin series. So for a wonder mathematicians named these things for appropriate people. (Ignoring that Indian mathematicians were poking around this territory centuries before the Europeans were. 
I don\u2019t know whether English mathematicians of the 18th century could be expected to know of Indian work in the field, in fairness.)\n\nIn numerical calculus, we have a scheme for approximating integrals known as the trapezoid rule. It approximates the areas under curves by approximating a curve as a trapezoid. (Any questions?) But this is one of the Runge-Kutta methods. Nobody calls it that except to show they know neat stuff about Runge-Kutta methods. The special names serve to pick out particularly interesting or useful cases of a more generally used thing. Wavehead\u2019s coinage probably won\u2019t go anywhere, but it doesn\u2019t hurt to ask.\n\nPercy Crosby\u2019s Skippy for the 22nd I admit I don\u2019t quite understand. It mentions arithmetic anyway. I think it\u2019s a joke about a textbook like this being good only if it\u2019s got the questions and the answers. But it\u2019s the rare Skippy that\u2019s as baffling to me as most circa-1930 humor comics are.\n\nHam\u2019s Life on Earth for the 23rd presents the blackboard full of symbols as an attempt to prove something challenging. In this case, to say something about the existence of God. It\u2019s tempting to suppose that we could say something about the existence or nonexistence of God using nothing but logic. And there are mathematics fields that are very close to pure logic. But our scary friends in the philosophy department have been working on the ontological argument for a long while. They\u2019ve found a lot of arguments that seem good, and that fall short for reasons that seem good. I\u2019ll defer to their experience, and suppose that any mathematics-based proof to have the same problems.\n\nBill Amend\u2019s FoxTrot Classics for the 23rd deploys a Maclaurin series. If you want to calculate the cosine of an angle, and you know the angle in radians, you can find the value by adding up the terms in an infinitely long series. 
So if \u03b8 is the angle, measured in radians, then its cosine will be:\n\n$\\cos\\left(\\theta\\right) = \\sum_{k = 0}^{\\infty} \\left(-1\\right)^k \\frac{\\theta^{2k}}{\\left(2k\\right)!}$\n\n60 degrees is $\\frac{\\pi}{3}$ in radians and you see from the comic how to turn this series into a thing to calculate. The series does, yes, go on forever. But since the terms alternate in sign \u2014 positive then negative then positive then negative \u2014 you have a break. Suppose all you want is the answer to within an error margin. Then you can stop adding up terms once you\u2019ve gotten to a term that\u2019s smaller than your error margin. So if you want the answer to within, say, 0.001, you can stop as soon as you find a term with absolute value less than 0.001.\n\nFor high school trig, though, this is all overkill. There\u2019s five really interesting angles you\u2019d be expected to know anything about. They\u2019re 0, 30, 45, 60, and 90 degrees. And you need to know about reflections of those across the horizontal and vertical axes. Those give you, like, -30 degrees or 135 degrees. Those reflections don\u2019t change the magnitude of the cosines or sines. They might change the plus-or-minus sign is all. And there\u2019s only three pairs of numbers that turn up for these five interesting angles. There\u2019s 0 and 1. There\u2019s $\\frac{1}{2}$ and $\\frac{\\sqrt{3}}{2}$. There\u2019s $\\frac{1}{\\sqrt{2}}$ and $\\frac{1}{\\sqrt{2}}$. Three things to memorize, plus a bit of orienteering, to know whether the cosine or the sine should be the larger size and whether they should be positive or negative. And then you\u2019ve got them all.\n\nYou might get asked for, like, the sine of 15 degrees. But that\u2019s someone testing whether you know the angle-addition or angle-subtraction formulas. Or the half-angle and double-angle formulas. Nobody would expect you to know the cosine of 15 degrees. The cosine of 30 degrees, though? Sure. 
It\u2019s $\\frac{\\sqrt{3}}{2}$.\n\nMike Thompson\u2019s Grand Avenue for the 23rd is your basic confused-student joke. People often have trouble going from percentages to decimals to fractions and back again. Me, I have trouble in going from percentage chances to odds, as in, \u201ctwo to one odds\u201d or something like that. (Well, \u201cone to one odds\u201d I feel confident in, and \u201ctwo to one\u201d also. But, say, \u201cseven to five odds\u201d I can\u2019t feel sure I understand, other than that the second choice is a perceived to be a bit more likely than the first.)\n\n\u2026 You know, this would have parsed as the Maclaurin Series Edition, wouldn\u2019t it? Well, if only I were able to throw away words I\u2019ve already written and replace them with better words before publishing, huh?\n\nI was all set to say how complaining about GoComics.com\u2019s pages not loading had gotten them fixed. But they only worked for Monday alone; today they\u2019re broken again. Right. I haven\u2019t tried sending an error report again; we\u2019ll see if that works. Meanwhile, I\u2019m still not through last week\u2019s comic strips and I had just enough for one day to nearly enough justify an installment for the one day. Should finish off the rest of the week next essay, probably in time for next week.\n\nMark Leiknes\u2019s Cow and Boy rerun for the 23rd circles around some of Zeno\u2019s Paradoxes. At the heart of some of them is the question of whether a thing can be divided infinitely many times, or whether there must be some smallest amount of a thing. Zeno wonders about space and time, but you can do as well with substance, with matter. Mathematics majors like to say the problem is easy; Zeno just didn\u2019t realize that a sum of infinitely many things could be a finite and nonzero number. This misses the good question of how the sum of infinitely many things, none of which are zero, can be anything but infinitely large? 
Or, put another way, what\u2019s different in adding $\\frac11 + \\frac12 + \\frac13 + \\frac14 + \\cdots$ and adding $\\frac11 + \\frac14 + \\frac19 + \\frac{1}{16} + \\cdots$ that the one is infinitely large and the other not?\n\nOr how about this. Pick your favorite string of digits. 23. 314. 271828. Whatever. Add together the series $\\frac11 + \\frac12 + \\frac13 + \\frac14 + \\cdots$except that you omit any terms that have your favorite string there. So, if you picked 23, don\u2019t add $\\frac{1}{23}$, or $\\frac{1}{123}$, or $\\frac{1}{802301}$ or such. That depleted series does converge. The heck is happening there? (Here\u2019s why it\u2019s true for a single digit being thrown out. Showing it\u2019s true for longer strings of digits takes more work but not really different work.)\n\nJ C Duffy\u2019s Lug Nuts for the 23rd is, I think, the first time I have to give a content warning for one of these. It\u2019s a porn-movie advertisement spoof. But it mentions Einstein and Pi and has the tagline \u201cshe didn\u2019t go for eggheads \u2026 until he showed her a new equation!\u201d. So, you know, it\u2019s using mathematics skill as a signifier of intelligence and riffing on the idea that nerds like sex too.\n\nJohn Graziano\u2019s Ripley\u2019s Believe It or Not for the 23rd has a trivia that made me initially think \u201cnot\u201d. It notes Vince Parker, Senior and Junior, of Alabama were both born on Leap Day, the 29th of February. I\u2019ll accept this without further proof because of the very slight harm that would befall me were I to accept this wrongly. But it also asserted this was a 1-in-2.1-million chance. That sounded wrong. Whether it is depends on what you think the chance is of.\n\nBecause what\u2019s the remarkable thing here? That a father and son have the same birthday? Surely the chance of that is 1 in 365. The father could be born any day of the year; the son, also any day. 
Trusting there\u2019s no influence of the father\u2019s birthday on the son\u2019s, then, 1 in 365 it is. Or, well, 1 in about 365.25, since there are leap days. There\u2019s approximately one leap day every four years, so, surely that, right?\n\nAnd not quite. In four years there\u2019ll be 1,461 days. Four of them will be the 29th of January and four the 29th of September and four the 29th of August and so on. So if the father was born any day but leap day (a \u201cnon-bissextile day\u201d, if you want to use a word that starts a good fight in a Scrabble match), the chance the son\u2019s birth is the same is 4 chances in 1,461. 1 in 365.25. If the father was born on Leap Day, then the chance the son was born the same day is only 1 chance in 1,461. Still way short of 1-in-2.1-million. So, Graziano\u2019s Ripley\u2019s is wrong if that\u2019s the chance we\u2019re looking at.\n\nAh, but what if we\u2019re looking at a different chance? What if we\u2019re looking for the chance that the father is born the 29th of February and the son is also born the 29th of February? There\u2019s a 1-in-1,461 chance the father\u2019s born on Leap Day. And a 1-in-1,461 chance the son\u2019s born on Leap Day. And if those events are independent, the father\u2019s birth date not influencing the son\u2019s, then the chance of both those together is indeed 1 in 2,134,521. So Graziano\u2019s Ripley\u2019s is right if that\u2019s the chance we\u2019re looking at.\n\nWhich is a good reminder: if you want to work out the probability of some event, work out precisely what the event is. Ordinary language is ambiguous. This is usually a good thing. But it\u2019s fatal to discussing probability questions sensibly.\n\nZach Weinersmith\u2019s Saturday Morning Breakfast Cereal for the 23rd presents his mathematician discovering a new set of numbers. This will happen. Mathematics has had great success, historically, finding new sets of things that look only a bit like numbers were understood. 
And showing that if they follow rules that are, as much as possible, like the old numbers, we get useful stuff out of them. The mathematician claims to be a formalist, in the punch line. This is a philosophy that considers mathematical results to be the things you get by starting with some symbols and some rules for manipulating them. What this stuff means, and whether it reflects anything of interest in the real world, isn\u2019t of interest. We can know the results are good because they follow the rules.\n\nThis sort of approach can be fruitful. It can force you to accept results that are true but intuition-defying. And it can give results impressive confidence. You can even, at least in principle, automate the creating and the checking of logical proofs. The disadvantages are that it takes forever to get anything done. And it\u2019s hard to shake the idea that we ought to have some idea what any of this stuff means.\n\n## Something Cute I Never Noticed Before About Infinite\u00a0Sums\n\nThis is a trifle, for which I apologize. I\u2019ve been sick. But I ran across this while reading Carl B Boyer\u2019s The History of the Calculus and its Conceptual Development. This is from the chapter \u201cA Century Of Anticipation\u201d, developments leading up to Newton and Leibniz and The Calculus As We Know It. In particular, while working out the indefinite integrals for simple powers \u2014 x raised to a whole number \u2014 John Wallis, whom you\u2019ll remember from such things as the first use of the \u221e symbol and beating up Thomas Hobbes for his lunch money, noted this:\n\n$\\frac{0 + 1}{1 + 1} = \\frac{1}{2}$\n\nWhich is fine enough. But then Wallis also noted that\n\n$\\frac{0 + 1 + 2}{2 + 2 + 2} = \\frac{1}{2}$\n\nAnd furthermore that\n\n$\\frac{0 + 1 + 2 + 3}{3 + 3 + 3 + 3} = \\frac{1}{2}$\n\n$\\frac{0 + 1 + 2 + 3 + 4}{4 + 4 + 4 + 4 + 4} = \\frac{1}{2}$\n\n$\\frac{0 + 1 + 2 + 3 + 4 + 5}{5 + 5 + 5 + 5 + 5 + 5} = \\frac{1}{2}$\n\nAnd isn\u2019t that neat? 
Wallis goes on to conclude that this is true not just for finitely many terms in the numerator and denominator, but also if you carry on infinitely far. This seems like a dangerous leap to make, but they treated infinities and infinitesimals dangerously in those days.\n\nWhat makes this work is \u2014 well, it\u2019s just true; explaining how that can be is kind of like explaining how it is circles have a center point. All right. But we can prove that this has to be true at least for finite terms. A sum like 0 + 1 + 2 + 3 is an arithmetic progression. It\u2019s the sum of a finite number of terms, each of them an equal difference from the one before or the one after (or both).\n\nIts sum will be equal to the number of terms times the arithmetic mean of the first and last. That is, it\u2019ll be the number of terms times the sum of the first and the last terms and divided that by two. So that takes care of the numerator. If we have the sum 0 + 1 + 2 + 3 + up to whatever number you like which we\u2019ll call \u2018N\u2019, we know its value has to be (N + 1) times N divided by 2. That takes care of the numerator.\n\nThe denominator, well, that\u2019s (N + 1) cases of the number N being added together. Its value has to be (N + 1) times N. So the fraction is (N + 1) times N divided by 2, itself divided by (N + 1) times N. That\u2019s got to be one-half except when N is zero. And if N were zero, well, that fraction would be 0 over 0 and we know what kind of trouble that is.\n\nIt\u2019s a tiny bit, although you can use it to make an argument about what to expect from $\\int{x^n dx}$, as Wallis did. And it delighted me to see and to understand why it should be so.\n\n## Calculating Pi Less\u00a0Terribly\n\nBack on \u201cPi Day\u201d I shared a terrible way of calculating the digits of \u03c0. It\u2019s neat in principle, yes. Drop a needle randomly on a uniformly lined surface. Keep track of how often the needle crosses over a line. 
From this you can work out the numerical value of \u03c0. But it\u2019s a terrible method. To be sure that \u03c0 is about 3.14, rather than 3.12 or 3.38, you can expect to need to do over three and a third million needle-drops. So I described this as a terrible way to calculate \u03c0.\n\nA friend on Twitter asked if it was worse than adding up 4 * (1 \u2013 1\/3 + 1\/5 \u2013 1\/7 + \u2026 ). It\u2019s a good question. The answer is yes, it\u2019s far worse than that. But I want to talk about working \u03c0 out that way.\n\nContinue reading \u201cCalculating Pi Less\u00a0Terribly\u201d\n\n## But How Interesting Is A Basketball\u00a0Score?\n\nWhen I worked out how interesting, in an information-theory sense, a basketball game \u2014 and from that, a tournament \u2014 might be, I supposed there was only one thing that might be interesting about the game: who won? Or to be exact, \u201cdid (this team) win\u201d? But that isn\u2019t everything we might want to know about a game. For example, we might want to know what a team scored. People often do. So how to measure this?\n\nThe answer was given, in embryo, in my first piece about how interesting a game might be. If you can list all the possible outcomes of something that has multiple outcomes, and how probable each of those outcomes is, then you can describe how much information there is in knowing the result. It\u2019s the sum, for all of the possible results, of the quantity negative one times the probability of the result times the logarithm-base-two of the probability of the result. When we were interested in only whether a team won or lost there were just the two outcomes possible, which made for some fairly simple calculations, and indicates that the information content of a game can be as high as 1 \u2014 if the team is equally likely to win or to lose \u2014 or as low as 0 \u2014 if the team is sure to win, or sure to lose. 
And the units of this measure are bits, the same kind of thing we use to measure (in groups of bits called bytes) how big a computer file is.\n\n## It Would Have Been One More Ride\u00a0Because\n\nI apologize for being slow writing the conclusion of the explanation for why my Dearly Beloved and I would expect one more ride following our plan to keep re-riding Disaster Transport as long as a fairly flipped coin came up tails. It\u2019s been a busy week, and actually, I\u2019d got stuck trying to think of a way to explain the sum I needed to take using only formulas that a normal person might find, or believe. I think I have it.\n\n## Proving A Number Is Not\u00a01\n\nI want to do some more tricky examples of using this \u03b5 idea, where I show two numbers have to be the same because the difference between them is smaller than every positive number. Before I do, I want to put out a problem where we can show two numbers are not the same, since I think that makes it easier to see why the proof works where it does. It\u2019s easy to get hypnotized by the form of an argument, and to not notice that the result doesn\u2019t actually hold, particularly if all you see are repetitions of proofs where things work out and don\u2019t see cases of the proof being invalid.\n\n## What Numbers Equal\u00a0Zero?\n\nI want to give some examples of showing numbers are equal by showing the difference between them is \u03b5. It\u2019s a fairly abstruse idea but when it works amazing things become possible.\n\nThe easy example, although one that produces strong resistance, is showing that the number 1 is equal to the number 0.9999\u2026. But here I have to say what I mean by that second number. It\u2019s obvious to me that I mean a number formed by putting a decimal point up, and then filling in a \u20189\u2019 to every digit past the decimal, repeating forever and ever without end. That\u2019s a description so easy to grasp it looks obvious. 
I can give a more precise, less intuitively obvious, description, though, which makes it easier to prove what I\u2019m going to be claiming.","date":"2023-03-22 15:13:52","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 0, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 95, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.7393754720687866, \"perplexity\": 485.4877926624062}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 20, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2023-14\/segments\/1679296943845.78\/warc\/CC-MAIN-20230322145537-20230322175537-00561.warc.gz\"}"}
null
null
This list covers the railway stations in the Berlin area. These include both passenger stations and marshalling yards, but not goods stations. Because the Berlin S-Bahn network has expanded to include stations in the state of Brandenburg, the table shows only those stations lying within the Verkehrsverbund Berlin-Brandenburg's present-day Berlin ABC fare zones (i.e. those up to about 15 kilometres from the Berlin city boundary), and those formerly served by Berlin's suburban services. The latter ran out beyond the capital's boundaries to the next largest towns along the main and branch lines. The farthest towns on the lines covered here are listed below: Rüdnitz (Stettin Railway) – Werneuchen (Wriezen Railway) – Strausberg (Prussian Eastern Railway) – Fürstenwalde (Lower Silesian-Märkisch Railway) – Kablow (Königs Wusterhausen–Grunow) – Königs Wusterhausen (Görlitz Railway) – Mittenwalde (Neukölln–Mittenwalde railway) – Wünsdorf (Dresden Railway) – Thyrow (Anhalt Railway) – Beelitz-Stadt (Brandenburg Ring Railway) – Beelitz-Heilstätten (Wetzlar Railway) – Werder (Berlin-Potsdam railway (Stammbahn) – Wustermark (Lehrte Railway) – Nauen (Hamburg Railway) – Vehlefanz (Kremmen Railway) – Sachsenhausen (Nordb) (Prussian Northern Railway) – Wensickendorf and Wandlitzsee (Heidekraut Railway). Overview In Berlin there are long-distance stations for rail travellers. The following stop at these stations: Deutsche Bahn AG trains: InterCityExpress (ICE) InterCity (IC) EuroCity (EC) and Trains of other railway companies: InterConneX (X) – (Veolia Verkehr) Harz-Elbe-Express (HEX) – (Veolia Verkehr) Berlin Night Express (Georg Verkehrsorganisation) (GVG) There are also regional stations. The following call at these stations: Deutsche Bahn AG trains: RegionalExpresse (RE) and RegionalBahnen (RB) Trains of other railway companies: Niederbarnimer Eisenbahn (NE), Ostdeutsche Eisenbahn (OE), Märkische Regiobahn (MR) und Prignitzer Eisenbahn (PE). 
The following stop at S-Bahn stations: S-Bahn Berlin GmbH trains Stadtschnellbahn trains (S-Bahn) There are also two marshalling yards in the Berlin area. Passenger stations The following table gives an overview of the current, former and planned railway stations and halts in the Berlin together with the associated district abbreviation (as used on car number plates) and the types of train that stop there. For readability only one example of the train category is given in the table. ICE for long-distance express trains, i.e. also TGV, Thalys etc. IC for special long-distance trains, i.e. Also private ones like InterConnex etc. RE for local express trains RB for regional trains, including those of private operators such as ODEG, UBB etc. S for S-Bahn trains x means that the train type (or a similar one) calls at the station x¹ means that the train type (or a similar one) used to call at the station For reasons of space only the car number plate abbreviation for the town or rural district is given. 
These are: Berlin (B) Landkreis Barnim (BAR) Landkreis Dahme-Spreewald (LDS) Landkreis Havelland (HVL) Landkreis Märkisch-Oderland (MOL) Landkreis Oberhavel (OHV) Landkreis Oder-Spree (LOS) Potsdam (P) Landkreis Potsdam-Mittelmark (PM) Landkreis Teltow-Fläming (TF) Marshalling yards Operational facilities Seddin (located outside the Berlin city area on the Wetzlar Railway south of Potsdam) Berlin Nordost (on the Outer Ring) Wustermark Rbf (formerly Berlin's largest marshalling yard, located outside the Berlin city area, closed in 2001, bought in 2008 by the Havelländische Eisenbahn (HVLE) and since 1 July 2008 used as a link line from the Rail & Logistik Center at Wustermark) Closed facilities Berlin Wuhlheide Rbf (1994 closed, demolished) Berlin-Tempelhof Rbf (1952 closed, demolished) Berlin-Pankow (1997 closed, partly demolished) Berlin-Lichtenberg (converted to a storage yard for passenger coaches) Berlin-Rummelsburg (converted to a storage yard for passenger coaches) Berlin-Niederschöneweide (1996 closed) In addition, during the Second World War there were three supplementary marshalling yards (Hilfsrangierbahnhöfe) outside the political city boundaries: Rüdnitz Fredersdorf Großbeeren See also List of Berlin S-Bahn stations List of Berlin U-Bahn stations List of railway stations in Brandenburg List of scheduled railway routes in Germany List of closed railway lines in Brandenburg and Berlin External links Berlin S-Bahn stations ! ! Railway stations Rail Berl
{ "redpajama_set_name": "RedPajamaWikipedia" }
789
{"url":"https:\/\/www.aimsciences.org\/article\/doi\/10.3934\/proc.2003.2003.313","text":"# American Institute of Mathematical Sciences\n\n2003,\u00a02003(Special):\u00a0313-319. doi:\u00a010.3934\/proc.2003.2003.313\n\n## A three point boundary value problem containing the operator\n\n 1 Department of Mathematics, Pontificia Universidad Cat\u00f3lica de Chile, Casilla 306, Correo 22, Santiago 2 Centro de Modelamiento Matem\u00e1tico and Departamento de Ingenieria Matem\u00e1tica, F.C.F.M, Universidad de Chile, Casilla 170, Correo 3, Santiago\n\nReceived\u00a0 September 2002 Revised\u00a0 February 2003 Published\u00a0 April 2003\n\nWe consider problems of the form\n\n$(\\phi(u'))' = f(t, u, u'), t \\in (0, 1)$;\n\nunder the three point boundary condition\n\n$u'(0) = 0, u(n) = u(1);$\n\nwhere $n \\in$ (0, 1) is given. This problem is at resonance. Three-point boundary value problems at resonance have been studied in several papers, we present here some new result as well as generalizations of some results valid for particular forms of the operator -$(\\phi(u'))'. Citation: Marta Garc\u00eda-Huidobro, Raul Man\u00e1sevich. A three point boundary value problem containing the operator. Conference Publications, 2003, 2003 (Special) : 313-319. doi: 10.3934\/proc.2003.2003.313 [1] Mehdi Badsi. Collisional sheath solutions of a bi-species Vlasov-Poisson-Boltzmann boundary value problem. Kinetic & Related Models, , () : -. doi: 10.3934\/krm.2020052 [2] Antoine Benoit. Weak well-posedness of hyperbolic boundary value problems in a strip: when instabilities do not reflect the geometry. Communications on Pure & Applied Analysis, 2020, 19 (12) : 5475-5486. doi: 10.3934\/cpaa.2020248 [3] Mokhtar Bouloudene, Manar A. Alqudah, Fahd Jarad, Yassine Adjabi, Thabet Abdeljawad. Nonlinear singular$ p $-Laplacian boundary value problems in the frame of conformable derivative. 
Discrete & Continuous Dynamical Systems - S, 2020 doi: 10.3934\/dcdss.2020442 [4] Marco Ghimenti, Anna Maria Micheletti. Compactness results for linearly perturbed Yamabe problem on manifolds with boundary. Discrete & Continuous Dynamical Systems - S, 2020 doi: 10.3934\/dcdss.2020453 [5] Shenglan Xie, Maoan Han, Peng Zhu. A posteriori error estimate of weak Galerkin fem for second order elliptic problem with mixed boundary condition. Discrete & Continuous Dynamical Systems - B, 2020 doi: 10.3934\/dcdsb.2020340 [6] Mostafa Mbekhta. Representation and approximation of the polar factor of an operator on a Hilbert space. Discrete & Continuous Dynamical Systems - S, 2020 doi: 10.3934\/dcdss.2020463 [7] Federico Rodriguez Hertz, Zhiren Wang. On$ \\epsilon \\$-escaping trajectories in homogeneous spaces. Discrete & Continuous Dynamical Systems - A, 2021, 41 (1) : 329-357. doi: 10.3934\/dcds.2020365 [8] Zedong Yang, Guotao Wang, Ravi P. Agarwal, Haiyong Xu. Existence and nonexistence of entire positive radial solutions for a class of Schr\u00f6dinger elliptic systems involving a nonlinear operator. Discrete & Continuous Dynamical Systems - S, 2020\u00a0 doi: 10.3934\/dcdss.2020436 [9] Min Chen, Olivier Goubet, Shenghao Li. Mathematical analysis of bump to bucket problem. Communications on Pure & Applied Analysis, 2020, 19 (12) : 5567-5580. doi: 10.3934\/cpaa.2020251 [10] Qingfang Wang, Hua Yang. Solutions of nonlocal problem with critical exponent. Communications on Pure & Applied Analysis, 2020, 19 (12) : 5591-5608. doi: 10.3934\/cpaa.2020253 [11] Stefano Bianchini, Paolo Bonicatto. Forward untangling and applications to the uniqueness problem for the continuity equation. Discrete & Continuous Dynamical Systems - A, 2020\u00a0 doi: 10.3934\/dcds.2020384 [12] Vieri Benci, Marco Cococcioni. The algorithmic numbers in non-archimedean numerical computing environments. Discrete & Continuous Dynamical Systems - S, 2020\u00a0 doi: 10.3934\/dcdss.2020449 [13] H\u00e9ctor Barge. 
\u010cech cohomology, homoclinic trajectories and robustness of non-saddle sets. Discrete & Continuous Dynamical Systems - A, 2020\u00a0 doi: 10.3934\/dcds.2020381 [14] Ying Lin, Qi Ye. Support vector machine classifiers by non-Euclidean margins. Mathematical Foundations of Computing, 2020, 3 (4) : 279-300. doi: 10.3934\/mfc.2020018 [15] Sergey Rashkovskiy. Hamilton-Jacobi theory for Hamiltonian and non-Hamiltonian systems. Journal of Geometric Mechanics, 2020, 12 (4) : 563-583. doi: 10.3934\/jgm.2020024 [16] Mengni Li. Global regularity for a class of Monge-Amp\u00e8re type equations with nonzero boundary conditions. Communications on Pure & Applied Analysis, , () : -. doi: 10.3934\/cpaa.2020267 [17] Alberto Bressan, Sondre Tesdal Galtung. A 2-dimensional shape optimization problem for tree branches. Networks & Heterogeneous Media, 2020\u00a0 doi: 10.3934\/nhm.2020031 [18] Fioralba Cakoni, Pu-Zhao Kow, Jenn-Nan Wang. The interior transmission eigenvalue problem for elastic waves in media with obstacles. Inverse Problems & Imaging, , () : -. doi: 10.3934\/ipi.2020075 [19] Shun Zhang, Jianlin Jiang, Su Zhang, Yibing Lv, Yuzhen Guo. ADMM-type methods for generalized multi-facility Weber problem. Journal of Industrial & Management Optimization, 2020\u00a0 doi: 10.3934\/jimo.2020171 [20] Yangrong Li, Shuang Yang, Qiangheng Zhang. Odd random attractors for stochastic non-autonomous Kuramoto-Sivashinsky equations without dissipation. Electronic Research Archive, 2020, 28 (4) : 1529-1544. 
doi: 10.3934\/era.2020080\n\nImpact Factor:","date":"2020-11-30 20:54:39","metadata":"{\"extraction_info\": {\"found_math\": true, \"script_math_tex\": 0, \"script_math_asciimath\": 0, \"math_annotations\": 0, \"math_alttext\": 0, \"mathml\": 0, \"mathjax_tag\": 0, \"mathjax_inline_tex\": 1, \"mathjax_display_tex\": 0, \"mathjax_asciimath\": 0, \"img_math\": 0, \"codecogs_latex\": 0, \"wp_latex\": 0, \"mimetex.cgi\": 0, \"\/images\/math\/codecogs\": 0, \"mathtex.cgi\": 0, \"katex\": 0, \"math-container\": 0, \"wp-katex-eq\": 0, \"align\": 0, \"equation\": 0, \"x-ck12\": 0, \"texerror\": 0, \"math_score\": 0.6148850321769714, \"perplexity\": 8629.74871020309}, \"config\": {\"markdown_headings\": true, \"markdown_code\": true, \"boilerplate_config\": {\"ratio_threshold\": 0.18, \"absolute_threshold\": 10, \"end_threshold\": 15, \"enable\": true}, \"remove_buttons\": true, \"remove_image_figures\": true, \"remove_link_clusters\": true, \"table_config\": {\"min_rows\": 2, \"min_cols\": 3, \"format\": \"plain\"}, \"remove_chinese\": true, \"remove_edit_buttons\": true, \"extract_latex\": true}, \"warc_path\": \"s3:\/\/commoncrawl\/crawl-data\/CC-MAIN-2020-50\/segments\/1606141486017.50\/warc\/CC-MAIN-20201130192020-20201130222020-00156.warc.gz\"}"}
null
null
\section{Introduction} Let $\mathbb{D}$ be the open unit disk, let $\mathbb{D}^{*}:= \widehat{\mathbb{C}} \setminus \overline{\mathbb{D}}$ be the complement of the closed unit disk in the Riemann sphere $\widehat{\mathbb{C}}$, and let $\mathbb{T}:= \partial \mathbb{D}$ be the unit circle. Given a Jordan curve $\Gamma$, let $f:\mathbb{D} \to \Omega$ and $g:\mathbb{D}^{*} \to \Omega^{*}$ be conformal maps onto the bounded and unbounded complementary components of $\Gamma$ respectively. Then $f$ and $g$ extend to homeomorphisms on the closure of their respective domains, so that $h_\Gamma:=g^{-1} \circ f : \mathbb{T} \to \mathbb{T}$ defines an orientation-preserving homeomorphism of the unit circle onto itself, called the \textit{conformal welding homeomorphism} of $\Gamma$. Note that $h_\Gamma$ is uniquely determined by $\Gamma$ up to pre- and post-composition by automorphisms of the unit disk. Moreover, if $T$ is a M\"{o}bius transformation, then $\Gamma$ and $T(\Gamma)$ have the same conformal welding homeomorphism, so that the \textit{conformal welding correspondence} $$\mathcal{W}: [\Gamma] \mapsto [h_\Gamma]$$ is well-defined, from the family of Jordan curves, modulo M\"{o}bius equivalence, to the family of orientation-preserving homeomorphisms of the circle, modulo pre- and post-composition by automorphisms of the disk. This correspondence between Jordan curves and circle homeomorphisms has appeared over the years to be of central importance in a wide variety of areas of mathematics and applications, such as Teichm\"{u}ller theory, Kleinian groups, computer vision and numerical pattern recognition (\cite{EKS},\cite{SHM}), and so forth. For more information on the applications of conformal welding, the interested reader may consult the survey article \cite{HAM2}. 
We also mention that recent years have witnessed a strong renewal of interest in conformal welding as other variants and generalizations have been introduced and developed, such as generalized conformal welding (\cite{BIS2}, \cite{HAM}), random conformal welding \cite{AST}, conformal welding for finitely connected regions \cite{MAR}, conformal welding of random surfaces \cite{SHE} and conformal laminations of trees (\cite{MARS}, \cite{ROH}), including applications to Shabat polynomials and Grothendieck's \textit{dessins d'enfants}. Conformal laminations are also related to the recent groundbreaking work of Miller and Sheffield on the relationship between the Brownian map and Liouville Quantum Gravity (\cite{MILS}, \cite{MILS2}, \cite{MILS3}). It is well-known that the conformal welding correspondence $\mathcal{W}$ is not surjective; in other words, there are orientation-preserving homeomorphisms of the circle (even analytic everywhere except at one point) which are not conformal welding homeomorphisms. On the other hand, every quasisymmetric homeomorphism $h:\mathbb{T} \to \mathbb{T}$ is the conformal welding of some Jordan curve (in this case, a quasicircle). Here \textit{quasisymmetric} means that adjacent arcs $I,J \subset \mathbb{T}$ of equal length are mapped by $h$ onto arcs of comparable length : $$M^{-1} \leq \frac{|h(I)|}{|h(J)|} \leq M.$$ The fact that every such $h$ is a conformal welding homeomorphism is usually referred to as the \textit{fundamental theorem of conformal welding} and was first proved by Pfluger \cite{PFL} in 1960. Another proof based on quasiconformal mappings was published shortly after by Lehto and Virtanen \cite{LEHV}. See also the papers of Bishop \cite{BIS2} and Schippers--Staubach \cite{SCS}. We also mention that other sufficient conditions for a given circle homeomorphism $h$ to be a welding were obtained by Lehto \cite{LEH} and Vainio \cite{VAI}, in terms of $h$ being sufficiently ``nice''. 
In Section \ref{sec1}, we recall a deep theorem of Bishop \cite{BIS2} saying that on the other hand, any ``wild'' enough $h$ is also the welding of some Jordan curve. Finding a complete characterization of conformal welding homeomorphisms is most likely a very difficult problem. This paper, however, deals with the (non) injectivity of the welding correspondence $\mathcal{W}$. Recall that if $\Gamma$ is a Jordan curve, then $T(\Gamma)$ has the same welding homeomorphism $h_\Gamma$, for any M\"{o}bius transformation $T$. Are these the only curves with this property? If $\Gamma$ and $\tilde{\Gamma}$ are two Jordan curves such that $h_\Gamma = h_{\tilde{\Gamma}}$, then there are conformal maps $f:\mathbb{D} \to \Omega$, $g:\mathbb{D}^{*} \to \Omega^{*}$, $\tilde{f} : \mathbb{D} \to \tilde{\Omega}$, $\tilde{g}: \mathbb{D}^{*} \to \tilde{\Omega}^{*}$ such that $$g^{-1} \circ f = \tilde{g}^{-1} \circ \tilde{f}$$ on $\mathbb{T}$, i.e. $$\tilde{g} \circ g^{-1} = \tilde{f} \circ f^{-1}$$ on $\Gamma$, where $\tilde{\Omega}, \tilde{\Omega}^{*}$ are the bounded and unbounded complementary components of the curve $\tilde{\Gamma}$. It follows that the map $F: \widehat{\mathbb{C}} \to \widehat{\mathbb{C}}$ defined by \begin{displaymath} F(z) = \left\{ \begin{array}{ll} (\tilde{f} \circ f^{-1})(z) & \textrm{if $z \in \Omega \cup \Gamma$}\\ (\tilde{g} \circ g^{-1})(z) & \textrm{if $z \in \Omega^{*}$}\\ \end{array} \right. \end{displaymath} is a homeomorphism conformal on $\widehat{\mathbb{C}} \setminus \Gamma$ which maps the curve $\Gamma$ onto the curve $\tilde{\Gamma}$. This shows that if $\Gamma$ is conformally removable, then $\Gamma$ uniquely corresponds to its conformal welding homeomorphism $h_\Gamma$, up to M\"{o}bius equivalence. 
\begin{definition} We say that a compact set $E \subset \mathbb{C}$ is \textit{conformally removable} if every $F \in \operatorname{CH}(E)$ is M\"{o}bius, where $\operatorname{CH}(E)$ is the collection of homeomorphisms $F:\widehat{\mathbb{C}} \to \widehat{\mathbb{C}}$ which are conformal on $\widehat{\mathbb{C}} \setminus E$. \end{definition} Single points, smooth curves and more generally, sets of $\sigma$-finite length, are all conformally removable. On the other hand, the theory of quasiconformal mappings can be used to prove that sets of positive area are never conformally removable. The converse is well-known to be false, and there exist nonremovable sets (even Jordan curves) of Hausdorff dimension one (\cite{BIS}, \cite{KAU}) and removable sets of Hausdorff dimension two (\cite[Chapter V, Section 3.7]{LEHV2}). In fact, no geometric characterization of conformally removable sets is known. We also mention that this notion of removability appears naturally in the study of various important problems in Complex Analysis and related areas, such as Koebe's uniformization conjecture (\cite{HES}, \cite{YOU2}) and the MLC conjecture on the local connectivity of the Mandelbrot set (\cite{KAH}, \cite{KAL}, \cite{KAL2}). See also the survey article \cite{YOU} for more information. Now, recall that if a Jordan curve $\Gamma$ is conformally removable, then $\Gamma$ uniquely corresponds to its conformal welding homeomorphism, modulo M\"{o}bius equivalence. The starting point of this paper is the following question. \begin{question} \label{ques1} Does the converse hold? Namely, if $\Gamma$ is a non-removable Jordan curve, does there necessarily exist another curve having the same welding homeomorphism, but which is not a M\"{o}bius image of $\Gamma$? 
\end{question} Several papers in the literature seem to imply that the answer is trivially yes, either without proof or with the following argument : \\ \textit{If $\Gamma$ is not removable, then there exists a non-M\"{o}bius $F \in \operatorname{CH}(\Gamma)$. But then the curve $F(\Gamma)$ has the same welding homeomorphism as $\Gamma$, and is not M\"{o}bius equivalent to it, since $F$ is not a M\"{o}bius transformation} \\ See e.g. \cite[Lemma 2]{OIK}, \cite[Corollary II.2]{KNS}, \cite[Section 4]{HAM}, \cite[Corollary 1]{BIS3}, \cite[p.324--325]{BIS}, \cite[Section 3]{HAM2}, \cite[Remark 2]{BIS2}, \cite[Section 2.3]{AST}, \cite[Corollary 1.4]{LIR}). Although it is true and easy to see that $\Gamma$ and $F(\Gamma)$ have the same welding homeomorphism, it is not clear at all that these two curves are not M\"{o}bius equivalent, since there could a priori exist a M\"{o}bius transformation $T$ such that $F(\Gamma)=T(\Gamma)$, even though $F$ itself is not M\"{o}bius. As far as we know, this remark first appeared in Maxime Fortier Bourque's Master's Thesis \cite{MFB}. The question of whether such $\Gamma, F$ and $T$ as above actually exist was, however, left open. In this paper, we answer that question in the affirmative. \begin{theorem} \label{mainthm} There exists a Jordan curve $\Gamma$ and a non-M\"{o}bius homeomorphism $F: \widehat{\mathbb{C}} \to \widehat{\mathbb{C}}$ conformal on $\widehat{\mathbb{C}} \setminus \Gamma$ such that $F(\Gamma)=\Gamma$. Moreover, the curve $\Gamma$ may be taken to have zero area. \end{theorem} The construction is based on a result of Bishop \cite{BIS2} characterizing the conformal welding homeomorphisms of so-called flexible curves. Theorem \ref{mainthm} shows that the above argument claiming to answer Question \ref{ques1} is in fact incorrect, and whether removability really characterizes injectivity of conformal welding remains unknown. The remainder of the paper is organized as follows. 
In Section \ref{sec1}, we recall Bishop's characterization of the conformal welding homeomorphisms of flexible curves. Then, in Section \ref{sec2}, we use this result to prove Theorem \ref{mainthm}. In Section \ref{sec3}, we discuss the non-injectivity of conformal welding for curves of positive area. Finally, Section \ref{sec4} contains some open problems related to Question \ref{ques1}. \section{Flexible curves and log-singular homeomorphisms} \label{sec1} We first need the definition of logarithmic capacity, following \cite{BIS2}. For $E \subset \mathbb{T}$ Borel, let $\mathcal{P}(E)$ denote the collection of all Borel probability measures on $E$. \begin{definition} Let $\mu \in \mathcal{P}(E)$. \begin{enumerate}[\rm(i)] \item The \textit{energy} of $\mu$, noted $I(\mu)$, is given by $$I(\mu):= \int \int \log{\frac{2}{|z-w|}} \, d\mu(z) \, d\mu(w).$$ \item The \textit{logarithmic capacity} of $E$, noted $\operatorname{cap}(E)$, is defined as $$\operatorname{cap}(E):= \frac{1}{\inf \{I(\mu): \mu \in \mathcal{P}(E) \} }.$$ \end{enumerate} \end{definition} It is well-known that logarithmic capacity is nonnegative, monotone and countably subadditive (see e.g. \cite{CAR}). We will also need the simple fact that bi-H\"{o}lder homeomorphisms of the circle preserve sets of zero capacity. We can now define log-singular homeomorphisms. \begin{definition} Let $I,J$ be two subarcs of the unit circle. An orientation-preserving homeomorphism $h: I \to J$ is \textit{log-singular} if there exists a Borel set $E \subset I$ such that both $E$ and $h(I \setminus E)$ have zero logarithmic capacity. \end{definition} The following inductive construction of log-singular homeomorphisms was outlined in \cite[Remark 9]{BIS2}. We reproduce it here for the reader's convenience. \begin{proposition} \label{prop1} Let $I,J$ be two subarcs of $\mathbb{T}$. Then there exists a log-singular homeomorphism $h:I \to J$. 
\end{proposition} \begin{proof} Start with any orientation-preserving linear homeomorphism $h_1 : I \to J$. At the first step, divide $I$ into two subarcs, denoted by \textit{red} and \textit{blue} respectively, in such a way that the red subarc has small logarithmic capacity, say less than $2^{-1}$. Now, define a homeomorphism $h_2$ on $I$ which is linear on the red subarc and the blue subarc, and which satisfies $h_2(I)=h_1(I)=J$. We also construct $h_2$ such that it maps the blue subarc onto an arc of logarithmic capacity also less than $2^{-1}$. Now, at the $n$-th step, suppose that $I$ has been divided into a finite number of arcs $\{I^{k,n}\}$, and that we have a homeomorphism $h_n:I \to J$ which is linear on each of those arcs. First, divide each $I^{k,n}$ into $n$ arcs of equal length $I_1^{k,n}, I_2^{k,n}, \dots, I_n^{k,n}$. Then, divide each $I_j^{k,n}$ into a red and a blue subarc, in such a way that the union of all the red subarcs has logarithmic capacity less than $2^{-n}$. Now, define a homeomorphism $h_{n+1}:I \to J$ which is linear on each of the red and blue subarcs, and which satisfies $h_{n+1}(I_j^{k,n}) = h_n(I_j^{k,n})$ for each $j,k$. We also construct $h_{n+1}$ so that the union of all the images of the blue subarcs under the map has logarithmic capacity less than $2^{-n}$. It is not difficult to see that these maps $h_n$ converge to an orientation-preserving homeomorphism $h:I \to J$. Indeed, if $\epsilon>0$, then we can choose $N$ sufficiently large so that for $n \geq N$, the arcs $h_n(I_j^{k,n})$ all have length less than $\epsilon$. If $m \geq n$ and $x \in I$, then $x$ belongs to one of the arcs $I_j^{k,n}$ and $h_m(x)$ belongs to $h_n(I_j^{k,n})$ by construction, thus $$|h_n(x)-h_m(x)| < \epsilon.$$ This shows that the sequence $(h_n)$ is uniformly Cauchy and therefore has a continuous limit $h:I \to J$, which has to be an orientation-preserving homeomorphism, by construction. Finally, the map $h:I \to J$ is log-singular. 
Indeed, if $E$ is the set of points in $I$ which belong to infinitely many of the red subarcs, then $$\operatorname{cap}(E) \leq \sum_{n \geq m} 2^{-n} \qquad (m \in \mathbb{N})$$ by the subadditivity of logarithmic capacity, so that $\operatorname{cap}(E) =0$. On the other hand, we have $$h(\mathbb{T} \setminus E) \subset \bigcup_{m=1}^{\infty} \bigcap_{n \geq m} h_{n+1}(B_n),$$ where $B_n$ is the union of all the blue subarcs constructed at the $n$-th step. It follows that $h(\mathbb{T} \setminus E)$ is contained in a countable union of sets of zero logarithmic capacity, so that $\operatorname{cap}(h(\mathbb{T} \setminus E)) =0$, again by the subadditivity of logarithmic capacity. \end{proof} We will also need the following definition. \begin{definition} \label{def1} A Jordan curve $\Gamma \subset \mathbb{C}$ is a \textit{flexible curve} if the following two conditions hold : \begin{enumerate}[\rm(i)] \item Given any Jordan curve $\tilde{\Gamma}$ and $\epsilon>0$, there exists $F \in \operatorname{CH}(\Gamma)$ such that $$d(F(\Gamma),\tilde{\Gamma}) < \epsilon,$$ where $d$ is the Hausdorff distance. \item Given points $z_1,z_2$ in each complementary component of $\Gamma$ and points $w_1,w_2$ in each complementary component of $\tilde{\Gamma}$, we can choose $F$ above so that $F(z_1)=w_1$ and $F(z_2)=w_2$. \end{enumerate} \end{definition} One can think of a flexible curve $\Gamma$ as being ``highly'' non-removable, in the sense that $\operatorname{CH}(\Gamma)$ is very large. Examples of flexible curves with any Hausdorff dimension between 1 and 2 were constructed in \cite{BIS}. There is a close relationship between flexible curves and log-singular homeomorphisms. Indeed, Bishop proved in \cite{BIS2} that an orientation-preserving homeomorphism $h: \mathbb{T} \to \mathbb{T}$ is log-singular if and only if it is the conformal welding of a flexible curve $\Gamma$. 
In particular, this implies that every such $h$ is the conformal welding of a dense family of curves, since if $F \in \operatorname{CH}(\Gamma)$ is as in Definition \ref{def1}, then $h$ is also the conformal welding homeomorphism of $F(\Gamma)$. We shall actually need the following stronger result, see \cite[Theorem 25]{BIS2}. \begin{theorem}[Bishop] \label{thmBis} Let $h:\mathbb{T} \to \mathbb{T}$ be an orientation-preserving log-singular homeomorphism with $h(1)=1$, and let $f_0, g_0$ be two conformal maps of $\mathbb{D}$ and $\mathbb{D}^{*}$ respectively onto disjoint domains. Then for any $0<r<1$ and any $\epsilon>0$, there are conformal maps $f$ and $g$ of $\mathbb{D}$ and $\mathbb{D}^{*}$ onto the two complementary components of a Jordan curve $\Gamma$ satisfying the following conditions : \begin{enumerate}[\rm(i)] \item $h= g^{-1} \circ f$ on $\mathbb{T}$; \item $|f(z)-f_0(z)| < \epsilon$ for all $z \in \mathbb{D}$ with $|z| \leq r$; \item $|g(z)-g_0(z)| < \epsilon$ for all $z \in \mathbb{D}^{*}$ with $|z| \geq 1/r$; \end{enumerate} Moreover, the maps $f,g$ may be constructed such that $f(1)=g(1)=\infty$ and such that the curve $\Gamma$ has zero area. \end{theorem} \begin{remark} Since the last part of Theorem \ref{thmBis} was not stated explicitly in \cite{BIS2}, let us briefly explain how it follows from the construction. First, since $h(1)=1$, condition $(i)$ implies that $f(1)=g(1)$. Composing $f$ and $g$ by a M\"{o}bius transformation $T$ if necessary, we can assume that $f(1)=g(1)=\infty$. Note that if $f$ and $g$ approximate $T^{-1} \circ f_0$ and $T^{-1} \circ g_0$ on compact subsets of $\mathbb{D}$ and $\mathbb{D}^{*}$ respectively, then $T \circ f$ and $T \circ g$ approximate $f_0$ and $g_0$. 
Now, to see that the curve $\Gamma$ can be taken to have zero area, note that the main part of the proof in \cite{BIS2} is to construct \textit{quasiconformal} mappings $f:\mathbb{D} \to \Omega$ and $g:\mathbb{D} \to \Omega^*$ onto the complementary components of a Jordan curve $\Gamma$ satisfying conditions (i), (ii) and (iii), and having quasiconstants close to 1. These maps $f$ and $g$ are obtained as limits of quasiconformal mappings $f_n : \mathbb{D} \to \Omega_n$ and $g_n : \mathbb{D}^{*} \to \Omega_n^{*}$ onto smooth Jordan domains with disjoint closures, with $\infty \in \Omega_n^*$. By construction, the curve $\Gamma$ is contained in the topological annulus $A_n:=\widehat{\mathbb{C}} \setminus (\Omega_n \cup \Omega_n^{*})$, for each $n$. Moreover, the domains $\Omega_n$ and $\Omega_n^{*}$ are of the form $\Omega_n:=F_n(t\mathbb{D})$ and $\Omega_n^*:=G_n(\mathbb{D}^{*}/t)$, where $t<1$ is sufficiently close to 1 and $F_n, G_n$ are quasiconformal mappings of $\mathbb{D}, \mathbb{D}^*$ onto the complementary components of a smooth Jordan curve $\Gamma_n$. Clearly, by taking $t$ closer to $1$ if necessary, we can assume that the topological annulus $A_n$ has area as small as we want, say less than $2^{-n}$. Then the curve $\Gamma$ will have zero area. Finally, the last part of the proof is to apply the measurable Riemann mapping theorem to replace $f$ and $g$ by conformal maps $\Phi \circ f$ and $\Phi \circ g$, where $\Phi$ is a quasiconformal mapping of the sphere. Since $\Phi$ preserves sets of area zero and since it can be assumed to fix $\infty$ by composing with a M\"{o}bius transformation, the new conformal maps can also be taken so that they send $1$ to $\infty$ and map $\mathbb{T}$ onto a curve of zero area. \end{remark} \section{Conformal homeomorphisms fixing a curve} \label{sec2} We can now prove Theorem \ref{mainthm}. 
Recall that we want to construct a Jordan curve $\Gamma$ and a non-M\"{o}bius homeomorphism $F: \widehat{\mathbb{C}} \to \widehat{\mathbb{C}}$ conformal on $\widehat{\mathbb{C}} \setminus \Gamma$ such that $F(\Gamma)=\Gamma$. The idea of the construction is the following. Suppose that such a curve $\Gamma$ and such a non-M\"{o}bius homeomorphism $F \in \operatorname{CH}(\Gamma)$ exist, and suppose that $F$ preserves the orientation of $\Gamma$. Let $f : \mathbb{D} \to \Omega$ and $g : \mathbb{D}^{*} \to \Omega^{*}$ be conformal maps onto the two complementary components of the curve. Then $F \circ f$ and $F \circ g$ are also conformal maps of $\mathbb{D}$ and $\mathbb{D}^{*}$ onto $\Omega$ and $\Omega^{*}$ respectively, so that $$F \circ f = f \circ \sigma$$ and $$F \circ g = g \circ \tau$$ for some $\sigma, \tau \in \operatorname{Aut}(\mathbb{D})$. Note that neither $\sigma$ nor $\tau$ is the identity, since otherwise $F$ would also be the identity, contradicting the fact that it is not M\"{o}bius. Now, since $F$ is continuous on $\Gamma$, we must have $$f \circ \sigma \circ f^{-1} = g \circ \tau \circ g^{-1}$$ there, which can be rewritten as \begin{equation} \label{functionalequation} W \circ \sigma = \tau \circ W \end{equation} on $\mathbb{T}$, where $W:=h_\Gamma$ is the conformal welding of $\Gamma$. We thus obtain a functional equation for the welding of the curve. The strategy now is to proceed backward. Start with an orientation-preserving homeomorphism $W : \mathbb{T} \to \mathbb{T}$ satisfying the functional equation (\ref{functionalequation}) for some $\sigma, \tau \in \operatorname{Aut}(\mathbb{D})$. Suppose in addition that we can construct $W$ so that it is the conformal welding homeomorphism of some Jordan curve $\Gamma$, i.e. $W = g^{-1} \circ f$ where $f$ and $g$ are conformal maps of $\mathbb{D}$ and $\mathbb{D}^{*}$ respectively onto the two complementary components $\Omega$, $\Omega^{*}$ of $\Gamma$. 
Then we can define a map $F$ conformal on both sides of $\Gamma$ by \begin{displaymath} F(z) = \left\{ \begin{array}{ll} (f \circ \sigma \circ f^{-1})(z) & \textrm{if $z \in \Omega$}\\ (g \circ \tau \circ g^{-1})(z) & \textrm{if $z \in \Omega^{*}$},\\ \end{array} \right. \end{displaymath} and the fact that $W=g^{-1} \circ f$ satisfies Equation (\ref{functionalequation}) implies that $F$ extends to a homeomorphism of the whole sphere. Clearly, the map $F \in \operatorname{CH}(\Gamma)$ fixes the curve $\Gamma$, and all that remains to prove is that we can choose $\sigma, \tau$ and $W$ such that $F$ is not a M\"{o}bius transformation. The main difficulty here is that if we choose $W$ to be sufficiently nice, e.g. quasisymmetric, so that it is a conformal welding homeomorphism, then the curve $\Gamma$ will be removable and $F$ will necessarily be M\"{o}bius. In order to circumvent this difficulty, a promising approach is to construct log-singular homeomorphic solutions of the functional equation (\ref{functionalequation}). \begin{lemma} \label{lemlog} Let $a,b>0$, and let $\phi$ be a M\"{o}bius transformation mapping the upper half-plane $\mathbb{H}$ onto the open unit disk $\mathbb{D}$ with $\phi(\infty)=1$, say $$\phi(z) := \frac{z-i}{z+i}.$$ Define $\tilde{\sigma}, \tilde{\tau} \in \operatorname{Aut}(\mathbb{H})$ by $\tilde{\sigma}(z):=z+a$ and $\tilde{\tau}(z):=z+b$, and set $$\sigma := \phi \circ \tilde{\sigma} \circ \phi^{-1}$$ and $$\tau := \phi \circ \tilde{\tau} \circ \phi^{-1},$$ so that $\sigma, \tau \in \operatorname{Aut}(\mathbb{D})$. Then there exists a log-singular orientation-preserving homeomorphism $W: \mathbb{T} \to \mathbb{T}$ which satisfies $$W \circ \sigma = \tau \circ W.$$ \end{lemma} \begin{proof} Let $I_0:= \phi([0,a))$ and $J_0:=\phi([0,b))$, and let $W: I_0 \to J_0$ be a log-singular orientation-preserving homeomorphism, as in Proposition \ref{prop1}. For $n \in \mathbb{Z}$, set $I_n := \sigma^n(I_0)$ and $J_n := \tau^n(J_0)$. 
Note that the subarcs $I_n$ are pairwise disjoint and that $$\bigcup_{n \in \mathbb{Z}} I_n = \mathbb{T} \setminus \{1\},$$ and similarly for the $J_n$'s. Now, extend $W$ to all of $\mathbb{T}$ by setting $$W(z) := (\tau^n \circ W \circ \sigma^{-n})(z) \qquad (z \in I_n)$$ and $W(1):=1$. Clearly, the map $W : \mathbb{T} \to \mathbb{T}$ thereby obtained is an orientation-preserving homeomorphism. Moreover, if $z \in I_n$ for some $n$, then $\sigma(z) \in I_{n+1}$, so that \begin{eqnarray*} W(\sigma(z)) &=& (\tau^{n+1} \circ W \circ \sigma^{-(n+1)})(\sigma(z))\\ &=& (\tau \circ (\tau^n \circ W \circ \sigma^{-n}))(z)\\ &=& (\tau \circ W)(z), \end{eqnarray*} which shows that $W$ satisfies Equation (\ref{functionalequation}). It remains to prove that $W: \mathbb{T} \to \mathbb{T}$ satisfies the log-singular condition. Let $E_0 \subset I_0$ be such that $\operatorname{cap}(E_0)=0$ and $\operatorname{cap}(W(I_0 \setminus E_0))=0$. For $n \in \mathbb{Z}$, let $E_n:=\sigma^n(E_0) \subset I_n$. Note that $\operatorname{cap}(E_n)=0$ for all $n$, since bi-H\"{o}lder homeomorphisms preserve sets of zero logarithmic capacity. It follows that $E:=\cup_n E_n$ also has capacity zero, by subadditivity. Finally, we have \begin{eqnarray*} W(\mathbb{T} \setminus E) &=& \bigcup_{n \in \mathbb{Z}} W(I_n \setminus E_n) \cup \{1\}\\ &=& \bigcup_{n \in \mathbb{Z}} W(\sigma^n(I_0 \setminus E_0)) \cup \{1\}\\ &=& \bigcup_{n \in \mathbb{Z}} \tau^n(W(I_0 \setminus E_0)) \cup \{1\}, \end{eqnarray*} which shows that $\operatorname{cap}(W(\mathbb{T} \setminus E))=0$, again by subadditivity and the fact that bi-H\"{o}lder homeomorphisms preserve sets of zero logarithmic capacity. This completes the proof of the lemma. \end{proof} We can now proceed with the proof of Theorem \ref{mainthm}. 
\begin{proof} Let $a,b >0$ with $a \neq b$, and let $W: \mathbb{T} \to \mathbb{T}$, $\sigma$ and $\tau$ be as in Lemma \ref{lemlog}, so that $$W \circ \sigma = \tau \circ W.$$ Also, let $0<\epsilon<|a-b|/4$, let $z_1, z_2$ be points in the upper half-plane $\mathbb{H}$ and lower half-plane $\mathbb{H}^{*}$ respectively, and let $K_1 \subset \mathbb{H}$ and $K_2 \subset \mathbb{H}^{*}$ be compact sets such that $z_1, z_1+a \in K_1$ and $z_2, z_2+b \in K_2$. Lastly, take $0<r<1$ sufficiently close to $1$ so that $\phi(K_1) \subset \mathbb{D}(0,r)$ and $\phi(K_2) \subset \mathbb{C} \setminus \mathbb{D}(0,1/r)$, where $\phi$ is the M\"{o}bius transformation of Lemma \ref{lemlog}. By Theorem \ref{thmBis}, we can write $W:= g^{-1} \circ f$, where $f$ and $g$ are conformal maps of $\mathbb{D}$ and $\mathbb{D}^{*}$ onto the two complementary components $\Omega$ and $\Omega^{*}$ of a Jordan curve $\Gamma$, such that $f(1)=g(1)=\infty$, \begin{equation} \label{ineq1} |f(z)-\phi^{-1}(z)| < \epsilon \qquad (|z| \leq r) \end{equation} and \begin{equation} \label{ineq2} |g(z)-\phi^{-1}(z)| < \epsilon \qquad (|z| \geq 1/r). \end{equation} In particular, the above inequalities hold for $z \in \phi(K_1)$ and $z \in \phi(K_2)$ respectively. Now, define a map $F$ by \begin{displaymath} F(z) = \left\{ \begin{array}{ll} (f \circ \sigma \circ f^{-1})(z) & \textrm{if $z \in \Omega$}\\ (g \circ \tau \circ g^{-1})(z) & \textrm{if $z \in \Omega^{*}$}, \end{array} \right. \end{displaymath} so that $F$ is conformal on $\widehat{\mathbb{C}} \setminus \Gamma$. Also, the equation $W \circ \sigma = \tau \circ W$ on $\mathbb{T}$ is equivalent to $$f \circ \sigma \circ f^{-1} = g \circ \tau \circ g^{-1}$$ on $\Gamma$, so that $F$ extends to a homeomorphism of $\widehat{\mathbb{C}}$. Clearly, this map satisfies $F(\Gamma)=\Gamma$. It remains to prove that $F$ is not a M\"{o}bius transformation. Suppose, in order to obtain a contradiction, that $F$ is M\"{o}bius. 
First, note that $F(\infty)=\infty$, so that $F$ has to be linear, say $F(z)=cz+d$. Now, the map $F$ can be rewritten as \begin{displaymath} F(z) = \left\{ \begin{array}{ll} (\tilde{f} \circ \tilde{\sigma} \circ \tilde{f}^{-1})(z) & \textrm{if $z \in \Omega$}\\ (\tilde{g} \circ \tilde{\tau} \circ \tilde{g}^{-1})(z) & \textrm{if $z \in \Omega^{*}$},\\ \end{array} \right. \end{displaymath} where $\tilde{f}:=f \circ \phi : \mathbb{H} \to \Omega$, $\tilde{g}:= g \circ \phi : \mathbb{H}^{*} \to \Omega^{*}$, and $\tilde{\sigma}(z):=z+a$, $\tilde{\tau}(z):=z+b$ are as in Lemma \ref{lemlog}. It is easy to see from this that $F$ has only one fixed point, at infinity, so that $c=1$. Now, note that for $z \in \mathbb{H}$, we have $$f(\phi(z))+d=\tilde{f}(z) + d = F(\tilde{f}(z)) = (\tilde{f} \circ \tilde{\sigma})(z) = \tilde{f}(z+a) = f(\phi(z+a)),$$ so that in particular, $$d-a = f(\phi(z_1+a)) - f(\phi(z_1))-a = f(\phi(z_1+a)) - (z_1+a) - f(\phi(z_1))+z_1.$$ By Inequality (\ref{ineq1}) with $z$ replaced by $\phi(z_1), \phi(z_1+a) \in \phi(K_1)$, we get $$|d-a| \leq \epsilon + \epsilon = 2\epsilon.$$ Similarly, using the formula for $F$ on $\Omega^{*}$ and Inequality (\ref{ineq2}), we get $$|d-b| \leq 2\epsilon,$$ and combining the two inequalities yields $$|a-b| \leq |a-d| + |d-b| \leq 4\epsilon,$$ which contradicts the choice of $\epsilon$. It follows that $F$ is not a M\"{o}bius transformation. Finally, we can take $\Gamma$ to have zero area, by Theorem \ref{thmBis}. This completes the proof of Theorem \ref{mainthm}. \end{proof} \section{Non-injectivity of conformal welding for curves of positive area} \label{sec3} In this section, we mention that the argument described in the introduction, although incorrect in general, can nonetheless be made to work in the case of curves with positive area. 
\begin{theorem} \label{thm2} If $\Gamma$ is a Jordan curve with positive area, then there is another curve having the same welding homeomorphism, but which is not a M\"{o}bius image of $\Gamma$. \end{theorem} The idea of the proof is quite simple. Since $\Gamma$ has positive area, it is in particular not removable, so there is a non-M\"{o}bius homeomorphism $F : \widehat{\mathbb{C}} \to \widehat{\mathbb{C}}$ which is conformal off $\Gamma$. As already mentioned, the curve $F(\Gamma)$ has the same welding as $\Gamma$, thus is a good candidate for the curve we want to construct. Unfortunately, as we saw in Section \ref{sec2}, it may happen that this curve is a M\"{o}bius image of the original one, even though $F$ itself is not M\"{o}bius. However, in the positive-area case, it is easy to see using the measurable Riemann mapping theorem that the collection of non-M\"{o}bius elements of $\operatorname{CH}(\Gamma)$ is infinite-dimensional, in some sense. A dimension argument relying on Ahlfors-Bers and Brouwer's Invariance of Domain can then be applied to conclude that there must be at least one non-M\"{o}bius $F \in \operatorname{CH}(\Gamma)$ such that $F(\Gamma) \neq T(\Gamma)$ for every M\"{o}bius transformation $T$. As far as we know, Theorem \ref{thm2} was first stated by Katznelson, Nag and Sullivan in \cite{KNS}. See \cite[Theorem 4.22]{MFB} for a complete and detailed proof. It would be very interesting to find a more constructive proof though. \section{Concluding remarks} \label{sec4} In view of Theorem \ref{thm2}, Question \ref{ques1} can be reduced to the following. \begin{question} \label{ques2} If $\Gamma$ is a non-removable Jordan curve with zero area, does there necessarily exist another curve having the same conformal welding homeomorphism, but which is not a M\"{o}bius image of $\Gamma$? \end{question} As observed in Section \ref{sec3}, the size of $\operatorname{CH}(\Gamma)$ may be relevant here. 
\begin{question} If $\Gamma$ is a non-removable Jordan curve with zero area, is the collection of non-M\"{o}bius elements of $\operatorname{CH}(\Gamma)$ necessarily large, or infinite-dimensional, in some sense? \end{question} If $\Gamma$ is non-removable, then there exists at least one non-M\"{o}bius homeomorphism $F:\widehat{\mathbb{C}} \to \widehat{\mathbb{C}}$ conformal off $\Gamma$, but as far as we know, it is still open in general whether there must exist another non-M\"{o}bius element of $\operatorname{CH}(\Gamma)$ which is not of the form $T \circ F$, for $T$ M\"{o}bius. Finally, we conclude by mentioning that a positive answer to Question \ref{ques2} would follow if one could prove that there always exists a non-M\"{o}bius homeomorphism of $\widehat{\mathbb{C}}$ conformal off $\Gamma$ which maps $\Gamma$ onto a curve of positive area. \acknowledgments{The author thanks Chris Bishop and Don Marshall for helpful discussions.} \bibliographystyle{amsplain}
{ "redpajama_set_name": "RedPajamaArXiv" }
9,793
This report gives a critical review of steps taken in 10 countries (Belgium, Denmark, Finland, France, Germany, Netherlands, Japan, Sweden, UK, USA) with regard to testing and reporting schemes as well as overall quality approaches to improve building airtightness. The analyses are mostly based on contributions and discussions with 20 speakers invited to the AIVC-TightVent airtightness international workshop held in Brussels, 28-29 March 2012; they also include information from earlier publications as well as from the authors' experience. We have examined the schemes derived to increase the reliability of air leakage tests because of the potentially large energy and economic impacts of erroneous results. This includes test specifications going beyond existing standards regarding building preparation, choice of reference values, data collection protocols and reference pressure, sampling rules for large or multi-family buildings, equipment calibration and analysis software validation. To enforce these specifications, some countries have derived competent tester schemes including trainings with an array of subsequent procedures—e.g., for training bodies, auditor trainings, centralized test data collection. We have also analysed the various approaches to encourage tighter constructions. These range from purely voluntary schemes to systematic testing of minimum requirements, via requirements or incentives for subsidized projects, programmes or quality scheme implementation. Overall, the main lessons learnt are that a) clear encouragements to systematic or non-systematic testing have led to market transformations while other options have failed to do so; and b) that carefully designed competent tester schemes are essential to give credit to incentives or requirements as well as to monitor airtightness policy measures.
{ "redpajama_set_name": "RedPajamaC4" }
713
\subsection{Generating $\mathcal{H}^+_k$} \label{sec:generate} We started by generating the set $R(k)$ of all monotone Boolean functions on variables $\langle k \rangle$ \emph{up to isomorphism}, that is, up to renaming the variables. The size of $R(k)$ corresponds to the OEIS sequence A003182, which is only known up to $k=6$ (computed in~\cite{caze2013dendrites} and in~\cite{stephen2014counting}). We used parts of the code from~\cite{caze2013dendrites} to generate all functions in $R(k)$ for $k$ in $\{1,\ldots,6\}$. We then filtered $R(k)$ to obtain the set of functions that are nondegenerate. Then we tested whether $Q^\phi_k$ is safe by computing the CNF lattice of $\phi$ and checking that $\mu(\hat{0}, \hat{1}) = 0$. Let us call $\mathit{SND}(k)$ the set of remaining functions (that is, the functions that are safe and nondegenerate). It took about $2$ weeks (using the $40$ CPUs) to compute the explicit lists of all the functions in $R(k)$ and $\mathit{SND}(k)$ for $k \in \{1,\ldots,6\}$, and the sizes of these sets can be found in Table~\ref{tab:results}. We next explain how we tested the niceness of each function in $\mathit{SND}(k)$. \subsection{Testing Niceness} \label{sec:test_niceness} Let us call \emph{boxes} the functions $\phi_i$ used in Definition~\ref{def:nice}. That is, $\phi$ is nice if and only if we can partition its satisfying valuations into $k+1$ ordered boxes (we allow some boxes to be empty), where the $i$-th box has a \emph{symmetry around variable $i$}: \begin{definition} Let $\nu \subseteq \langle k \rangle$ be a valuation of $\langle k \rangle$, and $l \in \langle k \rangle$ be a variable. We define the valuation $\tgl(\nu,l)$ to be the valuation $\nu \cup \{l\}$ if $l \notin \nu$ and $\nu \setminus \{l\}$ if $l \in \nu$. We say that a set $B$ of valuations of $\langle k \rangle$ \emph{has a symmetry around variable $l$} if for every valuation $\nu \subseteq \langle k \rangle$ we have $\nu \in B$ iff $\tgl(\nu,l) \in B$. 
\end{definition} To check if $\phi$ is nice, we build a CNF $\nice(\phi)$ that expresses exactly that $\sat(\phi)$ can be partitioned nicely, i.e., $\nice(\phi)$ is satisfiable if and only if $\phi$ is nice. We can then use a SAT solver. \begin{definition} Let $k \geq 1$ and $\phi$ be a Boolean function on $\langle k \rangle$. We define the CNF $\nice(\phi)$ as follows. Its set of variables is $\{\x_\nu^l \mid \nu \in \sat(\phi) \text{ and } l \in \langle k \rangle \}$, where $\x_\nu^l$ intuitively expresses that $\nu$ is put in box $l$. Its set of clauses is: \begin{enumerate} \item For each $\nu \in \sat(\phi)$, the clause $\bigvee\limits_{l=0}^{k} \x_\nu^l$, expressing that the valuation $\nu$ must be put in at least one box; \item For each $\nu \in \sat(\phi)$ and $l,l' \in \{0,\ldots,k\}$ with $l \neq l'$, the clause $\lnot \x_\nu^l \lor \lnot \x_\nu^{l'}$, expressing that the valuation $\nu$ is in at most one box; \item For each $\nu \in \sat(\phi)$ and $l \in \{0,\ldots,k\}$, then: \begin{enumerate} \item If $\tgl(\nu,l) \notin \sat(\phi)$, the clause $\lnot \x_\nu^l$; \item Else, the clause $\lnot \x_\nu^l \lor \x_{\tgl(\nu,l)}^l$. \end{enumerate} This ensures that the box $l$ has a symmetry around $l$. \end{enumerate} \end{definition} \begin{proposition} \label{prp:nice_is_nice} $\phi$ is nice iff $\nice(\phi)$ is satisfiable. \end{proposition} Now for each function $\phi$ in $\mathit{SND}(k)$, we constructed the CNF formula $\nice(\phi)$, and used the SAT solver Glucose~\cite{audemard2009predicting} to determine if it is satisfiable. If $\nice(\phi)$ is satisfiable then $Q^\phi_k \in \text{UCQ(d-DNNF)}$ and we store $\phi$ in $N(k)$. If it is not we give the formula $\nice(\lnot \phi)$ to Glucose. If this formula is satisfiable then $\phi$ is co-nice and $Q^\phi_k \in \text{UCQ(d-D)}$ (but we do not know if it is in UCQ(d-DNNF)) and we store $\phi$ in $\text{\emph{co-N}}(k)$. 
If $\nice(\lnot \phi)$ is not satisfiable then $\phi$ is in $\mathit{BAD}(k)$ and we do not know if $Q^\phi_k$ is in UCQ(d-D). The results of these experiments are displayed in Table~\ref{tab:results}, and, as we found no function in $\mathit{BAD}(k)$, imply: \setlength{\tabcolsep}{10pt} \begin{table}[t] \caption{Results of our experiments. The meaning of columns is explained after Proposition~\ref{prp:nice_is_nice}.} \centering \begin{tabular}{crrrrr} \toprule $k$ & $|R(k)|$ & $|\mathit{SND}(k)|$ & $|N(k)|$ & $|\text{\emph{co-N}}(k)|$ & $|\mathit{BAD}(k)|$ \\ \midrule $1$ & $5$ & $0$ & $0$ & $0$ & $0$ \\ $2$ & $10$ & $0$ & $0$ & $0$ & $0$ \\ $3$ & $30$ & $2$ & $2$ & $0$ & $0$ \\ $4$ & $210$ & $25$ & $25$ & $0$ & $0$ \\ $5$ & $16,353$ & $2,531$ & $2,529$ & $2$ & $0$ \\ $6$ & $490,013,148$ & $21,987,161$ & $21,987,094$ & $67$ & $0$ \\ \bottomrule \end{tabular} \label{tab:results} \end{table} \begin{proposition} All the safe queries in $\mathcal{H}^+_k$ for $k \in \{1,\ldots,6\}$ are in UCQ(d-D). \end{proposition} We give here one of the $2$ functions that are in $\text{\emph{co-N}}(5)$, $\phi_{\mathrm{co-N1}} \colonequals 24 \land 034 \land 013 \land 12 \land 15 \land 05 \land 35 \land 23 \land 02 \land 25 \land 014 \land 45$ where we write, for instance, $014$ to mean $0 \lor 1 \lor 4$. Could $\phi_{\mathrm{co-N1}}$ separate UCQ(d-DNNF) from UCQ(d-D)? \section{Introduction} \label{sec:introduction} \input{introduction} \section{Preliminaries} \label{sec:preliminaries} \input{preliminaries} \section{The $\mathcal{H}$-queries} \label{sec:hk} \input{hk} \section{Nice Boolean Functions} \label{sec:nice} \input{nice} \section{Experiments} \label{sec:experiments} \input{experiments} \section{Conclusion} \label{sec:conclusion} \input{conclusion} \bibliographystyle{splncs}
{ "redpajama_set_name": "RedPajamaArXiv" }
9,426