diff --git a/.gitattributes b/.gitattributes index 0403856f7cec16531c01dfacddcd70adc2340380..67e604ba84f866f0a30fed6706a51c1e111b0690 100644 --- a/.gitattributes +++ b/.gitattributes @@ -474,3 +474,22 @@ samples/pdfs/88513.pdf filter=lfs diff=lfs merge=lfs -text samples/pdfs/7100604.pdf filter=lfs diff=lfs merge=lfs -text samples/pdfs/6324184.pdf filter=lfs diff=lfs merge=lfs -text samples/pdfs/3594993.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/598288.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/213815.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/7642017.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/174916.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/2590883.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/503850.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/5718759.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/5396754.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/2515306.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/7100604.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/3594993.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/88513.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/3226827.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/450057.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/6535016.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/3884483.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/6324184.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/2779026.pdf filter=lfs diff=lfs merge=lfs -text +samples_new/pdfs/7569662.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/samples_new/pdfs/174916.pdf b/samples_new/pdfs/174916.pdf new file mode 100644 index 0000000000000000000000000000000000000000..67725d0846a3d794c327b0aa97ac0e96b16791e2 --- /dev/null +++ b/samples_new/pdfs/174916.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:05a800cecc802eb3f82f462ddff2dd8daa0771a6ce717e3e84800e17f337561c +size 204800 diff --git a/samples_new/pdfs/213815.pdf b/samples_new/pdfs/213815.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2f05ad79fd241a68ec4fbeb86f99f59744700a3f --- /dev/null +++ b/samples_new/pdfs/213815.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06e9adeff2122523bd3938a54a4a2e9396f19a0493482d64c1aa2fa65a177ccc +size 6112738 diff --git a/samples_new/pdfs/2515306.pdf b/samples_new/pdfs/2515306.pdf new file mode 100644 index 0000000000000000000000000000000000000000..26519189f909f50d12824c26eb67f11b19562beb --- /dev/null +++ b/samples_new/pdfs/2515306.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5120455c0dc104437507c5370565044b483315cb48ada647722fdbdb4057b87c +size 764141 diff --git a/samples_new/pdfs/2590883.pdf b/samples_new/pdfs/2590883.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a7320c8adddcae99d9e4ec0f669666aeea331734 --- /dev/null +++ b/samples_new/pdfs/2590883.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5390b6be32aeb0875f7d4f522697d32dad2862eb9c51be71143a71e867957204 +size 167859 diff --git a/samples_new/pdfs/2779026.pdf b/samples_new/pdfs/2779026.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3df04adb1992017f9394755bab7856c819531706 --- /dev/null +++ b/samples_new/pdfs/2779026.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59a44297d9405d89e3fb080a6b7496229f7af203559c16c49d881bc637c42227 +size 542522 diff --git a/samples_new/pdfs/2909063.pdf b/samples_new/pdfs/2909063.pdf new file mode 100644 index 0000000000000000000000000000000000000000..016888f7c56e771fc0cb9fbce3e03bf4b65bac0b Binary files /dev/null and b/samples_new/pdfs/2909063.pdf differ diff --git a/samples_new/pdfs/3226827.pdf b/samples_new/pdfs/3226827.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..e385317de094b8aeba91a7fa9a775ef6218f507a --- /dev/null +++ b/samples_new/pdfs/3226827.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:681ddeeb2a99bad4e10f704daf4da962e4edc64f6578a4dd36fe738232011ca3 +size 251483 diff --git a/samples_new/pdfs/3594993.pdf b/samples_new/pdfs/3594993.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2aaa9f0be5f5cc1bf31dd18b4aabe9898db90522 --- /dev/null +++ b/samples_new/pdfs/3594993.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74000028fd96bfc4062871bd5147aad8dc53df7ff99d18bc0fce190094e2fb85 +size 950286 diff --git a/samples_new/pdfs/3884483.pdf b/samples_new/pdfs/3884483.pdf new file mode 100644 index 0000000000000000000000000000000000000000..22aa15d562bc9988ec36cbc86a78b8ccd0d9acc6 --- /dev/null +++ b/samples_new/pdfs/3884483.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d5b428e4718f6eea9e4f339288c737ce6712885c927aa18629e1556b1cd84c8 +size 8815754 diff --git a/samples_new/pdfs/450057.pdf b/samples_new/pdfs/450057.pdf new file mode 100644 index 0000000000000000000000000000000000000000..56c7d943261daecb44d6be9677ca94fe125af933 --- /dev/null +++ b/samples_new/pdfs/450057.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e8f5cf638a50a4e41abbf7bb0bb69c31f8777bd2d596a4ef8856a3dfa5353aa +size 534024 diff --git a/samples_new/pdfs/4523932.pdf b/samples_new/pdfs/4523932.pdf new file mode 100644 index 0000000000000000000000000000000000000000..302d66a54cb3be8676e3b591771165203e147f0a Binary files /dev/null and b/samples_new/pdfs/4523932.pdf differ diff --git a/samples_new/pdfs/4808858.pdf b/samples_new/pdfs/4808858.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a89361bfb882ecc62f07d180167d314572668e17 Binary files /dev/null and b/samples_new/pdfs/4808858.pdf differ diff --git a/samples_new/pdfs/503850.pdf b/samples_new/pdfs/503850.pdf new 
file mode 100644 index 0000000000000000000000000000000000000000..1fe033e1da2b9f0dfef59eb45213a3df63e211db --- /dev/null +++ b/samples_new/pdfs/503850.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10487c8ab89ccdf9e3fd5b63249bea553fed989039648635f64e7ee31bba3a2d +size 170955 diff --git a/samples_new/pdfs/5396754.pdf b/samples_new/pdfs/5396754.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1966eb6bc4835cc552b78c4c1195cae20317cdf0 --- /dev/null +++ b/samples_new/pdfs/5396754.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae2aaf9495b382ec4c0fa50a47325ecdfbf6aea22bcb9f9c2be4ee26753c78ab +size 648593 diff --git a/samples_new/pdfs/5718759.pdf b/samples_new/pdfs/5718759.pdf new file mode 100644 index 0000000000000000000000000000000000000000..327001298a82e02f7f35fb1885907f641041cef0 --- /dev/null +++ b/samples_new/pdfs/5718759.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b88daa241e1270f14bec6eeffa97f2e1d6896bbdeeeb438a292ca7a4543f25db +size 524618 diff --git a/samples_new/pdfs/598288.pdf b/samples_new/pdfs/598288.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ba2042ba846d3c9c6dddb9c93f3e5ed736a68650 --- /dev/null +++ b/samples_new/pdfs/598288.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1351ca814e16291a77eee385b3e6d0e757228d6db1e351d574672aa42e4592e7 +size 12844441 diff --git a/samples_new/pdfs/6324184.pdf b/samples_new/pdfs/6324184.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e7b9ef08bd33658046a1c9b9532f40467b40d81b --- /dev/null +++ b/samples_new/pdfs/6324184.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc163029d74c722c4e3627b1293ad1285200dab469830fcc721df7c526b6b998 +size 519591 diff --git a/samples_new/pdfs/6535016.pdf b/samples_new/pdfs/6535016.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..5a2cff07f894d0ae252458acce8acf8f4797e0f1 --- /dev/null +++ b/samples_new/pdfs/6535016.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2e67da59147ff5910b0724ff8a46141d5b5ceccc1425987b8167c9d9a991679 +size 3727298 diff --git a/samples_new/pdfs/7100604.pdf b/samples_new/pdfs/7100604.pdf new file mode 100644 index 0000000000000000000000000000000000000000..5f2f60d29642432da653460aae1a11c629b5bb24 --- /dev/null +++ b/samples_new/pdfs/7100604.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e52f1e44ce3c6cc834e106bdaf95939a40032c44f12a3fa6d088b1d7afcc3a28 +size 396778 diff --git a/samples_new/pdfs/7334540.pdf b/samples_new/pdfs/7334540.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ac7e65525a84192fabfa134bdf18e9226c9a5047 Binary files /dev/null and b/samples_new/pdfs/7334540.pdf differ diff --git a/samples_new/pdfs/7569662.pdf b/samples_new/pdfs/7569662.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3690de22f3a5261be24c7f969ba03e25de14d06d --- /dev/null +++ b/samples_new/pdfs/7569662.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4502ab7df7ec80f5adf2f232488fc92bbd4ae5bafec8ebe85abbc2d8e47eb94 +size 494074 diff --git a/samples_new/pdfs/7642017.pdf b/samples_new/pdfs/7642017.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c082399d735a94b00487ce95dcce5298752a4953 --- /dev/null +++ b/samples_new/pdfs/7642017.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6a5875ee6e30cc47f9ff423b1c5d0e057deb5112994076221e9d042c8c629f7 +size 9991213 diff --git a/samples_new/pdfs/88513.pdf b/samples_new/pdfs/88513.pdf new file mode 100644 index 0000000000000000000000000000000000000000..fe98861c29808a9beef1ceef1e99614204feeedc --- /dev/null +++ b/samples_new/pdfs/88513.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:02de63da41877b559923a4d2e6845c79c2f2d757f1ab43999c5438ea69a957e0 +size 682886 diff --git a/samples_new/pdfs/904681.pdf b/samples_new/pdfs/904681.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d5e018fa9f626b58c506c1ce9ba2be51850b16e3 Binary files /dev/null and b/samples_new/pdfs/904681.pdf differ diff --git a/samples_new/texts_merged/1117773.md b/samples_new/texts_merged/1117773.md new file mode 100644 index 0000000000000000000000000000000000000000..073a595e7e0a37e4b95166023e69c7f76492df48 --- /dev/null +++ b/samples_new/texts_merged/1117773.md @@ -0,0 +1,241 @@ + +---PAGE_BREAK--- + +Resolving electron transfer kinetics in porous electrodes via diffusion-less +cyclic voltammetry + +Shida Yang,ac Yang Li,b Qing Chen.ab* + +aDepartment of Chemistry, bDepartment of Mechanical and Aerospace Engineering, and +cThe Energy Institute, HKUST, Hong Kong. + +*Corresponding Author E-mail: chenqing@ust.hk (Qing Chen) +---PAGE_BREAK--- + +**Figure S1.** Background current on Ti foil as assembled in the cell with the active electrolyte but without the carbon felt. (a) $K_3Fe(CN)_6$, (b) $FeCl_3$, and (c) $VOSO_4$. The currents are at least two orders of magnitude lower than those measured with the carbon felt for all three cases, so no background subtraction is necessary for the analysis. +---PAGE_BREAK--- + +**Figure S2.** Electrochemical surface area measurements of the carbon felt electrode in the electrolytes of (a) $K_3Fe(CN)_6$, (b) $FeCl_3$, and (c) $VOSO_4$. We scan CV in ranges of potential with no visible Faradaic current and plot the average currents against the scan rates. The slopes are divided with a specific capacitance of 20 µF/cm² to derive the areas. +---PAGE_BREAK--- + +**Figure S3.** X-ray photoelectron spectra of different carbon felts. + +**Table S1.** O/C ratio of different carbon felts and the corresponding standard rate constants $k^0$ of VO$^{2+}$/VO$_2^+$ on these electrodes. + +
Carbon FeltC ratio/%O ratio/%O/Ck0 (cm/s)
CeTech CF020, 400 °C92.517.490.0811.56±0.15 × 10-6
SGL GFA6EA, 400 °C90.149.860.1091.642±0.072 × 10-7
SGL GFA6EA, 450 °C89.3410.660.1192.095±0.518 × 10-7
SGL GFA6EA, 500 °C88.9311.070.1242.455±0.216 × 10-8
+---PAGE_BREAK--- + +**Figure S4.** Additional results of the RFB tests. (a) Electrochemical impedance spectroscopy (EIS) and (b) IR-corrected polarization curves of VRFB with CF baked at different temperatures. + +**Table S2.** Polarization resistance of VRFB with different CF. + +
SGL CFRu/Ω cm²polarization resistance/Ω cm²corrected polarization resistance/Ω cm²
400°C0.3950.4870.092
450°C0.4210.5400.119
500°C0.4500.6640.214
+---PAGE_BREAK--- + +**Table S3.** Summary of standard rate constants *k* of VO2+/VO2+ reported in literature. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Electrodes + + Treatment + + Method + + Area + + k (cm/s) + + Ref +
+ SGL Carbon GFD4.6 + + Baked at 400 °C for 12 hrs + + Symmetrical RFB + + Electro-chemical + + 2.38×10-6 + + [1] +
+ Disk made from carbon felt (SigraCELL GFA6, SGL carbon) + + Baked at 400 °C for 30 hrs + + Linear sweep voltammetry (LSV) + + Geometric + + 1.6-8.8×10-8 + + [2] +
+ Ultra-microelectrode made from carbon felts (GrafTech) + + Electrochemical oxidation and reduction + + LSV and EIS + + Electro-chemical + + 1.7-17×10-5 + + [3] +
+ Carbon felt (Sigratherm GFA5) + + Not mentioned + + Galvanic charging / discharging + + Calculated + + 3×10-7 + + [4] +
+ Carbon felt (Liao Yang Carbon Fiber Sci-tech. Co., Ltd. China) + + None + + CV and EIS + + Geometric + + 1.84×10-3 + + [5] +
+ Carbon paper (29, SGL group) + + Baked at 450 °C for 30 hrs + + Polarization curve and EIS in a RFB + + Electro-chemical + + 0.2-1.8×10-7 + + [6] +
+ Carbon paper (10AA, SGL group) + + None + + Symmetrical RFB + + Gas adsorption + + 2.05×10-6 + + [7] +
+ Carbon paper (Shanghai Hesen, Ltd. HCP030 N) + + Electrochemical oxidation and reduction + + CV + + Gas adsorption + + 1.04×10-3 + + [8] +
+ +SI references: + +[1] M. V. Holland-Cunz, J. Friedl, U. Stimming, *J. Electroanal. Chem.* **2018**, *819*, 306-311. +---PAGE_BREAK--- + +[2] Y. Li, J. Parrondo, S. Sankarasubramanian, V. Ramani, *J. Phys. Chem. C* **2019**, *123*, 6370-6378. + +[3] M. A. Miller, A. Bourke, N. Quill, J. S. Wainright, R. P. Lynch, D. N. Buckley, R. F. Savinell, *J. Electrochem. Soc.* **2016**, *163*, A2095. + +[4] A. A. Shah, M. J. Watt-Smith, F. C. Walsh, *Electrochim. Acta* **2008**, *53*, 8087-8100. + +[5] W. Li, Z. Zhang, Y. Tang, H. Bian, T.-W. Ng, W. Zhang, C.-S. Lee, *Adv. Sci.* **2016**, *3*, 1500276. + +[6] K. V. Greco, A. Forner-Cuenca, A. Mularczyk, J. Eller, F. R. Brushett, *ACS Appl. Mater. Interfaces* **2018**, *10*, 44430-44442. + +[7] D. Aaron, C.-N. Sun, M. Bright, A. B. Papandrew, M. M. Mench, T. A. Zawodzinski, *ECS Electrochemistry Letters* **2013**, *2*, A29. + +[8] X. W. Wu, T. Yamamura, S. Ohta, Q. X. Zhang, F. C. Lv, C. M. Liu, K. Shirasaki, I. Satoh, T. Shikama, D. Lu, S. Q. Liu, *J Appl Electrochem* **2011**, *8*. \ No newline at end of file diff --git a/samples_new/texts_merged/1168240.md b/samples_new/texts_merged/1168240.md new file mode 100644 index 0000000000000000000000000000000000000000..fa795a60b47254e83b9c023bd8a69296176a765a --- /dev/null +++ b/samples_new/texts_merged/1168240.md @@ -0,0 +1,345 @@ + +---PAGE_BREAK--- + +# Approximating quadratic programming with bound constraints + +Yinyu Ye* + +Department of Management Sciences +The University of Iowa +Iowa City, Iowa 52242, U.S.A. + +March 31, 1997 + +## Abstract + +We consider the problem of approximating the global maximum of a quadratic program (QP) with $n$ variables subject to bound constraints. Based on the results of Goemans and Williamson [4] and Nesterov [6], we show that a $4/7$ approximate solution can be obtained in polynomial time. + +**Key words.** Quadratic programming, global maximizer, approximation algorithm + +*This author is supported in part by NSF grant DMI-9522507. 
+---PAGE_BREAK--- + +# 1 Introduction + +Consider the quadratic programming (QP) problem + +$$ +\begin{array}{ll} +\text{(QP)} & q(Q) := \text{Maximize} \quad q(x) := x^T Q x \\ +& \text{Subject to} \quad -e \leq x \leq e, +\end{array} +$$ + +where $Q \in \mathbb{R}^{n \times n}$ is given and $e \in \mathbb{R}^n$ is the vector of all ones. Let $x = x(Q)$ be a maximizer of the problem. In this paper, without loss of generality, we assume that $x \neq 0$. + +Normally, there is a linear term in the objective function: $q(x) = x^T Q x + c^T x$. However, the problem can be homogenized as + +$$ +\begin{array}{ll} +\text{Maximize} & q(x) := x^T Q x + tc^T x \\ +\text{Subject to} & -e \leq x \leq e, \quad -1 \leq t \leq 1 +\end{array} +$$ + +by adding a scalar variable $t$. There always is an optimal solution $(x, t)$ for this problem in which $t=1$ or $t=-1$. If $t=1$, then $x$ is also optimal for the non-homogeneous problem; if $t=-1$, then $-x$ is optimal for the non-homogeneous problem. Thus, without loss of generality, we can let $q(x) = x^T Q x$ throughout this paper. + +The function $q(x)$ has a minimizer and a maximizer over the bounded feasible set $-e \leq x \leq e$. Let $\underline{q} := -q(-Q)$ and $q := q(Q)$ denote their minimal and maximal objective values, respectively. An $\epsilon$-maximal solution or $\epsilon$-maximizer, $\epsilon \in [0, 1]$, for (QP) is defined as an $-e \leq x \leq e$ such that + +$$ \frac{\underline{q} - q(x)}{\underline{q} - q} \leq \epsilon. $$ + +Note that according to this definition any feasible solution $x$ is a 1-maximizer. + +Recently, there were several significant results on approximating specific quadratic problems. Goemans and Williamson [4] proved an approximation result for the Maxcut problem where $\epsilon \le 1 - 0.878$. Nesterov [6] generalized their result to approximating a boolean QP problem + +$$ +\begin{array}{ll} +\text{Maximize} & q(x) = x^T Q x \\ +\text{Subject to} & |x_j| = 1, \ j = 1, \dots, n. 
+\end{array} +$$ + +where $\epsilon \le 4/7$. Some negative results were given by Bellare and Rogaway [1]. + +There are also several approximation algorithms developed for approximating (QP) when the feasible set is a convex polytope. Pardalos and Rosen [8] developed a partitioning and linear programming based algorithm with an approximation bound $\epsilon = \epsilon(Q)$, where $\epsilon(Q)$, a function of the QP data, is less than 1. Vavasis [10] and Ye [11] developed a polynomial-time algorithm, based on solving a ball-constrained quadratic problem, to compute an $(1 - \frac{1}{n^2})$-maximal solution. When +---PAGE_BREAK--- + +the polytope is {$x: -e \le x \le e$}, Fu, Luo and Ye [2] further proved a $(1-\frac{1}{n})$ polynomial-time algorithm. + +In this note, we extend Goemans and Williamson and Nesterov's result to approximating (QP). We establish the same 4/7 result for approximating this problem. This result is based on a modification of Goemans and Williamson's algorithm and a generalization of Nesterov's proving technique. + +## 2 Positive Semi-Definite Relaxation + +The approximation algorithm for (QP) is to solve a positive semi-definite programming (SDP) relaxation problem + +$$ +\begin{array}{l@{\quad}c@{\quad}l} +\text{(SDP)} & \mathcal{s}(Q) := & \underset{\mathbf{X}}{\text{Maximize}} \quad \langle \mathbf{Q}, \mathbf{X} \rangle \\ +& & \text{Subject to} \quad d(\mathbf{X}) \le e, \mathbf{X} \succeq \mathbf{0}. +\end{array} +\tag{1} +$$ + +Here, $X \in \Re^{n \times n}$ is a symmetric matrix, $\langle \cdot, \cdot \rangle$ is the matrix inner product $\langle Q, X \rangle = \operatorname{trace}(QX)$, $d(X)$ is a vector containing the diagonal components of $X$, and $X \succeq Z$ means that $X - Z$ is positive semi-definite. 
+ +The dual of the problem is + +$$ +\begin{array}{l@{\quad}c@{\quad}l} +\mathcal{s}(\mathbf{Q}) = & \text{Minimize} & e^T y \\ +\text{Subject to} & D(y) & \succeq Q, y \ge 0, +\end{array} +\tag{2} +$$ + +where $D(y)$ is the diagonal matrix such that $d(D(y)) = y \in \Re^n$. Denote by $X(Q)$ and $y(Q)$ an optimal solution pair for the primal (1) and dual (2). + +The positive semi-definite relaxation was first proposed by Lovász and Shrijver [5], also see recent papers by Fujie and Kojima [3] and Polijak, Rendl and Wolkowicz [9]. This relaxation problem can be solved in polynomial time, e.g., see Nesterov and Nemirovskii [7]. + +We have the following relations between (QP) and (SDP). + +**Proposition 1** Let $q = q(Q), \underline{q} = -q(-Q), s = s(Q), \underline{s} = -s(-Q), \text{ and } \underline{y} = -y(Q)$. Then, + +1. $\underline{q}$ is the minimal objective value of $x^T Q x$ in the feasible set of (QP); + +2. $\underline{s} = e^T \underline{y}$ and it is the minimal objective value of $\langle Q, X \rangle$ in the feasible set of (SDP); + +3. + +$$ \underline{s} = -s(-Q) \le \underline{q} = -q(-Q) \le q(Q) = q \le s(Q) = s. $$ +---PAGE_BREAK--- + +**Proof.** The first and second statements are straightforward to verify. Let $X = x(Q)x(Q)^T \in \mathbb{R}^{n \times n}$. +Then $X \succeq 0$, $d(X) \le e$ and $\langle Q, X \rangle = q(x(Q)) = q(Q)$. Thus, we have $q(Q) = \langle Q, X \rangle \le s(Q)$. +Similarly, we can prove $q(-Q) \le s(-Q)$, or $-s(-Q) \le -q(-Q)$. ■ + +In what follows, we also let $x = x(Q)$, $X = X(Q)$. Since $X$ is positive semi-definite, there is a factorization matrix $V = (v_1, \dots, v_n) \in \mathbb{R}^{n \times n}$, i.e., $v_j$ is the $j$th column of $V$, such that $X = V^T V$. 
+The algorithm, similar to Goemans and Williamson [4], generates a random vector $u$ uniformly distributed on an $n$-dimensional unit ball and then assigns + +$$ \hat{x} = D\sigma(V^Tu), \quad (3) $$ + +where + +$$ D = \operatorname{diag}(\|v_1\|, \dots, \|v_n\|) = \operatorname{diag}(\sqrt{x_{11}}, \dots, \sqrt{x_{nn}}), $$ + +and for any $x \in \mathbb{R}^n$, $\sigma(x)$ is the vector whose components are $\operatorname{sign}(x_j)$, $j = 1, \dots, n$, that is, +$\operatorname{sign}(x_j) = 1$ if $x_j \ge 0$ and $\operatorname{sign}(x_j) = -1$ otherwise. + +It is easily seen that $\hat{x}$ is a feasible point for (QP) and we will show later that the expected objective value, $E_u q(\hat{x})$, satisfies + +$$ \frac{q - E_u q(\hat{x})}{q - \underline{q}} \le \frac{\pi}{2} - 1 \le \frac{4}{7}. $$ + +# 3 Approximation Analysis + +The following two lemmas are analogues to Lemmas 1 and 2 of Nesterov [6]. + +**Lemma 1** + +$$ +\begin{array}{l@{\quad}l} +\text{Maximize} & \sigma(V^T u)^T D Q D \sigma(V^T u) \\ +\text{Subject to} & \|v_j\| \le 1, \quad j = 1, \dots, n, \quad \|u\| = 1, \\ +\text{where} & D = \operatorname{diag}(\|v_1\|, \dots, \|v_n\|). +\end{array} + $$ + +**Proof.** Since $D\sigma(V^Tu)$ is a feasible point for (QP) for any feasible $V$ and $u$, we have + +$$ q(Q) \geq \sigma(V^T u)^T D Q D \sigma(V^T u). $$ + +On the other hand, for any fixed $u$ with $\|u\| = 1$, we let $v_j = x_j u$, $j = 1, \dots, n$. Then $D\sigma(V^Tu) = x$. +Thus, for a particular feasible $V$ and $u$ we have + +$$ q(Q) = q(x) \leq \sigma(V^T u)^T D Q D \sigma(V^T u). $$ + +These two give the desired result. ■ +---PAGE_BREAK--- + +**Lemma 2** + +$$ +\begin{array}{ll} +q(Q) = & \text{Maximize} \quad \mathbb{E}_u(\sigma(V^T u)^T D Q D \sigma(V^T u)) \\ +& \text{Subject to} \quad \|v_j\| \le 1, j = 1, \dots, n, \\ +\text{where} & \\ +& D = \text{diag}(\|v_1\|, \dots, \|v_n\|). 
+\end{array} +$$ + +**Proof.** Again, since $D\sigma(V^T u)$ is a feasible point for (QP), we have for any feasible $V$ + +$$ +q(Q) \geq \mathbb{E}_u (\sigma(V^T u)^T D Q D \sigma(V^T u)). +$$ + +On the other hand, for any fixed $u$ with $\|u\| = 1$, we have + +$$ +\mathbb{E}_u (\sigma(V^T u)^T D Q D \sigma(V^T u)) = \sum_{i=1}^{n} \sum_{j=1}^{n} q_{ij} \|v_i\| \|v_j\| \mathbb{E}_u (\sigma(v_i^T u) \sigma(v_j^T u)). \quad (4) +$$ + +Let us choose $v_i = \frac{\bar{x}_i}{\|\bar{x}\|} x$, $i = 1, \dots, n$. Then + +$$ +\mathbb{E}_u(\sigma(v_i^T u)\sigma(v_j^T u)) = \begin{cases} 1 & \text{if } \sigma(x_i) = \sigma(x_j) \\ -1 & \text{otherwise.} \end{cases} +$$ + +Thus, + +$$ +\|v_i\| \|v_j\| \mathbb{E}_u (\sigma(v_i^T u) \sigma(v_j^T u)) = x_i x_j +$$ + +which implies that for a particular feasible V + +$$ +q(Q) = q(x) \leq \mathbb{E}_u (\sigma(V^T u)^T D Q D \sigma(V^T u)). +$$ + +These two give the desired result. ■ + +For any function of one variable $f(t)$ and $X \in \mathbb{R}^{n \times n}$, let $f[X] \in \mathbb{R}^{n \times n}$ be the matrix with the components $f(x_{ij})$. For example, $[X]^p$ denotes a matrix with the components $x_{ij}^p$. Nesterov [6] has also proved the next technical lemma. + +**Lemma 3** Let $X \succeq 0$ and $d(X) \le 1$. Then $\arcsin[X] \succeq X$. ■ + +Now we are ready to prove the following theorem. + +**Theorem 1** + +$$ +\begin{array}{ll} +q(Q) = & \text{Supremum} \quad \frac{2}{\pi} \langle Q, D \arcsin[D^{\top} X D^{-1}] D \rangle \\ +& \text{Subject to} \quad d(X) \le e, X > 0, +\end{array} +$$ + +where + +$$ +D = \operatorname{diag}(\sqrt{x_{11}}, \ldots, \sqrt{x_{nn}}). 
+$$ +---PAGE_BREAK--- + +**Proof.** For any $X = V^T V > 0$, $d(X) \le e$, we have + +$$E_u(\sigma(v_i^T u)\sigma(v_j^T u)) = 1 - 2\text{Pr}\{\sigma(v_i^T u) \neq \sigma(v_j^T u)\} = 1 - 2\text{Pr}\{\sigma(\frac{v_i^T u}{\|v_i\|}) \neq \sigma(\frac{v_j^T u}{\|v_j\|})\}.$$ + +From Lemma 1.2 of Goemans and Williamson [4], we have + +$$\mathrm{Pr}\{\sigma(\frac{v_i^T u}{\|v_i\|}) \neq \sigma(\frac{v_j^T u}{\|v_j\|})\} = \frac{1}{\pi} \arccos(\frac{v_i^T v_j}{\|v_i\|\|v_j\|}).$$ + +Using the above lemma and equality (4) and noting $\arcsin(t)+\arccos(t) = \frac{\pi}{2}$ give the desired result. + +Theorem 1 leads us to + +**Theorem 2** We have + +1. + +$$q - s \geq \frac{2}{\pi}(s - s).$$ + +2. + +$$s - q \geq \frac{2}{\pi}(s - s).$$ + +3. + +$$s - s \geq q - q \geq \frac{4 - \pi}{\pi}(s - s).$$ + +**Proof.** Recall $y = -y(-Q) \le 0$, $s = -s(-Q) = e^T y$, and $Q - D(y) \ge 0$. Thus, for any $X > 0$, $d(X) \le e$ and $D = \operatorname{diag}(\sqrt{x_{11}}, \dots, \sqrt{x_{nn}})$, we have from Theorem 1 + +$$ +\begin{align*} +q = q(Q) &\ge \frac{2}{\pi} \langle Q, D \arcsin[D^T X D]^T D \rangle \\ +&= \frac{2}{\pi} \langle Q - D(y) + D(y), D \arcsin[D^T X D]^T D \rangle \\ +&= \frac{2}{\pi} \left( \langle Q - D(y), D \arcsin[D^T X D]^T D \rangle + \langle D(y), D \arcsin[D^T X D]^T D \rangle \right) \\ +&\ge \frac{2}{\pi} \left( \langle Q - D(y), D D^T X D^T D \rangle + \langle D(y), D \arcsin[D^T X D^T D] \rangle \right) \\ +&\quad (\text{since } Q - D(y) \ge 0 \text{ and } \arcsin[D^T X D]^T D \ge D^T X D^T) \\ +&= \frac{2}{\pi} \left( \langle Q - D(y), X \rangle + \langle D(y), D \arcsin[D^T X D]^T D \rangle \right) \\ +&= \frac{2}{\pi} \left( \langle Q, X \rangle - \langle D(y), X \rangle + \langle D(y), D \arcsin[D^T X D]^T D \rangle \right) \\ +&= \frac{2}{\pi} \left( \langle Q, X \rangle - y^T d(X) + y^T d(D \arcsin[D^T X D]^T D) \right) +\end{align*} +$$ +---PAGE_BREAK--- + +$$ +\begin{align*} +&= \frac{2}{\pi} \left( \langle Q, X \rangle - \underline{y}^T d(X) 
+ \overline{y}^T \left( \frac{\pi}{2} d(X) \right) \right) \\ +&= \frac{2}{\pi} \left( \langle Q, X \rangle + \left(\frac{\pi}{2} - 1\right) \underline{y}^T d(X) \right) \\ +&\geq \frac{2}{\pi} \left( \langle Q, X \rangle + \left(\frac{\pi}{2} - 1\right) \overline{y}^T e \rangle \right) \\ +&\quad (\text{since } 0 \le d(X) \le e \text{ and } \underline{y} \le 0) \\ +&= \frac{2}{\pi} \left( \langle Q, X \rangle + \left(\frac{\pi}{2} - 1\right) \underline{s} \right). +\end{align*} +$$ + +Let $X$ converge to $X$, then $\langle Q, X \rangle \to s$ and we have the desired first inequality. + +Replacing $Q$ with $-Q$ proves the second inequality in the theorem. + +Adding the first two inequalities gives the third statement in the theorem. ■ + +The result indicates that the positive semi-definite relaxation value $s - \underline{s}$ is a constant approximation of $q - \underline{q}$. + +The following corollary can be derived from the proof of the above theorem. + +**Corollary 1** Let $X = V^T V > 0$, $d(X) \le e$, $D = \operatorname{diag}(\sqrt{x_{11}}, \dots, \sqrt{x_{nn}})$, and $\hat{x} = D\sigma(V^T u)$ where $u$ with $\|u\| = 1$ is a random vector uniformly distributed on the unit ball. Moreover, let $X \to X$. Then, + +$$ +\lim_{X \succcurlyeq \bar{X}} E_u(q(\hat{x})) = \lim_{X \succcurlyeq \bar{X}} \frac{2}{\pi} \langle Q, D \arcsin[D^{-1}XD^{-1}]D \rangle \geq \frac{2}{\pi}s + (1 - \frac{2}{\pi})\underline{s}. +$$ + +Finally, we have + +**Theorem 3** Let $\hat{x}$ be generated above from $X = X$. Then + +$$ +\frac{q - E_u q(\hat{x})}{q - \underline{q}} \leq \frac{\pi}{2} - 1. 
+$$ + +**Proof.** Noting that + +$$ +s \ge q \ge \frac{2}{\pi}s + (1-\frac{2}{\pi})s \ge (1-\frac{2}{\pi})s + \frac{2}{\pi}\underline{s} \ge \underline{q} \ge \underline{s} +$$ + +we have + +$$ +\begin{align*} +\frac{q - E_u q(\hat{x})}{q - \underline{q}} &\le \frac{q - \frac{2}{\pi}s - (1 - \frac{2}{\pi})s}{q - \underline{q}} \\ +&\le \frac{q - \frac{2}{\pi}s - (1 - \frac{2}{\pi})s}{q - (1 - \frac{2}{\pi})s - \frac{2}{\pi}\underline{s}} \\ +&\le \frac{s - \frac{2}{\pi}s - (1 - \frac{2}{\pi})s}{s - (1 - \frac{2}{\pi})s - \frac{2}{\pi}\underline{s}} +\end{align*} +$$ +---PAGE_BREAK--- + +$$ +\begin{aligned} +&= \frac{(1 - \frac{2}{\pi})(s - s)}{\frac{2}{\pi}(s - s)} \\ +&= \frac{(1 - \frac{2}{\pi})}{\frac{2}{\pi}} = \frac{\pi}{2} - 1. +\end{aligned} +$$ + +References + +[1] M. Bellare and P. Rogaway, "The complexity of approximating a nonlinear program," *Mathematical Programming* 69 (1995) 429-442. + +[2] M. Fu, Z.-Q. Luo and Y. Ye, "Approximation algorithms for quadratic programming," manuscript, Department of Electrical and Computer Engineering, McMaster University, Hamilton, Ontario, CANADA L8S 4K1, 1996. + +[3] T. Fujie and M. Kojima, "Semidefinite programming relaxation for nonconvex quadratic programs," Research Report B-298, Dept. of Mathematical and Computing Sciences, Tokyo Institute of Technology, Meguro, Tokyo 152, May 1995. To appear in *Journal of Global Optimization*. + +[4] M. X. Goemans and D. P. Williamson, "Improved approximation algorithms for Maximum Cut and Satisfiability problems using semidefinite programming," *Journal of ACM* 42 (1995) 1115-1145. + +[5] L. Lovász and A. Shrijver, "Cones of matrices and setfunctions, and 0-1 optimization," *SIAM Journal on Optimization* 1 (1990) 166-190. + +[6] Yu. E. Nesterov, "Quality of semidefinite relaxation for nonconvex quadratic optimization," CORE Discussion Paper, #9719, Belgium, March 1997. + +[7] Yu. E. Nesterov and A. S. 
Nemirovskii, *Interior Point Polynomial Methods in Convex Programming: Theory and Algorithms* (SIAM Publications, SIAM, Philadelphia, 1993). + +[8] P. M. Pardalos and J. B. Rosen, *Constrained Global Optimization: Algorithms and Applications* (Springer-Verlag, Lecture Notes in Computer Sciences 268, 1987). + +[9] S. Polijak, F. Rendl and H. Wolkowicz, "A recipe for semidefinite relaxation for 0-1 quadratic programming," *Journal of Global Optimization* 7 (1995) 51-73. + +[10] S. A. Vavasis, *Nonlinear Optimization: Complexity Issues* (Oxford Science, New York, 1991). + +[11] Y. Ye, "On affine scaling algorithms for nonconvex quadratic programming," *Mathematical Programming* 56 (1992) 285-300. \ No newline at end of file diff --git a/samples_new/texts_merged/1772599.md b/samples_new/texts_merged/1772599.md new file mode 100644 index 0000000000000000000000000000000000000000..ee0fe8cf096daa7e30a00e24681021645feb3b46 --- /dev/null +++ b/samples_new/texts_merged/1772599.md @@ -0,0 +1,1063 @@ + +---PAGE_BREAK--- + +Stability Properties of Linear File-Sharing Networks + +L. Leskelä, Philippe Robert, Florian Simatos + +► To cite this version: + +L. Leskelä, Philippe Robert, Florian Simatos. Stability Properties of Linear File-Sharing Networks. +2009. inria-00401104 + +HAL Id: inria-00401104 + +https://hal.inria.fr/inria-00401104 + +Preprint submitted on 2 Jul 2009 + +**HAL** is a multi-disciplinary open access +archive for the deposit and dissemination of sci- +entific research documents, whether they are pub- +lished or not. The documents may come from +teaching and research institutions in France or +abroad, or from public or private research centers. + +L'archive ouverte pluridisciplinaire **HAL**, est +destinée au dépôt et à la diffusion de documents +scientifiques de niveau recherche, publiés ou non, +émanant des établissements d'enseignement et de +recherche français ou étrangers, des laboratoires +publics ou privés. 
+---PAGE_BREAK--- + +# STABILITY PROPERTIES OF LINEAR FILE-SHARING NETWORKS + +LASSE LESKELÄ, PHILIPPE ROBERT, AND FLORIAN SIMATOS + +**ABSTRACT.** File-sharing networks are distributed systems used to disseminate files among a subset of the nodes of the Internet. A file is split into several pieces called chunks, the general simple principle is that once a node of the system has retrieved a chunk, it may become a server for this chunk. A stochastic model is considered for arrival times and durations of time to download chunks. One investigates the maximal arrival rate that such a network can accommodate, i.e., the conditions under which the Markov process describing this network is ergodic. Technical estimates related to the survival of interacting branching processes are key ingredients to establish the stability of these systems. Several cases are considered: networks with one and two chunks where a complete classification is obtained and several cases of a network with *n* chunks. + +## CONTENTS + +1. Introduction 1 + +2. Analysis of the Single-Chunk Network 4 + +3. Yule Processes with Deletions 9 + +4. Analysis of the Multi-Chunk Network 15 + +Appendix A. Proof of Proposition 3.3 21 + +References 24 + +## 1. INTRODUCTION + +File-sharing networks are distributed systems used to disseminate information among a subset of the nodes of the Internet (overlay network). The general simple principle is the following: once a node of the system has retrieved a file it becomes a server for this file. The advantage of this scheme is that it disseminates information in a very efficient way as long as the number of servers is growing rapidly. The growth of the number of servers is not necessarily without bounds since a node having this file may stop being a server after some time. These schemes have been used for some time now in peer-to-peer systems such as BitTorrent or Emule, for example to distribute large files over the Internet. 
+ +An improved version of this principle consists in splitting the original file into several pieces (called “chunks”) so that a given node can retrieve simultaneously several chunks of the same file from different servers. In this case, the rate to get a given file may thus increase significantly. At the same time, the global capacity of + +*Date:* July 2, 2009. + +*Key words and phrases.* Peer-to-Peer Algorithms; Killed Branching Processes; + +Work partially supported by SCALP Project funded by EEC Network of Excellence Euro-FGI, and the Academy of Finland. +---PAGE_BREAK--- + +the file-sharing system is also increased since a node becomes a server of a chunk as soon as it has retrieved it and not only when it has the whole file. This improvement has interesting algorithmic implications since each node has to establish a matching between chunks and servers. Strategies to maximize the global efficiency of the file sharing systems have to be devised. See for instance Massoulié and Vojnović [12], Bonald et al. [4] and Massoulié and Twigg [11]. + +The efficiency of these systems can be considered from different points of view. + +**Transient behavior:** A new file is owned by one node, given there are potentially *N* other nodes interested in it, how long does it take so that a given node retrieves it ? a significant fraction $\alpha \in (0, 1]$ of the *N* nodes retrieve it ? See Yang and de Veciana [26] and Simatos et al. [22]. See also Robert and Simatos [19]. + +**Stationary behavior:** A constant flow of requests enters, is the capacity of the file-sharing system sufficient to cope with this flow ? + +In this paper, the stationary behavior is investigated in a stochastic context: arrival times are random as well as chunk transmission times. In this setting mathematical studies are quite scarce, see Qiu and Srikant [17], Simatos et al. [22], Susitaival et al. [24] and references therein. 
A simple strategy to disseminate chunks is considered: chunks are retrieved sequentially and a given node can be the server of only the last chunk it got. See Massoulié and Vojnović [12] and Parvez et al. [16] for a detailed motivation of this situation. + +In this paper, the sequential scheme for disseminating a file that is divided into +n chunks is analyzed. New requests arrive according to a Poisson process at rate $\lambda$, +and become downloaders of chunk 1. Users who have obtained chunks 1,...,k act +simultaneously as uploaders of chunk k and downloaders of chunk k + 1, and the +users who have all the chunks leave the network at rate $\nu$. The transmission rate +of chunk k is denoted by $\mu_k$, and $x_k$ is the number of users having obtained chunks +1,...,k. In this way, the total transmission rate of chunk k in the network is $\mu_k x_k$. +The flow of users can be modeled as the linear network depicted in Figure 1. + +FIGURE 1. Transition rates of the linear network outside boundaries. + +The main problem analyzed in the paper is the determination of a constant $\lambda^*$ such that if $\lambda < \lambda^*$ [resp. $\lambda > \lambda^*$], then the associated Markov process is ergodic [resp. transient]. As it will be seen, the constant $\lambda^*$ may be infinite in some cases so that the file-sharing network is always stable independently of the value of $\lambda$. The main technical difficulty to prove stability/instability results for this class of stochastic networks is that, except for the input, the Markov process has unbounded jump rates, in fact proportional to one of the coordinates of the current state. Note that loss networks have also this characteristic but in this case, the stability problem is trivial since the state space is finite. See Kelly [8]. 
+ +**Fluid Limits for File-Sharing Networks.** Classically, to analyze the stability properties of stochastic networks, one can use the limits of a scaling of the Markov +---PAGE_BREAK--- + +process, the so-called fluid limits. The scaling consists in speeding up time by +the norm $\|x\|$ of the initial state $x$, by scaling the state vector by $1/\|x\|$ and by +letting $\|x\|$ go to infinity. See Bramson [5], Chen and Yao [6] and Robert [18] for +example. This scaling is, however, better suited to "locally additive" processes, that +is, Markov processes that behave locally as random walks. Since the transition rates +are unbounded, it may occur that the corresponding fluid limits have discontinuities; +this complicates a lot the analysis of a possible limiting dynamical system. Roughly +speaking, this is due to the fact that, because of the unbounded transition rates, +events occur on the time scale $t \mapsto t \log \|x\|$ instead of $t \mapsto \|x\|t$. See the case of +the $M/M/\infty$ queue in Chapter 9 of Robert [18], and Simatos and Tibi [23] for a +discussion of this phenomenon in a related context. + +A "fluid scaling" is nevertheless available for file-sharing networks. A possible description for a possible candidate $(x_i(t))$ for this limiting picture would satisfy the following differential equations, + +$$ (1) \qquad \begin{cases} \dot{x}_0(t) = \lambda - \mu_1 x_1(t), \\ \dot{x}_i(t) = \mu_i x_i(t) - \mu_{i+1} x_{i+1}(t), & 1 \le i \le n-1, \\ \dot{x}_n(t) = \mu_n x_n(t) - \nu x_n(t). \end{cases} $$ + +For the sake of simplicity the behavior at the boundaries {$x : x_i = 0$}, $i \ge 1$ is +ignored in the above equations. This has been, up to now, one of the main tools to +investigate mathematical models of file-sharing networks. See Qiu and Srikant [17], +Núñez-Queija and Prabhu [15] for example. 
In the context of loss networks, an +analogous limiting picture can be rigorously justified when the input rates and +buffer sizes are scaled by some $N$ and the state variable by $1/N$. This scaling is not +useful here, since the problem is precisely of determining the values of $\lambda$ for which +the associated Markov is ergodic whereas in the above scaling $\lambda$ is scaled. From +this point of view Equations (1) are therefore quite informal. They can nevertheless +give some insight into the qualitative behavior of these networks but they cannot +apparently be used to prove stability results. Their interpretation near boundaries +is in particular not clear. + +**Interacting Branching Processes.** Since scaling techniques do not apply here, one needs to resort to different techniques to study stability: coupling the linear file-sharing network with interacting branching processes is a key idea. For $i \ge 1$, without the departures the process $(X_i(t))$ would be a branching process where individuals give birth to one child at rate $\mu_i$. This description of such a file-sharing system as a branching process is quite natural. It has been used to analyze the transient behavior of these systems. See Yang and de Veciana [26], Dang *et al.* [7] and Simatos *et al.* [22]. A departure for $(X_i(t))$ can be seen as a death of an individual of class *i* and at the same time as a birth of an individual of class *i*+1. The file-sharing network can thus be described as a system of interacting branching processes with a constant input rate $\lambda$. + +To tackle the general problem of stability, several key ingredients are used in +this paper: Lyapunov functions, coupling arguments and precise estimations of +the growth of a branching process killed by another branching process. As it will +be seen, several results used come from the branching process formulation of the +stochastic model. 
In particular Section 3 is devoted to the derivation of results +concerning killed branching processes. The stability properties of networks with +---PAGE_BREAK--- + +a single-chunk file are analyzed in detail in Section 2. In Section 4, file-sharing networks with $n$ chunks are studied and the case $n = 2$ is investigated thoroughly. + +**Acknowledgements.** + +This paper has benefited from various interesting discussions with S. Borst, I. Norros, R. Núñez-Queija, B.J. Prabhu, and H. Reittu. + +## 2. ANALYSIS OF THE SINGLE-CHUNK NETWORK + +This section is devoted to the study of a class of two-dimensional Markov jump processes $(X_0(t), X_1(t))$, the corresponding Q-matrix $\Omega_r$ is given, for $x = (x_0, x_1) \in \mathbb{N}^2$, by + +$$ (2) \quad \begin{cases} \Omega_r[(x_0, x_1), (x_0 + 1, x_1)] = \lambda, \\ \Omega_r[(x_0, x_1), (x_0 - 1, x_1 + 1)] = \mu r(x_0, x_1) (x_1 \lor 1) \mathbf{1}_{\{x_0>0\}}, \\ \Omega_r[(x_0, x_1), (x_0, x_1 - 1)] = \nu x_1, \end{cases} $$ + +where $x \mapsto r(x)$, referred to as the *rate function*, is some fixed function on $\mathbb{N}^2$ with values in $[0, 1]$ and $n \lor m$ denotes $\max(n, m)$ for $n, m \in \mathbb{N}^2$. This corresponds to a more general model than the linear file-sharing network of Figure 1 in the case $n=1$, where for the sake of simplicity $\mu_1$ is noted $\mu$ in this section. + +From a modeling perspective, this Markov process describes the following system. Requests for a single file arrive with rate $\lambda$, the first component $X_0(t)$ is the number of requests which did not get the file, whereas the second component is the number of requests having the file and acting as servers until they leave the file-sharing network. The constant $\mu$ can be viewed as the file transmission rate, and $\nu$ as the rate at which servers having all chunks leave. The term $r(x_0, x_1)$ describes the interaction of downloaders and uploaders in the system. 
The term $x_1 \lor 1$ can be interpreted so that there is one permanent server in the network, which is contacted if there are no other server nodes in the system. A related system where there is always one permanent server for the file can be modeled by replacing the term $x_1 \lor 1$ by $x_1 + 1$. See the remark at the end of this section. + +Several related examples of this class of models have been recently investigated. The case + +$$ r(x_0, x_1) = \frac{x_0}{x_0 + x_1} $$ + +is considered in Núñez-Queija and Prabhu [15] and Massoulié and Vojnović [12]; in this case the downloading time of the file is neglected. Susitaival et al. [24] analyzes the rate function $r(x)$ + +$$ r(x_0, x_1) = 1 \wedge \left( \alpha \frac{x_0}{x_1} \right) $$ + +with $\alpha > 0$ and $a \land b$ denotes $\min(a, b)$ for $a, b \in \mathbb{R}$. This model allows one to take into account that a request cannot be served by more than one server. See also Qiu and Srikant [17]. + +With a slight abuse of notation, for $0 < \delta \le 1$, the matrix $\Omega_\delta$ will refer to the case when the function $r$ is identically equal to $\delta$. Note that the boundary condition $x_1 \lor 1$ for departures from the first queue prevents the second coordinate from ending up in the absorbing state 0. Other possibilities are discussed at the end of this section. In the following $(X^r(t)) = (X_0^r(t), X_1^r(t))$ [resp. $(X^\delta(t))$] will denote a Markov process with Q-matrix $\Omega_r$ [resp. $\Omega_\delta$]. +---PAGE_BREAK--- + +**Free Process.** For $\delta > 0$, $Q_\delta$ denotes the following $Q$-matrix + +$$ (3) \qquad \begin{cases} Q_\delta[(y_0, y_1), (y_0 + 1, y_1)] = \lambda, \\ Q_\delta[(y_0, y_1), (y_0 - 1, y_1 + 1)] = \mu\delta(y_1 \vee 1), \\ Q_\delta[(y_0, y_1), (y_0, y_1 - 1)] = \nu y_1. \end{cases} $$ + +The process $(Y^\delta(t)) = (Y_0^\delta(t), Y_1^\delta(t))$, referred to as the free process, will denote a Markov process with $Q$-matrix $Q_\delta$. 
Note that the first coordinate $Y_0^\delta$ may become negative. The second coordinate $(Y_1^\delta(t))$ of the free process is a classical birth-and-death process. It is easily checked that if $\rho_\delta$ defined as $\delta\mu/\nu$ is such that $\rho_\delta < 1$, then $(Y_1^\delta(t))$ is an ergodic Markov process converging in distribution to $Y_1^\delta(\infty)$ and that + +$$ (4) \quad \lambda^*(\delta) \stackrel{\text{def.}}{=} \nu \mathbb{E}(Y_1^\delta(\infty)) = \delta\mu \mathbb{E}(Y_1^\delta(\infty) \vee 1) = \frac{\delta \mu}{(1 - \rho_\delta)(1 - \log(1 - \rho_\delta))}. $$ + +When $\rho_\delta > 1$, then the process $(Y^\delta(t))$ converges almost surely to infinity. In the sequel $\lambda^*(1)$ is simply denoted $\lambda^*$. + +In the following it will be assumed, Condition (C) below, that the rate function $r$ converges to 1 as the first coordinate goes to infinity; as will be seen, the special case $r \equiv 1$ then plays a special role, and so before analyzing the stability properties of $(X^r(t))$, one begins with an informal discussion when the rate function $r$ is identically equal to 1. Since the departure rate from the system is proportional to the number of requests/servers in the second queue, a large number of servers in the second queue gives a high departure rate, irrespective of the state of the first queue. The input rate of new requests being constant, the real bottleneck with respect to stability is therefore when the first queue is large. The interaction of the two processes $(X_0^1(t))$ and $(X_1^1(t))$ is expressed through the indicator function of the set $\{X_0^1(t) > 0\}$. The second queue $(X_1^1(t))$ locally behaves like the birth-and-death process $(Y_1^1(t))$ as long as $(X_0^1(t))$ is away from 0. The two cases $\rho_1 > 1$ and $\rho_1 < 1$ are considered. + +If $\rho_1 > 1$, i.e., $\mu > \nu$, the process $(X_1^1(t))$ is a transient process as long as the first coordinate is non-zero. 
Consequently, departures from the second queue occur faster and faster. Since, on the other hand, arrivals occur at a steady rate, departures eventually outpace arrivals. The fact that the second queue grows when $(X_0(t))$ is away from 0 stabilizes the system independently of the value of $\lambda$, and so the system should be stable for any $\lambda > 0$. + +If $\rho_1 < 1$, and as long as $(X_0(t))$ is away from 0, the coordinate $(X_1^1(t))$ locally behaves like the ergodic Markov process $(Y_1^1(t))$. Hence if $(X_0^1(t))$ is non-zero for long enough, the requests in the first queue see in average $\mathbb{E}(Y_1^1(\infty) \vee 1)$ servers which work at rate $\mu$. Therefore, the stability condition for the first queue should be + +$$ \lambda < \mu \mathbb{E}(Y_1^1(\infty) \vee 1) = \lambda^* $$ + +where $\lambda^* = \lambda^*(1)$ is defined by Equation (4). Otherwise if $\lambda > \lambda^*$, the system should be unstable. + +**Markovian Notations.** In the following, one will use the following convention, if $(U(t))$ is a Markov process, the index $u$ of $\mathbb{P}_u((U(t)) \in \cdot)$ will refer to the initial condition of this Markov process. +---PAGE_BREAK--- + +**Transience and Recurrence Criteria for $(X^r(t))$.** + +**Proposition 2.1 (Coupling).** If $X^r(0) = Y^1(0) \in \mathbb{N}^2$, there exists a coupling of the processes $(X^r(t))$ and $(Y^1(t))$ such that the relation + +$$ (5) \qquad X_0^r(t) \ge Y_0^1(t) \text{ and } X_1^r(t) \le Y_1^1(t), $$ + +holds for all $t \ge 0$ and for any sample path. 
+ +For any $0 \le \delta \le 1$, if + +$$ \tau_{\delta} = \inf\{t \ge 0 : r(X^r(t)) \le \delta\} \text{ and } \sigma = \inf\{t \ge 0 : X_0^r(t) = 0\}, $$ + +and if $X^1(0) = Y^\delta(0) \in \mathbb{N}^2$ then there exists a coupling of the processes $(X^r(t))$ and $(Y^\delta(t))$ such that, for any sample path, the relation + +$$ (6) \qquad X_0^r(t) \le Y_0^\delta(t) \text{ and } X_1^r(t) \ge Y_1^\delta(t) $$ + +holds for all $t \le \tau_\delta \wedge \sigma$. + +*Proof.* Let $X^r(0) = (x_0, x_1)$ and $Y^1(0) = (y_0, y_1)$ be such that $x_0 \ge y_0$ and $x_1 \le y_1$, one has to prove that the processes $(X^r(t))$ and $(Y^1(t))$ can be constructed such that Relation (5) holds at the time of the next jump of one of them. See Leskelä [10] for the existence of couplings using analytical, nonconstructive techniques. + +The arrival rates in the first queue are the same for both processes. If $x_1 < y_1$, a departure from the second queue for $(Y^1(t))$ or $(X^r(t))$ preserves the order relation (5) and if $x_1 = y_1$, this departure occurs at the same rate for both processes and thus the corresponding instant can be chosen at the same (exponential) time. For the departures from the first to the second queue, the departure rate for $(X^r(t))$ is $\mu r(x_0, x_1)(x_1 \vee 1)\mathbb{I}_{\{x_0>0\}} \le \mu(y_1 \vee 1)$ which is the departure rate for $(Y^1(t))$, hence the corresponding departure instants can be taken in the reverse order so that Relation (5) also holds at the next jump instant. The first part of the proposition is proved. + +The rest of the proof is done in a similar way: The initial states $X^r(0) = (x_0, x_1)$ and $Y^\delta(0) = (y_0, y_1)$ are such that $x_0 \le y_0$ and $x_1 \ge y_1$. With the killing of the processes at time $\tau_\delta \wedge \sigma$ one can assume additionally that $x_0 \neq 0$ and that the relation $r(x_0, x_1) \ge \delta$ holds; Under these assumptions one can check by inspecting the next transition that (6) holds. 
The proposition is proved. $\square$ + +**Proposition 2.2.** *Under the condition $\mu < \nu$, the relation* + +$$ \liminf_{t \to +\infty} \frac{X_0^r(t)}{t} \geq \lambda - \lambda^* $$ + +holds almost surely. In particular, if $\mu < \nu$ and $\lambda > \lambda^*$, then the process $(X^r(t))$ is transient. + +*Proof.* By Proposition 2.1, one can assume that there exists a version of $(Y^1(t))$ such that $X_0^r(0) = Y_0^1(0)$ and the relation $X_0^r(t) \ge Y_0^1(t)$ holds for any $t \ge 0$. From Definition (3) of the Q-matrix of $(Y^1(t))$, one has, for $t \ge 0$, + +$$ Y^{1}(t) = Y^{1}(0) + N_{\lambda}(t) - A(t), $$ + +where $(N_\lambda(t))$ is a Poisson process with parameter $\lambda$ and $(A(t))$ is the number of arrivals (jumps of size 1) for the second coordinate $(Y_1^1(t))$: in particular + +$$ \mathbb{E}(A(t)) = \mu \mathbb{E} \left( \int_{0}^{t} Y_{1}^{1}(s) \vee 1 ds \right). $$ +---PAGE_BREAK--- + +Since $(Y_1^1(t))$ is an ergodic Markov process under the condition $\mu < \nu$, the ergodic theorem in this setting gives that + +$$ \lim_{t \to +\infty} \frac{1}{t} A(t) = \lim_{t \to +\infty} \frac{1}{t} \mathbb{E}(A(t)) = \mu \mathbb{E} (Y_1^1(\infty) \lor 1) = \lambda^*, $$ + +by Equation (4), hence $(Y_0^1(t)/t)$ converges almost surely to $\lambda - \lambda^*$. The proposition is proved. $\square$ + +The next result establishes the ergodicity result of this section. + +**Proposition 2.3.** If the rate function $r$ is such that, for any $x_1 \in \mathbb{N}$, + +(C) + +$$ \lim_{x_0 \to +\infty} r(x_0, x_1) = 1, $$ + +and if $\mu \ge \nu$, or if $\mu < \nu$ and $\lambda < \lambda^*$ with + +$$ \lambda^* = \frac{\mu}{(1-\rho)(1-\log(1-\rho))}, $$ + +and $\rho = \mu/\nu$, then $(X^r(t))$ is an ergodic Markov process. + +Note that Condition (C) is satisfied for the functions $r$ considered in the models considered by Núñez-Queija and Prabhu [15] and in Susitaival et al. [24]. See above. 
+ +*Proof.* If $x = (x_0, x_1) \in \mathbb{R}^2$, $|x|$ denotes the norm of $x$, $|x| = |x_0| + |x_1|$. The proof uses Foster's criterion as stated in Robert [18, Theorem 9.7]. If there exist constants $K_0, K_1, t_0, t_1$ and $\eta > 0$ such that, for $x = (x_0, x_1) \in \mathbb{N}^2$, + +(8) + +$$ \mathbb{E}_{(x_0,x_1)}(|X^r(t_1)| - |x|) \leq -t_1, \text{ if } x_1 \geq K_1, $$ + +(9) + +$$ \mathbb{E}_{(x_0,x_1)}(|X^r(t_0)| - |x|) \leq -\eta t_0, \text{ if } x_0 \geq K_0 \text{ and } x_1 < K_1, $$ + +then the Markov process $(X^r(t))$ is ergodic. + +Relation (8) is straightforward to establish: if $x_1 \ge K_1$, one gets, by considering only $K_1$ of the $x_1$ initial servers in the second queue and the Poisson arrivals, that + +$$ \mathbb{E}_{(x_0,x_1)}(|X^r(1)| - |x|) \leq \lambda - K_1(1 - e^{-\nu}), $$ + +hence it is enough to take $t_1 = 1$ and $K_1 = (\lambda+1)/(1-e^{-\nu})$ to have Relation (8). + +One has therefore to establish Inequality (9). Let $\tau_\delta$ and $\sigma$ be the stopping times introduced in Proposition 2.1, one first proves an intermediate result: for any $t > 0$ and any $x_1 \in \mathbb{N}$, + +$$ (10) \quad \lim_{x_0 \to +\infty} \mathbb{P}_{(x_0,x_1)}(\sigma \wedge \tau_\delta \le t) = 0. $$ + +Fix $x_1 \in \mathbb{N}$ and $t \ge 0$: for $\varepsilon > 0$, there exists $D_1$ such that + +$$ \mathbb{P}_{x_1} \left( \sup_{0 \le s \le t} Y_1^1(s) \ge D_1 \right) \le \varepsilon, $$ + +from Proposition 2.1, this gives the relation valid for all $x_0 \ge 0$, + +$$ \mathbb{P}_{(x_0,x_1)} \left( \sup_{0 \le s \le t} X_1^r(s) \ge D_1 \right) \le \varepsilon. $$ + +By Condition (C), there exists $\gamma \ge 0$ (that depends on $x_1$) such that $r(x_0, x_1) \ge \delta$ when $x_0 \ge \gamma$. As long as $(X^r(t))$ stays in the subset $\{(y_0, y_1) : y_1 \le D_1\}$, the transition rates of the first component $(X_0^r(t))$ are uniformly bounded. 
Consequently, +---PAGE_BREAK--- + +there exists $K$ such that, for $x_0 \ge K$, + +$$ \mathbb{P}_{(x_0,x_1)} \left[ \sup_{s \le t} X_0^r(s) \le \gamma, \sup_{s \le t} X_1^r(s) \le D_1 \right] \le \varepsilon. $$ + +Relation (10) follows from the last two inequalities and the identity + +$$ \mathbb{P}_{(x_0,x_1)}(\sigma \wedge \tau_\delta \le t) \le \mathbb{P}_{(x_0,x_1)}\left(\sup_{s \le t} X_0^r(s) \le \gamma\right). $$ + +One returns to the proof of Inequality (9). By definition of the Q-matrix of the process $(X^r(t))$, + +$$ \mathbb{E}_{(x_0,x_1)}(|X^r(t)| - |x|) = \lambda t - \nu \int_0^t \mathbb{E}_{(x_0,x_1)}(X_1^r(u)) du, x \in \mathbb{N}^2, t \ge 0. $$ + +For any $x \in \mathbb{N}^2$, there exists a version of $(Y^\delta(t))$ with initial condition $Y^\delta(0) = X^r(0) = x$, and such that Relation (6) holds for $t < \tau_\delta \wedge \sigma$, in particular + +$$ \begin{aligned} \mathbb{E}_x(X_1^r(t)) &\geq \mathbb{E}_x(X_1^r(t); t < \tau_\delta \wedge \sigma) \\ &\geq \mathbb{E}_x(Y_1^\delta(t); t < \tau_\delta \wedge \sigma) = \mathbb{E}_x(Y_1^\delta(t)) - \mathbb{E}_x(Y_1^\delta(t); t \geq \tau_\delta \wedge \sigma). 
\end{aligned} $$ + +Cauchy-Schwarz inequality shows that for any $t \ge 0$ and $x \in \mathbb{N}^2$ + +$$ \begin{aligned} \int_0^t \mathbb{E}_x(Y_1^\delta(u); \tau_\delta \wedge \sigma \le u) du &\le \int_0^t \sqrt{\mathbb{E}_x\left[(Y_1^\delta(u))^2\right]} \sqrt{\mathbb{P}_x(\tau_\delta \wedge \sigma \le u)} du \\ &\le \sqrt{\mathbb{P}_x(\tau_\delta \wedge \sigma \le t)} \int_0^t \sqrt{\mathbb{E}_x\left[(Y_1^\delta(u))^2\right]} du, \end{aligned} $$ + +by gathering these inequalities, and by using the fact that the process $(Y_1^\delta(t))$ depends only on $x_1$ and not $x_0$, one finally gets the relation + +$$ (11) \quad \frac{1}{t} \mathbb{E}_x(|X(t)| - |x|) \leq \lambda - \frac{\nu}{t} \int_0^t \mathbb{E}_{x_1}(Y_1^\delta(u)) du + c(x_1, t) \sqrt{\mathbb{P}_x(\tau_\delta \wedge \sigma \le t)} $$ + +with + +$$ c(x_1, t) = \frac{\nu}{t} \int_{0}^{t} \sqrt{\mathbb{E}_{x_1} [ (Y_1^{\delta}(u))^2 ]} du. $$ + +Two cases are considered. + +(1) If $\mu > \nu$, if $\delta < 1$ is such that $\delta\mu > \nu$, the process $(Y_1^\delta(t))$ is transient, so that + +$$ \lim_{t \to +\infty} \frac{1}{t} \int_0^t \mathbb{E}_{x_1}(Y_1^\delta(u)) du = +\infty, $$ + +for each $x_1 \ge 0$. + +(2) If $\mu < \nu$, one takes $\delta = 1$, or if $\mu = \nu$, one takes $\delta < 1$ close enough to 1 so that $\lambda < \lambda^*(\delta)$. In both cases, $\lambda < \lambda^*(\delta)$ and the process $(Y_1^\delta(t))$ converges in distribution, hence + +$$ \lim_{t \to +\infty} \frac{1}{t} \int_0^t \mathbb{E}_{x_1}(Y_1^\delta(u)) du = \nu E(Y_1^\delta(\infty)) = \lambda^*(\delta) > \lambda $$ + +for each $x_1 \ge 0$. 
+---PAGE_BREAK--- + +Consequently in both cases, there exist constants $\eta > 0$, $\delta < 1$ and $t_0 > 0$ such that for any $x_1 \le K_1$, + +$$ (12) \qquad \lambda - \nu \frac{1}{t_0} \int_0^{t_0} \mathbb{E}_{x_1}(Y_1^\delta(u)) du \le -\eta, $$ + +with Relation (11), one gets that if $x_1 \le K_1$ then + +$$ \frac{1}{t_0} \mathbb{E}_x(|X(t_0)| - |x|) \le -\eta + c^* \sqrt{\mathbb{P}_x(\tau_\delta \wedge \sigma \le t_0)}, $$ + +where $c^* = \max(c(n, t_0), 0 \le n \le K_1)$. By Identity (10), there exists $K_0$ such that, for all $x_0 \ge K_0$ and $x_1 \le K_1$, the relation + +$$ c^* \sqrt{\mathbb{P}_{(x_0,x_1)}(\tau_{\delta} \wedge \sigma \le t_0)} \le \frac{\eta}{2} $$ + +holds. This relation and the inequalities (12) and (11) give Inequality (9). The proposition is proved. $\square$ + +**Another Boundary Condition.** The boundary condition $x_1 \lor 1$ in the transition rates of $(X(t))$, Equation (2), prevents the second coordinate from ending up in the absorbing state 0. It amounts to suppose that a permanent server gets activated when no node may offer the file. Another way to avoid this absorbing state is to suppose that a permanent node is always active, which gives transition rates with $x_1+1$ instead. This choice was for instance made in Núñez-Queija and Prabhu [15]. All our results apply for this other boundary condition: the only difference that is when $\nu > \mu$, the value of the threshold $\lambda^*$ of Equation (4) is given by the quantity $\lambda^* = \mu\nu/(\nu - \mu)$. + +### 3. YULE PROCESSES WITH DELETIONS + +This section introduces the tools which are necessary in order to generalize the results of the previous section to the multi-chunk case $n \ge 2$. A Yule process $(Y(t))$ with rate $\mu > 0$ is a Markovian branching process with Q-matrix + +$$ (13) \qquad q_Y(x, x+1) = \mu x, \quad \forall x \ge 0. $$ + +An individual gives birth to a child, or equivalently splits into two particles, with rate $\mu$. 
Let $(\sigma_n)$ be the split times of a Yule process started with one particle, it is not difficult to check that, for $n \ge 1$, + +$$ \sigma_n \stackrel{\text{dist.}}{=} \sum_{\ell=1}^{n} \frac{E_{\ell}^{\mu}}{\ell} \stackrel{\text{dist.}}{=} \max(E_1^{\mu}, \dots, E_n^{\mu}), $$ + +where $(E_{\ell}^{\mu})$ are i.i.d. exponential random variables with parameter $\mu$. If $\lambda > \mu$ then, by using Fubini's Theorem, + +$$ (14) \qquad +\begin{aligned} +\mathbb{E}\left(\sum_{\ell=1}^{+\infty} e^{-\lambda\sigma_\ell}\right) &= \mathbb{E}\left(\sum_{\ell=1}^{+\infty} \int_0^{+\infty} \lambda e^{-\lambda x} 1_{\{\sigma_\ell \le x\}} dx\right) = \int_0^{+\infty} \lambda e^{-\lambda x} \sum_{\ell=1}^{+\infty} \mathbb{P}(\sigma_\ell \le x) dx \\ +&= \int_0^{+\infty} \lambda e^{-\lambda x} \frac{1-e^{-\mu x}}{e^{-\mu x}} dx = \frac{\mu}{\lambda-\mu} < +\infty. +\end{aligned} +$$ + +In this section one considers some specific results on variants of this stochastic model when some individuals are killed. In terms of branching processes, this amounts to prune the tree, i.e., to cut some edges of the tree, and the subtree attached to +---PAGE_BREAK--- + +it. This procedure is fairly common for branching processes, in the Crump-Mode- +Jagers model for example, see Kingman [9]. See also Neveu [14] or Aldous and +Pitman [1]. Two situations are considered: the first one when the deletions are +part of the internal dynamics, so that each individual dies out after an exponential +time, and the other when killings are given by an exogenous process and occur at +fixed (random or deterministic) epochs. + +**Constant Death Rate and Regeneration.** Let $(Z(t))$ be the birth-and-death process whose $Q$-matrix $Q_Z$ is given by, for $\mu_Z > 0$ and $\nu > 0$, + +$$ +(15) \qquad q_Z(z, z+1) = \mu_Z(z \lor 1) \text{ and } q_Z(z, z-1) = \nu z. 
+$$ + +The lifetime of an individual is exponentially distributed with parameter $\nu$, and the +process restarts with one individual after some time when it hits 0. This process +can be described equivalently as a time-changed $M/M/1$ queue or as a sequence +of independent branching processes. As it will be seen, these two viewpoints are +complementary. + +In the rest of this part, $\mu_Z$ and $\nu$ are fixed, $(Z(t))$ is the Markov process with $Q$-matrix $Q_Z$, $(\sigma_n)$ is the sequence of times of its positive jumps, the birth instants, and $(B_\sigma(t))$ is the corresponding counting process of $(\sigma_n)$, for $t \ge 0$, + +$$ +B_{\sigma}(t) = \sum_{i \ge 1} 1_{\{\sigma_i \le t\}}. +$$ + +**Proposition 3.1 (Queueing Representation).** If $Z(0) = z \in \mathbb{N}$, then + +$$ +(16) \qquad (Z(t), t \ge 0) \stackrel{\text{dist.}}{=} (L(C(t)), t \ge 0), +$$ + +where $(L(t))$ is the process of the number of jobs of an $M/M/1$ queue with input +rate $\mu_Z$ and service rate $\nu$ and with $L(0) = z$ and $C(t) = \inf\{s > 0 : A(s) > t\}$, +where + +$$ +A(t) = \int_{0}^{t} \frac{1}{1 \vee L(u)} du. +$$ + +*Proof.* It is not difficult to check that the process $(M(t)) \stackrel{\text{def.}}{=} (L(C(t)))$ has the Markov property. Let $Q_M$ be its $Q$-matrix. For $z \ge 0$, + +$$ +\mathbb{P}(L(C(h)) = z + 1 | L(0) = z) = \mu_Z \mathbb{E}(C(h)) + o(h) = \mu_Z (z \vee 1)h + o(h), +$$ + +hence $q_M(z, z + 1) = \mu_Z(z \vee 1)$. Similarly $q_M(z, z - 1) = \nu z$. The proposition is proved. $\square$ + +**Corollary 3.1.** For any $\gamma > (\mu_Z - \nu) \lor 0$ and $z = Z(0) \in \mathbb{N}$, + +$$ +(17) \qquad \mathbb{E}_z \left( \sum_{n=1}^{+\infty} e^{-\gamma \sigma_n} \right) < +\infty. +$$ + +*Proof.* Proposition 3.1 shows that, in particular, the sequences of positive jumps of $(Z(t))$ and of $(L(C(t)))$ have the same distribution. 
Hence, if $N_{\mu_Z} = (t_n)$ is the arrival process of the $M/M/1$ queue, a Poisson process with parameter $\mu_Z$, then, with the notations of the above proposition, the relation + +$$ +(\sigma_n) \stackrel{\text{dist.}}{=} (A(t_n)) +$$ +---PAGE_BREAK--- + +holds. By using standard martingale properties of stochastic integrals with respect to Poisson processes, see Rogers and Williams [20], one gets for $t \ge 0$, + +$$ (18) \qquad \begin{aligned} \mathbb{E}_z \left( \sum_{n \ge 1} e^{-\gamma A(t_n)} \right) &= \mathbb{E}_z \left( \int_0^\infty e^{-\gamma A(s)} N_{\mu_Z}(ds) \right) = \mu_Z \mathbb{E}_z \left( \int_0^\infty e^{-\gamma A(s)} ds \right) \\ &= \mu_Z \int_0^\infty e^{-\gamma u} \mathbb{E}_z (Z(u) \vee 1) du, \end{aligned} $$ + +where Relation (16) has been used for the last equality. Kolmogorov's equation for the process $(Z(t))$ gives that + +$$ \begin{aligned} \phi(t) &\stackrel{\text{def.}}{=} \mathbb{E}_z(Z(t)) = \mu_Z \int_0^t \mathbb{E}_z(Z(u) \vee 1) du - \nu \int_0^t \mathbb{E}_z(Z(u)) du \\ &\le (\mu_Z - \nu) \int_0^t \phi(u) du + \mu_Z t, \end{aligned} $$ + +therefore, by Gronwall's Lemma, + +$$ \phi(t) \le \phi(0) + \mu_Z \int_0^t ue^{(\mu_Z - \nu)u} du \le z + \frac{\mu_Z}{\mu_Z - \nu} te^{(\mu_Z - \nu)t}. $$ + +From Equation (18), one concludes that + +$$ \mathbb{E}_z \left( \sum_n e^{-\gamma \sigma_n} \right) = \mathbb{E}_z \left( \sum_n e^{-\gamma A(t_n)} \right) < +\infty. $$ + +The proposition is proved. $\square$ + +**A Branching Process.** Before hitting 0, the Markov process $(Z(t))$ whose Q-matrix is given by Relation (15) can be seen a Bellman-Harris branching process. Its Malthusian parameter is given by $\alpha = \mu_Z - \nu$. See Athreya and Ney [3]. In this setting, it describes the evolution of a population of independent particles, at rate $\lambda \stackrel{\text{def.}}{=} \mu_Z + \nu$ each of these particles either splits into two particles with probability $p \stackrel{\text{def.}}{=} \mu_Z / (\mu_Z + \nu)$ or dies. 
These processes will be referred to as $(p, \lambda)$-branching processes in the sequel. + +A $(p, \lambda)$-branching process survives with positive probability only when $p > 1/2$, in which case the probability of extinction $q$ is equal to $q = (1-p)/p = \nu/\mu_Z$. The main (and only) difference with a branching process is that $Z$ regenerates after hitting 0. When it regenerates, it again behaves as a $(p, \lambda)$-branching process (started with one particle), until it hits 0 again. + +**Proposition 3.2 (Branching Representation).** If $Z(0) = z \in \mathbb{N}$ and $(\tilde{Z}(t))$ is a $(p, \lambda)$-branching process started with $z \in \mathbb{N}$ particles and $\tilde{T}$ its extinction time, then + +$$ (Z(t), 0 \le t \le T) \stackrel{\text{dist.}}{=} (\tilde{Z}(t), 0 \le t \le \tilde{T}), $$ + +where $T = \inf\{t \ge 0 : Z(t) = 0\}$ is the hitting time of 0 by $(Z(t))$. + +**Corollary 3.2.** Suppose that $\mu_Z > \nu$. Then $\mathbb{P}_z$-almost surely for any $z \ge 0$, there exists a finite random variable $Z(\infty)$ such that, + +$$ \lim_{t \to +\infty} e^{-(\mu_Z - \nu)t} Z(t) = Z(\infty) \quad \text{and} \quad Z(\infty) > 0. $$ +---PAGE_BREAK--- + +*Proof.* When $\mu_Z > \nu$, the process $(Z(t))$ couples in finite time with a supercritical $(p, \lambda)$-branching process $(\tilde{Z}(t))$ conditioned on non-extinction; this follows readily from Proposition 3.2 (or see the Appendix for details). Since for any supercritical $(p, \lambda)$-branching process, $(\exp(-(\mu_Z - \nu)t)\tilde{Z}(t))$ converges almost surely to a finite random variable $\tilde{Z}(\infty)$, positive on the event of non-extinction (see Nerman [13]), one gets the desired result. $\square$ + +Due to its technicality, the proof of the following result is postponed to the Appendix; this result is used in the proof of Proposition 3.5. 
+
+**Proposition 3.3.** Suppose that $\mu_Z > \nu$, if
+
+$$ (19) \qquad \eta^*(x) = \frac{2 - x - \sqrt{x(4-3x)}}{2(1-x)}, \quad 0 < x < 1, $$
+
+then for any $0 < \eta < \eta^*(\nu/\mu_Z)$,
+
+$$ \sup_{z \ge 0} \left[ \mathbb{E}_z \left( \sup_{t \ge \sigma_1} \left( e^{\eta(\mu_Z - \nu)t} B_\sigma(t)^{-\eta} \right) \right) \right] < +\infty. $$
+
+**A Yule Process Killed at Fixed Instants.** In this part, it is assumed that, provided that it is non-empty, at epochs $\sigma_n$, $n \ge 1$, an individual is removed from the population of an ordinary Yule process $(Y(t))$ with rate $\mu_W$ starting with $Y(0) = w \in \mathbb{N}$ individuals. It is assumed that $(\sigma_n)$ is some fixed non-decreasing sequence. It will be shown that the process $(W(t))$ obtained by killing one individual of $(Y(t))$ at each of the successive instants $(\sigma_n)$ survives with positive probability when the series with general term $(\exp(-\mu_W\sigma_n))$ converges.
+
+In the following, a related result will be considered in the case where the sequence $(\sigma_n)$ is given by the sequence of birth times of the process $(Z(t))$ introduced above. See Alsmeyer [2] and the references therein for related models.
+
+One denotes
+
+$$ \kappa = \inf\{n \ge 1 : W(\sigma_n) = 0\}. $$
+
+The process $(W(t))$ can be represented in the following way:
+
+$$ (20) \qquad W(t) = Y(t) - \sum_{i=1}^{\kappa} X_i(t) 1_{\{\sigma_i \le t\}}, $$
+
+where, for $1 \le i \le \kappa$ and $t \ge \sigma_i$, $X_i(t)$ is the total number of children at time $t$ in the original Yule process of the $i$th individual killed at time $\sigma_i$. In terms of trees, $(W(t))$ can be seen as a subtree of $(Y(t))$: for $1 \le i \le \kappa$, $(X_i(t))$ is the subtree of $(Y(t))$ associated with the $i$th particle killed at time $\sigma_i$. 
+ +It is easily checked that $(X_i(t - \sigma_i), t \ge \sigma_i)$ is a Yule process starting with one individual and, since a killed individual cannot have one of his descendants killed, that the processes + +$$ (\tilde{X}_i(t)) = (X_i(t + \sigma_i), t \ge 0), \quad 1 \le i \le \kappa, $$ + +are independent Yule processes. + +For any process $(U(t))$, one denotes: + +$$ (21) \qquad (M_U(t)) \stackrel{\text{def.}}{=} (e^{-\mu_W t} U(t)). $$ +---PAGE_BREAK--- + +If $(\tilde{X}(t))$ is a Yule process with rate $\mu_W$, the martingale $(M_{\tilde{X}}(t))$ converges almost surely and in $L_2$ to a random variable $M_{\tilde{X}}(\infty)$ with an exponential distribution with mean $\tilde{X}(0)$, and by Doob's Inequality + +$$ \mathbb{E}\left(\sup_{t \ge 0} M_{\tilde{X}}(t)^2\right) \le 2 \sup_{t \ge 0} \mathbb{E}\left(M_{\tilde{X}}(t)^2\right) < +\infty. $$ + +See Athreya and Ney [3]. Consequently + +$$ e^{-\mu_W t} W(t) = M_Y(t) - \sum_{i=1}^{\kappa} e^{-\mu_W \sigma_i} M_{\tilde{X}_i}(t - \sigma_i) 1_{\{\sigma_i \le t\}}, $$ + +and for any $t \ge 0$, + +$$ \sum_{i=1}^{\kappa} e^{-\mu_W \sigma_i} M_{\tilde{X}_i}(t-\sigma_i) 1_{\{\sigma_i \le t\}} \le \sum_{i=1}^{\kappa} e^{-\mu_W \sigma_i} \sup_{s \ge 0} M_{\tilde{X}_i}(s). $$ + +Assume now that $\sum_{i \ge 1} e^{-\mu_W \sigma_i} < +\infty$: then the last expression is integrable, and Lebesgue's Theorem implies that $(M_W(t)) = (\exp(-\mu_W t)W(t))$ converges almost surely and in $L_2$ to + +$$ M_W(\infty) = M_Y(\infty) - \sum_{i=1}^{\kappa} e^{-\mu_W \sigma_i} M_{\tilde{X}_i}(\infty). $$ + +Clearly, for some $w^*$ large enough and then for any $w \ge w^*$, one has + +$$ \mathbb{E}_w(M_W(\infty)) \ge w - \sum_{i=1}^{+\infty} e^{-\mu_W \sigma_i} > 0, $$ + +in particular $\mathbb{P}_w(M_W(\infty) > 0) > 0$ and $\mathbb{P}_w(W(t) \ge 1, \forall t \ge 0) > 0$. 
If $Y(0) = w < w^*$ and $\sigma_1 > 0$, then $\mathbb{P}_w(Y(\sigma_1) \ge w^* + 1) > 0$ and therefore, by translation at time $\sigma_1$, the same conclusion holds when the sequence $(\exp(-\mu_W \sigma_i))$ has a finite sum. The following proposition has thus been proved. + +**Proposition 3.4.** Let $(W(t))$ be a process growing as a Yule process with rate $\mu_W$ and for which individuals are killed at non-decreasing instants $(\sigma_n)$ with $\sigma_1 > 0$. If + +$$ \sum_{i=1}^{+\infty} e^{-\mu_W \sigma_i} < +\infty, $$ + +then as $t$ gets large, and for any $w \ge 1$, the variable $(\exp(-\mu_W t)W(t))$ converges $\mathbb{P}_w$-almost surely and in $L_2$ to a finite random variable $M_W(\infty)$ such that $\mathbb{P}_w(M_W(\infty) > 0) > 0$. + +The previous proposition establishes the minimal results needed in Section 4. However, Kolmogorov's Three-Series, see Williams [25], can be used in conjunction with Fatou's Lemma to show that $(W(t))$ dies out almost surely when the series with general term $(\exp(-\mu_W \sigma_n))$ diverges. + +**A Yule Process Killed at the Birth Instants of a Bellman-Harris Process.** + +In this subsection, one considers a Yule process $(Y(t))$ with parameter $\mu_W$ with Q-matrix defined by Relation (13) and an independent Markov process $(Z(t))$ with Q-matrix defined by Relation (15). In particular $\mu_Z - \nu$ is the Malthusian parameter of $(Z(t))$. A process $(W(t))$ is defined by killing one individual of $(Y(t))$ at each of +---PAGE_BREAK--- + +the birth instants $(\sigma_n)$ of $(Z(t))$. 
As before $(B_\sigma(t))$ denotes the counting process association to the non-decreasing sequence $(\sigma_n)$, + +$$B_{\sigma}(t) = \sum_{i \ge 1} 1_{\{\sigma_i \le t\}}.$$ + +**Proposition 3.5.** Assume that $\mu_Z - \nu > \mu_W$, and let $H_0$ be the extinction time of $(W(t))$, i.e., + +$$H_0 = \inf\{t \ge 0 : W(t) = 0\},$$ + +then the random variable $H_0$ is almost surely finite and: + +(i) $Z(H_0) - Z(0) \le e^{\mu_W H_0} M_Y^*$ where +$$M_Y^* = \sup_{t \ge 0} e^{-\mu_W t} Y(t).$$ + +(ii) There exists a finite constant $C$ such that for any $z \ge 0$ and $w \ge 1$, + +$$ (22) \qquad \mathbb{E}_{(w,z)}(H_0) \le C (\log(w) + 1). $$ + +Note that the subscript $(w, z)$ refers to the initial state of the Markov process $(W(t), Z(t))$. + +*Proof.* Define $\alpha = \mu_Z - \nu$. Concerning the almost sure finiteness of $H_0$, note that Equation (20) entails that $W(t) \le Y(t) - B_\sigma(t)$ for all $t \ge 0$ on the event $\{H_0 = +\infty\}$. As $t$ goes to infinity, both $\exp(-\mu_W t)Y(t)$ and $\exp(-\alpha t)B_\sigma(t)$ converge almost surely to positive and finite random variables (see Nerman [13]), which implies, when $\alpha = \mu_Z - \nu > \mu_W$, that $W(t)$ converges to $-\infty$ on $\{H_0 = +\infty\}$, and so this event is necessarily of probability zero. + +The first point (i) of the proposition comes from Identity (20) at $t = H_0$: + +$$ (23) \qquad Z(H_0) - Z(0) \le B_\sigma(H_0) \le Y(H_0) \le e^{\mu_W H_0} M_Y^*. $$ + +By using the relation $\exp(x) \ge x$, Equation (22) follows from the following bound: for any $\eta < \eta^*(\nu/\mu_Z)$ (recall that $\eta^*$ is given by Equation (19)), + +$$ (24) \qquad \sup_{w \ge 1, z \ge 0} \left[ w^{-\eta} \mathbb{E}_{(w,z)} \left( e^{\eta(\alpha - \mu_W)H_0} \right) \right] < +\infty. $$ + +So all is left to prove is this bound. Under $\mathbb{P}_{(w,z)}$, $(Y(t))$ can be represented as the sum of $w$ i.i.d. Yule processes, and so $M_Y^* \le M_{Y,1}^* + \cdots + M_{Y,w}^*$ with $(M_{Y,i}^*)$ i.i.d. 
distributed like $M_Y^*$ under $\mathbb{P}_{(1,z)}$; Inequality (23) then entails that
+
+$$ e^{(\alpha - \mu_W)H_0} \le \left( \sum_{i=1}^{w} M_{Y,i}^{*} \right) \times \sup_{t \ge \sigma_1} \left( e^{\alpha t} / B_{\sigma}(t) \right). $$
+
+By independence of $(M_{Y,i}^*)$ and $(B_\sigma(t))$, Jensen's inequality gives for any $\eta < 1$:
+
+$$ \mathbb{E}_{(w,z)} (e^{\eta(\alpha - \mu_W)H_0}) \le w^\eta (\mathbb{E}(M_{Y,1}^*))^\eta \mathbb{E}_z \left( \sup_{t \ge \sigma_1} (e^{\eta\alpha t} B_\sigma(t)^{-\eta}) \right), $$
+
+hence the bound (24) follows from Proposition 3.3. $\square$
+
+One concludes this section with a Markov chain which will be used in Section 4. Define recursively the sequence $(V_n)$ by $V_0 = v$ and
+
+$$ (25) \qquad V_{n+1} = \sum_{k=1}^{A_n(V_n)} I_k, \quad n \ge 0, $$
+---PAGE_BREAK---
+
+where $(I_k)$ are identically distributed integer valued random variables independent of $V_n$ and $A_n(V_n)$, and such that $\mathbb{E}(I_1) = p$ for some $p \in (0, 1)$. For $v > 0$, $A_n(v)$ is an independent random variable with the same distribution as $Z(H_0)$ under $\mathbb{P}_{(1,v)}$, i.e., with the initial condition $(W(0), Z(0)) = (1, v)$.
+
+The above equation (25) can be interpreted as a branching process with
+immigration, see Seneta [21], or also as an autoregressive model.
+
+**Proposition 3.6.** Under the condition $\mu_Z - \nu > \mu_W$, if $(V_n)$ is the Markov chain defined by Equation (25) and, for $K \ge 0$,
+
+$$N_K = \inf\{n \ge 0 : V_n \le K\},$$
+
+then there exist $\gamma > 0$ and $K \in \mathbb{N}$ such that
+
+$$
+(26) \quad \mathbb{E}(N_K|V_0 = v) \le \frac{1}{\gamma} \log(1+v), \quad \forall v \ge 0.
+$$
+
+The Markov chain $(V_n)$ is in particular positive recurrent.
+
+*Proof.* For $V_0 = v \in \mathbb{N}$, Jensen's Inequality and Definition (25) give the relation
+
+$$
+(27) \quad \mathbb{E}_v \log \left( \frac{1+V_1}{1+v} \right) \le \mathbb{E}_{(1,v)} \log \left[ \frac{1+pZ(H_0)}{1+v} \right]. 
+$$
+
+From Proposition 3.5 and by using the same notations, one gets that, under $\mathbb{P}_{(1,v)}$,
+
+$$
+Z(H_0) \leq v + e^{\mu_W H_0} M_Y^*,
+$$
+
+where $(Y(t))$ is a Yule process starting with one individual. By looking at the birth instants of $(Z(t))$, it is easily checked that the random variable $H_0$ under $\mathbb{P}_{(1,v)}$ is stochastically bounded by $H_0$ under $\mathbb{P}_{(1,0)}$. The integrability of $H_0$ under $\mathbb{P}_{(1,0)}$ (proved in Proposition 3.5) and of $M_Y^*$ give that the expression
+
+$$
+\log \left( \frac{1 + p(v + e^{\mu_W H_0} M_Y^*)}{1 + v} \right)
+$$
+
+bounding the right hand side of Relation (27) is also an integrable random variable
+under $\mathbb{P}_{(1,0)}$. Lebesgue's Theorem gives therefore that
+
+$$
+\limsup_{v \to +\infty} \left[ \mathbb{E}_v \log \left( \frac{1+V_1}{1+v} \right) \right] \leq \log p < 0.
+$$
+
+Consequently, one concludes that $v \mapsto \log(1+v)$ is a Lyapunov function for the Markov chain $(V_n)$, i.e., if $\gamma = -(\log p)/2$, there exists $K$ such that for $v \ge K$,
+
+$$
+\mathbb{E}_v \log (1 + V_1) - \log (1 + v) \le -\gamma.
+$$
+
+Foster's criterion, see Theorem 8.6 of Robert [18], implies that $(V_n)$ is indeed ergodic
+and that Relation (26) holds. $\square$
+
+4. ANALYSIS OF THE MULTI-CHUNK NETWORK
+
+In this section it is assumed that a file of *n* chunks is distributed by the file-sharing network within the following framework, corresponding to Figure 1. Chunks are delivered in sequential order, and, for *k* ≥ 1, requests with chunks 1, ..., *k* provide service for requests with one less chunk.
+
+For $0 \le k < n$ and $t \ge 0$, the variable $X_k(t)$ denotes the number of requests downloading the $(k+1)$st chunk; for $k = n$, $X_n(t)$ is the number of requests having all the chunks. 
When taking into account the boundaries in the transition rates +---PAGE_BREAK--- + +described in Figure 1, one gets the following $Q$-matrix for the $(n+1)$-dimensional +Markov process $(X_k(t), 0 \le k \le n)$: + +$$ +\begin{equation} +\begin{aligned} +Q(f)(x) ={}& \lambda[f(x+e_0)-f(x)] + \sum_{k=1}^{n} \mu_k(x_k \lor 1)[f(x+e_k-e_{k-1})-f(x)]1_{\{x_{k-1}>0\}} \\ +& + \nu x_n[f(x-e_n)-f(x)], +\end{aligned} +\end{equation} +$$ + +where $x \in \mathbb{N}^{n+1}$, $f: \mathbb{N}^{n+1} \to \mathbb{R}_+$ is a function and for, $0 \le k \le n$, $e_k \in \mathbb{N}^{n+1}$ is the $k$th unit vector. Note that, as before, to avoid absorbing states, it is assumed that there is a server for the $k$th chunk when $x_k = 0$. The first section corresponds to the case $n = 2$ in a more general setting. + +It is first shown in Proposition 4.1 that the network is stable for sufficiently small input rate $\lambda$. Proposition 4.2 studies the analog of the two-dimensional case with $\mu > \nu$, i.e., when $\mu_1 > \cdots > \mu_{n-1} > \mu_n - \nu > 0$, it is proved that the network is stable for any input rate $\lambda$. When this condition fails, it is shown that for $n = 2$ the network can only accommodate a finite input rate. + +**Proposition 4.1.** *Under the condition* + +$$ +(28) \qquad \sum_{k=1}^{n} \frac{\lambda}{\mu_k} < 1, +$$ + +the Markov process (X(t)) is ergodic for any $\nu > 0$. + +Condition (28) is obviously not sharp as can be seen in the case $n=1$ analyzed +in Section 2. But the proposition shows that there is always a positive threshold +$\lambda^*$ such that the system is stable when $\lambda < \lambda^*$. + +*Proof.* For $x \in \mathbb{N}^{n+1}$ and $(\alpha_k) \in \mathbb{R}^{n+1}$, define $f(x) = \alpha_0 x_0 + \dots + \alpha_n x_n$, then + +$$ +Q(f)(x) = \lambda\alpha_0 - \sum_{k=1}^{n} (\alpha_{k-1} - \alpha_k)\mu_k(x_k \vee 1)1_{\{x_{k-1}>0\}} - \nu x_n \alpha_n. 
+$$
+
+For $\varepsilon > 0$, one can choose $(\alpha_k)$ so that $\alpha_0 = 1$ and
+
+$$
+\alpha_{k-1} - \alpha_k = \frac{\lambda}{\mu_k} + \varepsilon, \quad 1 \le k \le n,
+$$
+
+hence
+
+$$
+\alpha_n = 1 - \left( n\varepsilon + \sum_{k=1}^{n} \frac{\lambda}{\mu_k} \right),
+$$
+
+so that, for $\varepsilon$ small enough, the $\alpha_k$'s, $0 \le k \le n$, are decreasing and positive under
+the condition of the proposition; in particular the set $\{x : f(x) \le K\}$ is finite for
+any $K \ge 0$.
+
+Take $K = (1+\lambda)/\nu$, then if $x \in \mathbb{N}^{n+1}$ is such that $f(x) \ge K$, either $x_k > 0$ for some $0 \le k \le n-1$ and in this case
+
+$$
+Q(f)(x) \leq \lambda - \mu_{k+1}(\alpha_k - \alpha_{k+1}) = -\varepsilon\mu_{k+1} < 0,
+$$
+
+or $x_n \ge K$ so that
+
+$$
+Q(f)(x) \leq \lambda - \nu K = -1 < 0.
+$$
+
+A Lyapunov function criterion for Markov processes shows that this implies that
+the Markov process $(X(t))$ is ergodic. See Proposition 8.14 of Robert [18] for
+example. □
+---PAGE_BREAK---
+
+**Decreasing Service Rates.** The analog of the “good” case $\mu > \nu$ is proved in the next proposition.
+
+**Proposition 4.2.** *Under the condition $\mu_1 > \mu_2 > \cdots > \mu_{n-1} > \mu_n - \nu > 0$, the Markov process $(X(t)) = (X_k(t), 0 \le k \le n)$ describing the linear file-sharing network is ergodic for any $\lambda \ge 0$.*
+
+*Proof.* The proof proceeds in two steps: first coupling arguments with Yule processes allow one to prove (30); then one can use the same technique as in the proof of Proposition 2.3, see Robert [18, Theorem 9.7].
+
+*Step 1 (coupling).* Let $(W_n(t))$ be the process with $Q$-matrix defined by Relation (15) with $\mu_Z = \mu_n$ and starting at $W_n(0) = w_n \ge 1$. Since $\mu_n > \nu$, the process $(\exp(-(\mu_n-\nu)t)W_n(t))$ converges almost surely to a finite and positive random variable $M_{W_n}(\infty)$ by Corollary 3.2. 
Moreover, since $\mu_{n-1} > \mu_n - \nu > 0$, Corollary 3.1 entails that the birth instants $(\sigma_\ell^n)$ of this process are such that + +$$ \sum_{\ell \ge 1} e^{-\mu_{n-1} \sigma_\ell^n} < +\infty, \text{ almost surely.} $$ + +Let $(Y_{n-1}(t))$ be an independent Yule process with parameter $\mu_{n-1}$ with initial condition $Y_{n-1}(0) = w_{n-1} \ge 1$ and $(W_{n-1}(t))$ the resulting process when its individuals are killed at the instants $(\sigma_\ell^n)$ of births of $(W_n(t))$: the previous equation and Proposition 3.4 show that $(W_{n-1}(t))$ can survive forever with a positive probability. + +Let $(Y_{n-2}(t))$ be an independent Yule process starting from $w_{n-2} \ge 1$ with parameter $\mu_{n-2}$. Define $(W_{n-2}(t))$ the resulting process when the individuals of $(Y_{n-2}(t))$ are killed at the birth instants $(\tilde{\sigma}_\ell^{n-1})$ of $(W_{n-1}(t))$. Since $\mu_{n-2} > \mu_{n-1}$, the birth instants $(\tilde{\sigma}_\ell^{n-1})$ of $(Y_{n-1}(t))$ satisfy + +$$ \sum_{\ell=1}^{+\infty} e^{-\mu_{n-2}\tilde{\sigma}_{\ell}^{n-1}} < +\infty $$ + +almost surely by Equation (14) (which still holds for a Yule process starting with more than one particle). Since the birth instants $(\sigma_\ell^{n-1})$ of $(W_{n-1}(t))$ are a subsequence of $(\tilde{\sigma}_\ell^{n-1})$, the same relationship holds for $(\sigma_\ell^{n-1})$, and therefore, with a positive probability, the three processes $(e^{-(\mu_n-\nu)t}W_n(t))$, $(e^{-\mu_{n-1}t}W_{n-1}(t))$ and $(e^{-\mu_{n-2}t}W_{n-2}(t))$ converge simultaneously to positive and finite random variables $M_{W_n}(\infty)$, $M_{W_{n-1}}(\infty)$ and $M_{W_{n-2}}(\infty)$, respectively. 
This construction can be repeated inductively to give the existence of $n$ processes $(W_k(t), k = 1, \dots, n)$ such that $(\sigma_\ell^k)$ is the sequence of birth times of $W_k$, $W_n$ is the birth-and-death process with $Q$-matrix (15), $W_k$ for $1 \le k \le n-1$ is a Yule process with parameter $\mu_k$ killed at $(\sigma_\ell^{k+1})$, and the event $\mathcal{E} = \{M_{W_1}(\infty) > 0, \dots, M_{W_n}(\infty) > 0\}$ has a positive probability. On this event, $W_k(t) \ge 1$ for all $t \ge 0$ and $1 \le k \le n-1$, and
+
+$$ \lim_{t \to +\infty} W_n(t) = +\infty. $$
+---PAGE_BREAK---
+
+For $0 \le k \le n-1$, one defines $(X_k^S(t)) = (X_{k,n-k}^S(t), \dots, X_{k,n}^S(t))$, the $k$th saturated system, as the $(k+1)$-dimensional Markov process with generator
+
+$$
+\begin{equation}
+\begin{split}
+(29) \qquad Q_k^S(f)(x) ={}& \mu_{n-k}(x_{n-k} \lor 1)[f(x + e_{n-k}) - f(x)] \\
+ & + \sum_{\ell=1}^k \mu_{n-k+\ell}(x_{n-k+\ell} \lor 1)[f(x + e_{n-k+\ell} - e_{n-k+\ell-1}) - f(x)] \mathbf{1}_{\{x_{n-k+\ell-1} > 0\}} \\
+ & + \nu x_n[f(x - e_n) - f(x)],
+\end{split}
+\end{equation}
+$$
+
+where $x \in \mathbb{N}^{k+1}$ and $f : \mathbb{N}^{k+1} \to \mathbb{R}_+$ is an arbitrary function. Compared with the process $(X_\ell(t), 1 \le \ell \le n)$ with generator $Q$, it amounts to looking at the $k+1$ last queues $(X_{n-k}(t), \dots, X_n(t))$ under the assumption that the queue $n-k-1$ is saturated, i.e., $X_{n-k-1}(t) \equiv +\infty$ for all $t \ge 0$.
+
+Note that for any $0 \le k \le n-1$, the transition rates of the Markov processes $(W_{n-\ell}(t), 0 \le \ell \le k)$ and $(X_{k,n-\ell}^S(t), 0 \le \ell \le k)$ are identical as long as no coordinate hits 0; one thus concludes that, with positive probability, the relation
+
+$$
+\lim_{t \to +\infty} X_{k,n}^{S}(t) = +\infty
+$$
+
+holds when $X_{k,n-\ell}^S(0) \ge 1$, $\ell = 0, \dots, k$. 
Consequently, since the set $(\mathbb{N}-\{0\})^{k+1}$ can be reached with positive probability from any initial state in $\mathbb{N}^{k+1}$ by $(X_k^S(t))$, then
+
+$$
+(30) \qquad \lim_{t \to +\infty} \mathbb{E}(X_{k,n}^S(t)) = +\infty.
+$$
+
+*Step 2 (Foster's criterion).* We use Foster's criterion as stated in Theorem 9.7 of Robert [18]. First we inspect the case when $X_n(0)$ is large, then the case when $X_n(0)$ is bounded and $X_{n-1}(0)$ is large, etc. The key idea is that when $X_{n-k-1}(0)$ is large, then the process $(X_{n-k}(t), \dots, X_n(t))$ essentially behaves as the process $(X_k^S(t))$, for which Relation (30) ensures that the output rate is arbitrarily large.
+
+Let $X(0) = x = (x_k) \in \mathbb{N}^{n+1}$; since the last queue serves each request at rate $\nu$, for $t \ge 0$,
+
+$$
+\mathbb{E}(\|X(t)\|) \le \|x\| + \lambda t - x_n (1 - e^{-\nu t}),
+$$
+
+where $\|x\| = x_0 + \dots + x_n$ for $x = (x_0, \dots, x_n) \in \mathbb{N}^{n+1}$. Define $t_n = 1$ and let $K_n$ be such that $\lambda t_n - K_n(1 - \exp(-\nu)) \le -1$, so that the relation
+
+$$
+\mathbb{E}_x(\|X(t_n)\|) - \|x\| \le -1,
+$$
+
+holds when $x_n \ge K_n$.
+
+From Equation (30) with $k=0$, one gets that there exists some $t_{n-1}$ such that for any $x_n \le K_n$,
+
+$$
+\nu \int_0^{t_{n-1}} \mathbb{E}_{x_n} (X_{0,n}^S(u)) du \geq \lambda t_{n-1} + 2.
+$$
+
+The two processes $(X_0^S(t))$ and $(X(t))$ can be built on the same probability space such that if they start from the same initial state, then the two processes $(X_{0,n}^S(t))$ and $(X_n(t))$ are identical as long as $X_{n-1}(t)$ stays positive. 
Since moreover the hitting time $\inf\{t \ge 0 : X_{n-1}(t) = 0\}$ goes to infinity as $x_{n-1}$ goes to infinity +---PAGE_BREAK--- + +for any $x_n \le K_n$, one gets that there exists $K_{n-1}$ such that if $x_{n-1} \ge K_{n-1}$ and $x_n < K_n$, then the relation + +$$ +\begin{align*} +\mathbb{E}_x(\|X(t_{n-1})\|) - \|x\| &= \lambda t_{n-1} - \nu \int_0^{t_{n-1}} \mathbb{E}_x(X_n(u)) du \\ +&\le \lambda t_{n-1} - \left( \nu \int_0^{t_{n-1}} \mathbb{E}_{x_n}(X_{0,n}^S(u)) du - 1 \right) \le -1 +\end{align*} +$$ + +holds. + +By induction, one gets in a similar way that there exist constants $t_n, \dots, t_0$ and $K_n, \dots, K_0$ such that for any $0 \le l \le n$, if $x_n \le K_n$, $x_{n-1} \le K_{n-1}$, $\dots$, $x_{n-l+1} \le K_{n-l+1}$ and $x_{n-l} > K_{n-l}$, then + +$$ +\mathbb{E}_x (\|X(t_{n-l})\|) - \|x\| \le -1. +$$ + +Theorem 8.13 of Robert [18] shows that (X(t)) is an ergodic Markov process. The proposition is proved. $\square$ + +**Analysis of the Two-Chunk Network.** In this subsection, one investigates the case when the monotonicity condition $\mu_1 > \cdots > \mu_{n-1} > \mu_n - \nu > 0$ fails. In general we conjecture the existence of bottlenecks which implies that the network can only accommodate a finite input rate. For instance, when $\mu_n - \nu < 0$, then it is easily seen that the network is unstable for $\lambda > \lambda^*$ where $\lambda^*$ is defined in Equation (32) below. + +The first non-trivial case occurs for $n=2$, for which the monotonicity condition breaks in two situations, either when $\mu_2 - \nu > \mu_1$ or when $\mu_2 < \nu$. The latter case can be dealt in fact with the exact same arguments as before. See Proposition 4.4. + +The actual difficulty is when $\mu_2 - \nu > \mu_1$: then the stationary behavior of $(X_2(t))$ is linked to the stationary behavior of the first saturated model $(X_1^S(t))$ defined through its Q-matrix (29). 
The difficulty in this case is that one needs to compare two processes which grow exponentially fast. + +**Proposition 4.3.** Assume that $\mu_2 - \nu > \mu_1$, then the first saturated process $(X_1^S(t))$ with Q-matrix defined by Equation (29) is ergodic. + +**Corollary 4.1.** If $\mu_2 - \nu > \mu_1$ and if + +$$ +\lambda_2^* \stackrel{\text{def.}}{=} \nu \mathbb{E}_{\pi^S} (X_{1,2}^S(0)), +$$ + +where $\pi^S$ is the invariant distribution of the Markov process $(X_1^S(t))$, then the process $(X(t)) = (X_k(t), k = 0, 1, 2)$ describing the linear file-sharing network with parameters $\lambda, \mu_1, \mu_2$ and $\nu$ is ergodic for $\lambda < \lambda_2^*$ and transient for $\lambda > \lambda_2^*$. + +*Sketch of Proof.* The proof of the transience when $\lambda > \lambda_2^*$ follows similarly as in Section 2: when $X_0(0)$ is large, the process $(X_1(t), X_2(t))$ can be coupled for some time with the second saturated system $(X_1^S(t))$. Since the output rate $\lambda_2^*$ of this system is smaller than the input rate $\lambda$, this implies that $(X_0(t))$ builds up, and it can indeed be shown that $X_0(t)/t$ converges almost surely to $\lambda - \lambda_2^*$. + +The ergodicity when $\lambda < \lambda_2^*$ is slightly more complicated, but it involves the same arguments as the ones employed in the proof of Proposition 4.2. The details are omitted. $\square$ +---PAGE_BREAK--- + +*Proof of Proposition 4.3.* Denote $(X_1^S(t)) = (X_{1,1}^S(t), X_{1,2}^S(t))$, then as long as the first coordinate $X_{1,1}^S$ is positive, the process $(X_1^S(t))$ has the same distribution as $(W(t), Z(t))$ introduced in Section 3: $(Z(t))$ is a Bellman-Harris process with Malthusian parameter $\mu_2 - \nu$ and $(W(t))$ is a Yule process with parameter $\mu_1$ killed at times of births of $(Z(t))$. + +By Proposition 3.5 and since $\mu_2 - \nu > \mu_1$, one has that $(X_{1,1}^S(t))$ returns infinitely often to 0. 
When $(X_{1,1}^S(t))$ is at 0 it jumps to 1 after an exponential time with parameter $\mu_1$, one denotes by $(E_{\mu_1,n})$ the corresponding i.i.d. sequence of successive residence times at 0. One defines the sequence $(S_n)$ by induction, $S_0 = 0$ and then + +$$S_{n+1} = \inf\{t > S_n : X_{1,1}^S(t) = 0\} + E_{\mu_1, n+1}, \quad n \ge 0.$$ + +For $n \ge 1$, $X_{1,1}^S(S_n) = 1$ and for $n \ge 0$, define $M_n \stackrel{\text{def.}}{=} X_{1,2}^S(S_n)$. With the notations of Proposition 3.5, $(X_{1,1}^S(t))$ hits 0 after a duration of $H_{0,n}$ and at that time $(X_{1,2}^S(t))$ is at $Z(H_{0,n})$ with the initial condition $Z(0) = M_n$; while $X_{1,1}^S$ is still at 0, the dynamics of $X_{1,2}^S$ is simple, since it just empties. Finally, at time $S_{n+1} = S_n + H_{0,n} + E_{\mu_1,n+1}$, $(X_{1,1}^S(t))$ returns to 1 and at this instant the location of $(X_{1,2}^S(t))$ is given by + +$$X_{1,2}^{S}(S_{n+1}) = M_{n+1} = \sum_{i=1}^{Z(H_{0,n})} 1_{\{E_{\nu,i}>E_{\mu_1,n+1}\}},$$ + +where $(E_{\nu,i})$ are i.i.d. exponential random variables with parameter $\nu$, the ith variable being the residence time of the ith request in node 2. Consequently, $(M_n, n \ge 1)$ is a Markov chain whose transitions are defined by Relation (25) with $p = \nu / (\nu + \mu_1)$; note that $(M_n, n \ge 0)$ has the same dynamics only when $X_{1,1}^S(0) = 1$. + +Define for any $K > 0$ the stopping time $T_K$ + +$$T_K = \inf\{t \ge 0 : X_{1,2}^S(t) \le K, X_{1,1}^S(t) = 1\}.$$ + +The ergodicity of $(X_1^S(t))$ will follow from the finiteness of $\mathbb{E}_{(x_1,x_2)}(T_K)$ for some $K$ large enough and for arbitrary $x = (x_1, x_2) \in \mathbb{N}^2$. 
The strong Markov property of $(X_1^S(t))$ applied at time $S_1$ gives + +$$\mathbb{E}_{(x_1,x_2)}(T_K) \le 2\mathbb{E}_{(x_1,x_2)}(S_1) + \mathbb{E}_{(x_1,x_2)}\left[\mathbb{E}_{(1,X_{1,2}^S(S_1))}(T_K)\right],$$ + +and so one only needs to study $T_K$ conditioned on $\{X_{1,1}^S(0) = 1\}$ since $\mathbb{E}_{(x_1,x_2)}(S_1)$ is finite in view of Proposition 3.5. + +Then, on this event and with $N_K$ defined in Proposition 3.6, the identity + +$$ (31) \qquad T_K = \sum_{i=0}^{N_K} (H_{0,i} + E_{\mu_1,i}) $$ + +holds. For $i \ge 0$, the Markov property of $(M_n, n \ge 0)$ gives + +$$ \mathbb{E}_{(x_1,x_2)}(H_{0,i} 1_{\{i \le N_K\}}) = \mathbb{E}_{(x_1,x_2)}(\mathbb{E}_{(1,M_i)}(H_0) 1_{\{i \le N_K\}}) $$ + +With the same argument as in the proof of Proposition 3.6, one has + +$$ \mathbb{E}_{(1,M_i)}(H_0) \le \mathbb{E}_{(1,0)}(H_0) < +\infty, $$ +---PAGE_BREAK--- + +with Equations (31) and (26) of Proposition (3.6), one gets that for some $\gamma > 0$ and some $K > 0$, + +$$ \mathbb{E}_{(x_1,x_2)}(T_K) \leq 2\mathbb{E}_{(x_1,x_2)}(S_1) + C \left(1 + \mathbb{E}_{(x_1,x_2)}\left[\log\left(1 + X_{1,2}^S(S_1)\right)\right]\right) $$ + +with the constant $C = (\mathbb{E}_{(1,0)}(H_0) + 1/\mu_2)/\gamma$. This last term is finite for any $(x_1, x_2)$ in view of Proposition 3.5, which proves the proposition. $\square$ + +**Proposition 4.4.** If $\nu > \mu_2$ and + +$$ (32) \qquad \lambda^* \stackrel{\text{def.}}{=} \frac{\mu_2}{(1 - \mu_2/\nu)(1 - \log(1 - \mu_2/\nu))}, $$ + +then the Markov process $(X(t)) = (X_k(t), k = 0, 1, 2)$ is transient if $\lambda > \lambda^*$ and ergodic if $\lambda < \lambda^*$. + +*Sketch of Proof.* The result for transience comes directly from the fact that the last coordinate is stochastically dominated by the birth-and-death process $(Y_1^1(t))$ of Section 2. + +As before, the arguments employed in the proof of Proposition 4.2 to prove ergodicity can also be used, for this reason they are only sketched. 
One has in fact to consider the following situations. + +— If there are many customers in the last queue, then the total number of customers instantaneously decreases. + +— If there are many customers in the second queue, then the last queue has time to get close to stationarity, the input rate is $\lambda$ and the output rate is $\lambda^*$. + +— Finally, if there are many customers in the first queue, then it is easily seen that the second queue builds up, since it grows like a Yule process killed at times $(\sigma_n)$ where the sequence $(\sigma_n)$ essentially grows linearly since the last queue is stable. Hence the second queue reaches high values and the last queue offers an output rate of $\lambda^*$. + +Hence when $\lambda < \lambda^*$, the Markov process $(X(t))$ is ergodic. $\square$ + +## APPENDIX A. PROOF OF PROPOSITION 3.3 + +In this appendix the notations of Section 3 are used. Since the random variable $(B_\sigma(t) | Z(0) = 0)$ is stochastically smaller than $(B_\sigma(t) | Z(0) = z)$ for any $z \in \mathbb{N}$, it is enough to show that for $\eta < \eta^*(\nu/\mu_Z)$ + +$$ \mathbb{E}_0 \left[ \sup_{t \ge \sigma_1} (e^{\eta \alpha t} B_\sigma(t)^{-\eta}) \right] < +\infty, $$ + +where $\alpha = \mu_Z - \nu > 0$. + +Note that the process $(B_\sigma(t+\sigma_1), t \ge 0)$ under $\mathbb{P}_0$ has the same distribution as $(B_\sigma(t)+1, t \ge 0)$ under $\mathbb{P}_1$, and by independence of $\sigma_1$, an exponentially random variable with parameter $\mu_Z$, and $(B_\sigma(t+\sigma_1), t \ge 0)$, one gets + +$$ \mathbb{E}_0 \left[ \sup_{t \ge \sigma_1} (e^{\eta \alpha t} B_\sigma(t)^{-\eta}) \right] = \mathbb{E}_0 (e^{\eta \alpha \sigma_1}) \mathbb{E}_1 \left[ \sup_{t \ge 0} (e^{\eta \alpha t} (B_\sigma(t) + 1)^{-\eta}) \right]. $$ + +Since $\alpha < \mu_Z$ and $\eta^*(\nu/\mu_Z) < 1$, then $\mathbb{E}_0(\exp(\eta\alpha\sigma_1))$ is finite, and all one needs to prove is that the second term is finite as well. 
+
+Define $\tau$ as the last time $Z(t) = 0$:
+
+$$ \tau = \sup\{t \ge 0 : Z(t) = 0\}, $$
+---PAGE_BREAK---
+
+with the convention that $\tau = +\infty$ if $(Z(t))$ never returns to 0. Recall that, because of the assumption $\mu_Z > \nu$, with probability 1, the process $(Z(t))$ returns to 0 a finite number of times.
+
+Conditioned on the event $\{\tau = +\infty\}$, the process $(Z(t))$ is a $(p, \lambda)$-branching process conditioned on survival, with $\lambda = \mu_Z + \nu$ and $p = \mu_Z/\lambda$. Such a branching process conditioned on survival can be decomposed as $Z = Z_{(1)} + Y$, where $(Y(t))$ is a Yule process with parameter $\alpha$. See Athreya and Ney [3]. Consequently, for any $0 < \eta < 1$,
+
+$$ \mathbb{E}_1 \left[ \sup_{t \ge 0} \left( e^{\eta \alpha t} (B_\sigma(t) + 1)^{-\eta} \right) | \tau = +\infty \right] \le \mathbb{E}_1 \left[ \sup_{t \ge 0} \left( e^{\eta \alpha t} Y(t)^{-\eta} \right) \right]. $$
+
+Since the $n$th split time $t_n$ of $(Y(t))$ is distributed like the maximum of $n$ i.i.d. exponential random variables, $Y(t)$ for $t \ge 0$ is geometrically distributed with parameter $1 - e^{-\alpha t}$, hence,
+
+$$
+\begin{aligned}
+\sup_{t \ge 0} \left[ e^{\eta \alpha t} \mathbb{E}_1 \left( \frac{1}{Y(t)^{\eta}} \right) \right] &= \sup_{t \ge 0} \left[ e^{-(1-\eta)\alpha t} \sum_{k \ge 1} \frac{(1-e^{-\alpha t})^{k-1}}{k^{\eta}} \right] \\
+&\le \sup_{0 \le u \le 1} \left[ (1-u)^{1-\eta} \sum_{k \ge 1} \frac{u^{k-1}}{k^{\eta}} \right].
+\end{aligned}
+$$
+
+For $0 < u < 1$, the relation
+
+$$
+\begin{aligned}
+(1-u)^{1-\eta} \sum_{k \ge 1} \frac{u^{k-1}}{k^\eta} &\le (1-u)^{1-\eta} \int_0^\infty \frac{u^x}{(1+x)^\eta} dx, \\
+&= \left(\frac{1-u}{-\log u}\right)^{1-\eta} \int_0^\infty \frac{e^{-x}}{(x-\log u)^\eta} dx,
+\end{aligned}
+$$
+
+holds, hence
+
+$$ \sup_{t \ge 0} \left[ e^{\eta \alpha t} \mathbb{E}_1 \left( \frac{1}{Y(t)^{\eta}} \right) \right] < +\infty. 
$$ + +The process $(e^{-\alpha t}Y(t))$ being a martingale, by convexity the process $(e^{\eta\alpha t}Y(t)^{-\eta})$ +is a non-negative sub-martingale. For any $\eta \in (0, 1)$ Doob's $L_p$ inequality gives the +existence of a finite $q(\eta) > 0$ such that + +$$ \mathbb{E}_1 \left[ \sup_{t \ge 0} (e^{\eta \alpha t} Y(t)^{-\eta}) \right] \le q(\eta) \sup_{t \ge 0} \left[ e^{\eta \alpha t} \mathbb{E}_1 \left( \frac{1}{Y(t)^{\eta}} \right) \right] < +\infty. $$ + +The following result has therefore been proved. + +**Lemma A.1.** For any $0 < \eta < 1$, + +$$ \mathbb{E}_1 \left[ \sup_{t \ge 0} \left( e^{\eta \alpha t} (B_{\sigma}(t) + 1)^{-\eta} \right) | \tau = +\infty \right] < +\infty. $$ +---PAGE_BREAK--- + +On the event $\{\tau < +\infty\}$, $(Z(t))$ hits a geometric number of times 0 and then couples with a $(p, \lambda)$-branching process conditioned on survival. On this event, + +$$ +\begin{align*} +& \sup_{t \ge 0} (e^{\eta \alpha t} (B_{\sigma}(t) + 1)^{-\eta}) \\ +&= \max \left( \sup_{0 \le t \le \tau} (e^{\eta \alpha t} (B_{\sigma}(t) + 1)^{-\eta}), \sup_{t \ge \tau} (e^{\eta \alpha t} (B_{\sigma}(t) + 1)^{-\eta}) \right) \\ +&\le e^{\eta \alpha \tau} \left( 1 + \sup_{t \ge 0} (e^{\eta \alpha t} (B'_{\sigma}(t) + 1)^{-\eta}) \right) +\end{align*} +$$ + +where $B'_\sigma(t)$ for $t \ge \tau$ is the number of births in $(\tau, t]$ of a $(p, \lambda)$-branching process conditioned on survival and independent of the variable $\tau$, consequently + +$$ +\mathbb{E}_1 \left[ \sup_{t \ge 0} \left( e^{\eta \alpha t} (B_\sigma(t) + 1)^{-\eta} \right) \middle| \tau < +\infty \right] \le \mathbb{E}_1 (e^{\eta \alpha \tau} | \tau < +\infty) \\ +\times \left( 1 + \mathbb{E}_1 \left[ \sup_{t \ge 0} \left( e^{\eta \alpha t} (B_\sigma(t) + 1)^{-\eta} \right) \middle| \tau = +\infty \right] \right). 
+$$ + +In view of Lemma A.1, the proof of Proposition 3.3 will be finished if one can prove that + +$$ +\mathbb{E}_1 (e^{\eta \alpha \tau} |\tau < +\infty) < +\infty, +$$ + +which actually comes from the following decomposition: under $\mathbb{P}_1(\cdot | \tau < +\infty)$, the +random variable $\tau$ can be written as + +$$ +\tau = \sum_{k=1}^{1+G} (T_k + E_{\mu_Z,k}) +$$ + +where $G$ is a geometric random variable with parameter $q = \nu/\mu_Z$, $(T_k)$ is an i.i.d. +sequence with the same distribution as the extinction time of a $(p, \lambda)$-branching +process starting with one particle and conditioned on extinction and $(E_{\mu_Z,k})$ are +i.i.d. exponential random variables with parameter $\mu_Z$. + +Since *q* is the probability of extinction of a (*p*, *λ*)-branching process started with +one particle, *G* + 1 represents the number of times (*Z*(*t*)) hits 0 before going to +infinity. This representation entails + +$$ +\mathbb{E}_1 (e^{\eta \alpha \tau} | \tau < +\infty) = \mathbb{E} (\gamma(\eta)^{G+1}) \quad \text{where} \quad \gamma(\eta) = \mathbb{E} (e^{\eta \alpha (T_1 + E_{\mu_Z,1})}). +$$ + +A $(p, \lambda)$-branching process conditioned on extinction is actually a $(1-p, \lambda)$-branching process. See again Athreya and Ney [3]. Thus $T_1$ satisfies the following recursive distributional equation: + +$$ +T_1 \stackrel{\text{dist.}}{=} E_{\lambda} + 1_{\{\xi=2\}}(T_1 \lor T_2), +$$ + +where $\mathbb{P}(\xi = 2) = 1-p$ and $E_\lambda$ is an exponential random variable with parameter $\lambda$. 
This equation yields + +$$ +\mathbb{P}(T_1 \ge t) \le e^{-\lambda t} + 2\lambda(1-p) \int_0^t \mathbb{P}(T_1 \ge t-u) e^{-\lambda u} du, +$$ + +and Gronwall's Lemma applied to the function $t \mapsto \exp(\lambda t)\mathbb{P}(T_1 \ge t)$ gives that + +$$ +\mathbb{P}(T_1 \ge t) \le e^{(\lambda - 2\lambda p)t} = e^{(\nu - \mu_Z)t} +$$ +---PAGE_BREAK--- + +hence for any $0 < \eta < 1$, + +$$\mathbb{E}_1(e^{\eta\alpha T_1}) \le \frac{1}{1-\eta}.$$ + +Since *G* is a geometric random variable with parameter *q*, $\mathbb{E}(\gamma(\eta)^G)$ is finite if and only if $\gamma(\eta) < 1/q$. Since finally + +$$\gamma(\eta) = \frac{\mu_Z}{\mu_Z - \eta\alpha} \mathbb{E} (e^{\eta\alpha T_1}) \le \frac{\mu_Z}{(1-\eta)(\mu_Z - \eta\alpha)}$$ + +one can easily check that $\gamma(\eta) < 1/q$ for $\eta < \eta^*(\nu/\mu_Z)$ as defined by Equation (19), which concludes the proof of Proposition 3.3. + +REFERENCES + +[1] David Aldous and Jim Pitman, *Tree-valued Markov chains derived from Galton-Watson processes*, Annales de l'Institut Henri Poincaré. Probabilités et Statistiques **34** (1998), no. 5, 637-686. + +[2] Gerold Alsmeyer, *On the Galton-Watson Predator-Prey Process*, Annals of Applied Probability **3** (1993), no. 1, 198-211. + +[3] K. B. Athreya and P. E. Ney, *Branching processes*, Springer, 1972. + +[4] Thomas Bonald, Laurent Massoulié, Fabien Mathieu, Diego Perino, and Andrew Twigg, *Epidemic live streaming: optimal performance trade-offs*, Proceedings of SIGMETRICS'08 (New York, NY, USA), ACM, 2008, pp. 325-336. + +[5] Maury Bramson, *Stability of queueing networks*, Lecture Notes in Mathematics, vol. 1950, Springer, Berlin, 2008, Lectures from the 36th Probability Summer School held in Saint-Flour, July 2-15, 2006. + +[6] Hong Chen and David D. Yao, *Fundamentals of queueing networks*, Springer-Verlag, New York, 2001, Performance, asymptotics, and optimization, Stochastic Modelling and Applied Probability. + +[7] T. D. Dang, R. Pereczes, and S. 
Molnár, *Modeling the population of file-sharing peer-to-peer networks with branching processes*, IEEE Symposium on Computers and Communications (ISCC'07) (Aveiro, Portugal), July 2007. + +[8] F.P. Kelly, *Loss networks*, Annals of Applied Probability **1** (1991), no. 3, 319-378. + +[9] J. F. C. Kingman, *The first birth problem for an age-dependent branching process.*, Annals of Probability **3** (1975), no. 5, 790-801. + +[10] L. Leskelä, *Stochastic relations of random variables and processes*, J. Theor. Probab. (2009), To appear. + +[11] Laurent Massoulié and Andrew Twigg, *Rate-optimal schemes for Peer-to-Peer live streaming*, Performance Evaluations **65** (2008), no. 11-12, 804-822. + +[12] Laurent Massoulié and Milan Vojnović, *Coupon replication systems*, Proceedings of SIGMETRICS'05 (Banff, Alberta, Canada), no. 1, June 2005, pp. 2-13. + +[13] Olle Nerman, *On the convergence of supercritical general (C-M-J) branching processes*, Z. Wahrscheinlichkeitstheorie verw. Gebiete **57** (1981), 365-395. + +[14] J. Neveu, *Erasing a branching tree*, Advances in Applied Probability (1986), no. suppl., 101-108. + +[15] R. Núñez-Queija and B. J. Prabhu, *Scaling laws for file dissemination in P2P networks with random contacts*, Proceedings of IWQoS, 2008. + +[16] Nadim Parvez, Carey Williamson, Anirban Mahanti, and Niklas Carlsson, *Analysis of bittorrent-like protocols for on-demand stored media streaming*, SIGMETRICS '08: Proceedings of the 2008 ACM SIGMETRICS international conference on Measurement and modeling of computer systems (New York, NY, USA), ACM, 2008, pp. 301-312. + +[17] Dongyu Qiu and R. Srikant, *Modeling and performance analysis of bittorrent-like peer-to-peer networks*, SIGCOMM '04: Proceedings of the 2004 conference on Applications, technologies, architectures, and protocols for computer communications (New York, NY, USA), ACM, 2004, pp. 367-378. 
+ +[18] Philippe Robert, *Stochastic networks and queues*, Stochastic Modelling and Applied Probability Series, vol. 52, Springer, New-York, June 2003. +---PAGE_BREAK--- + +[19] Philippe Robert and Florian Simatos, Occupancy schemes associated to Yule processes, Advances in Applied Probability 41 (2009), no. 2, To Appear. + +[20] L. C. G. Rogers and David Williams, *Diffusions, Markov processes, and martingales. Vol. 2: Itô calculus*, John Wiley & Sons Inc., New York, 1987. + +[21] E. Seneta, *On the supercritical branching process with immigration*, Mathematical Biosciences 7 (1970), 9-14. + +[22] Florian Simatos, Philippe Robert, and Fabrice Guillemin, *A queueing system for modeling a file sharing principle*, Proceedings of SIGMETRICS'08 (New York, NY, USA), ACM, 2008, pp. 181-192. + +[23] Florian Simatos and Danielle Tibi, *Spatial homogenization in a stochastic network with mobility*, Annals of Applied Probability (2009), To Appear. + +[24] Riikka Susitaival, Samuli Aalto, and Jorma Virtamo, *Analyzing the dynamics and resource usage of P2P file sharing by a spatio-temporal model*, International Workshop on P2P for High Performance Computation Sciences, 2006. + +[25] David Williams, *Probability with martingales*, Cambridge University Press, 1991. + +[26] Xiangying Yang and Gustavo de Veciana, *Service capacity of peer to peer networks*, Proceedings of IEEE Infocom'04, ACM, 2004, pp. 2242-2252. + +(L. Leskelä) HELSINKI UNIVERSITY OF TECHNOLOGY, DEPARTMENT OF MATHEMATICS AND SYSTEMS ANALYSIS, PO BOX 1100, 02015 TKK, FINLAND + +*E-mail address:* lasse.leskela@iki.fi + +*URL:* http://www.iki.fi/lsl + +(Ph. Robert, F. Simatos) INRIA PARIS — ROCQUENCOURT, DOMAINE DE VOLUCEAU, BP 105, 78153 LE CHESNAY, FRANCE. 
+ +*E-mail address:* Philippe.Robert@inria.fr + +*E-mail address:* Florian.Simatos@inria.fr + +*URL:* http://www-rocq.inria.fr/~robert \ No newline at end of file diff --git a/samples_new/texts_merged/1808935.md b/samples_new/texts_merged/1808935.md new file mode 100644 index 0000000000000000000000000000000000000000..21dda5f798a60cb323e2da649a46db0d847d62f4 --- /dev/null +++ b/samples_new/texts_merged/1808935.md @@ -0,0 +1,409 @@ + +---PAGE_BREAK--- + +# Ostensive Automatic Schema Mapping for Taxonomy-based Peer-to-Peer Systems + +Yannis Tzitzikas¹ and Carlo Meghini + +Istituto di Scienza e Tecnologie dell' Informazione [ISTI] +Consiglio Nazionale delle Ricerche [CNR], Pisa, Italy +Email: {tzitzik|meghini}@iei.pi.cnr.it + +**Abstract** This paper considers Peer-to-Peer systems in which peers employ taxonomies for describing the contents of their objects and for formulating semantic-based queries to the other peers of the system. As each peer can use its own taxonomy, peers are equipped with inter-taxonomy mappings in order to carry out the required translation tasks. As these systems are ad-hoc, the peers should be able to create or revise these mappings on demand and at run-time. For this reason, we introduce an ostensive data-driven method for automatic mapping and specialize it for the case of taxonomies. + +## 1 Introduction + +There is a growing research interest on peer-to-peer systems like Napster, Gnutella, FreeNet and many others. A peer-to-peer (P2P) system is a distributed system in which participants (the peers) rely on one another for service, rather than solely relying on dedicated and often centralized servers. Many examples of P2P systems have emerged recently, most of which are wide-area, large-scale systems that provide content sharing [4], storage services [19], or distributed "grid" computation [2, 1]. Smaller-scale P2P systems also exist, such as federated, server-less file systems [10, 7] and collaborative workgroup tools [3]. 
+ +Existing peer-to-peer (P2P) systems have focused on specific application domains (e.g. music file sharing) or on providing file-system-like capabilities. These systems do not yet provide semantic-based retrieval services. In most of the cases, the name of the object (e.g. the title of a music file) is the only means for describing the contents of the object. Semantic-based retrieval in P2P systems is a great challenge. In general, the language that can be used for indexing the objects of the domain and for formulating semantic-based queries, can be *free* (e.g natural language) or *controlled*, i.e. object descriptions and queries may have to conform to a specific vocabulary and syntax. The first case, resembles distributed Information Retrieval (IR) systems and this approach is applicable in the case where the objects of the domain have a textual content (e.g. see + +¹ Work done during the postdoctoral studies of the author at CNR-ISTI as an ERCIM fellow. +---PAGE_BREAK--- + +[23]). In this paper we focus on the second case where the objects of a peer are indexed according to a specific conceptual model represented in a data model (e.g. relational, object-oriented, logic-based, etc), and content searches are formulated using a specific query language. This approach, which can be called "database approach", starts to receive noteworthy attention by the researchers, as is believed that the database and knowledge base research has much to contribute to the P2P grand challenge through its wealth of techniques for sophisticated semantics-based data models and query processing techniques (e.g. see [14, 9, 18, 15, 32]). A P2P system might impose a single conceptual model on all participants to enforce uniform, global access, but this will be too restrictive. 
Alternatively, a limited number of conceptual models may be allowed, so that traditional information mediation and integration techniques will likely apply (with the restriction that there is no central authority). The case of fully heterogeneous conceptual models makes uniform global access extremely challenging and this is the case that we are interested in. + +The first and basic question that we have to investigate is which conceptual modeling approach is appropriate for the P2P paradigm. We would like a scalable conceptual modeling approach which also allows bridging the various kinds of heterogeneity in a systematic and easy manner. As there are no central servers, or mediators, each participating source must have (or be able to create) *mappings*, or articulations, between its conceptual model and the conceptual models of its neighbors in order to be able to translate the received queries to queries that can be understood (and thus answered) by the recipient sources. These mapping could be established manually (as in the case of Semantic Web [8]) but the more appropriate approach for a P2P network, and the more challenging, is the *automatic mapping*. For all these reasons, a simple, conceptually clear, and application-independent conceptual modeling approach seems to be advantageous. + +In this paper we consider the case where peers employ *taxonomies*. Note that it is quite easy to create a taxonomy for a source or a mediator. Even ordinary Web users can design this kind of conceptual model. Taxonomies can be constructed either from scratch, or by extracting them from existing taxonomies (e.g. from the taxonomy of Yahoo! or ODP) using special-purpose languages and tools (e.g. see [30]). Furthermore, the design of taxonomies can be done more systematically if done following a faceted approach (e.g. see [27, 26]). 
In addition, thanks to techniques that have emerged recently [31], taxonomies of compound terms can be also defined in a flexible and systematic manner. However, the more important for P2P systems, advantage of taxonomies is that their simplicity and modeling uniformity allows integrating the contents of several sources without having to tackle complex structural differences. Indeed, as it is shown in [32], inter-taxonomy mappings offer a *uniform* method for bridging *naming, contextual and granularity* heterogeneities between the taxonomies of the sources. Given this conceptual modeling approach, a mediator does not have to tackle complex structural differences between the sources, as it happens with relational mediators (e.g. see [22, 21]) and Description Logics-based medi- +---PAGE_BREAK--- + +ators (e.g. see [17, 11]). Moreover, it allows the integration of *schema* and *data* in a uniform manner. Another advantage of this conceptual modeling approach is that query evaluation in taxonomy-based sources and mediators can be done efficiently (polynomial time). + +In this paper we introduce a data-driven method for automatic taxonomy articulation. We call this method *ostensive* because the meaning of each term is explained by ostension, i.e. by pointing to something (here, to a set of objects) to which the term applies. For example, the word "rose" can be defined ostensively by pointing to a rose and saying "that is a rose". Instead, the verbal methods of term definition (e.g. the synonyms or the analytic method) presuppose that the learner already knows some other terms and, thus, they are useless to someone who does not know these terms; e.g. verbal word definitions are useless to a small child who has not learnt any words at all. + +Specifically, in this paper we describe an ostensive articulation method that can be used for articulating both single terms and queries, and it can be implemented efficiently by a communication protocol. 
However, ostensive articulation is possible in a P2P system only if the domain of the peers is not disjoint. If it is disjoint then we cannot derive any articulation. This problem can be tackled by employing *reference collections*. For instance, each peer can have its own taxonomy, but before joining the network it must first index the objects of a small reference object set. Consequently, peers can build automatically the desired articulations by running the articulation protocol on this reference collection. + +The rest of this paper is organized as follows: Section 2 introduces a general formal framework for ostensive articulation. Section 3 specializes and describes ostensive articulation for taxonomy-based sources. Section 4 discusses the application of ostensive articulation in P2P systems of taxonomy-based sources, and finally, Section 5 concludes the paper. + +## 2 Ostensive Articulation + +Let us first introduce the general framework. We view a source $S$ as a function $S: Q \to \mathcal{A}$ where $Q$ is the set of all queries that $S$ can answer, and $\mathcal{A}$ is the set of all answers, i.e. $\mathcal{A}=\{S(q) | q \in Q\}$. As we focus on retrieval queries, we assume that $\mathcal{A}$ is a subset of $\mathcal{P}(Obj)$ where `Obj` is the set of all objects stored at the source. + +The ostensive articulation technique that we shall introduce requires a "naming service", i.e. a method for computing one (or may more) name (e.g. query) for each set of objects $R \subseteq Obj$. Let $Q_N$ denote the set of all names. In general, $Q_N = Q$, however we introduce $Q_N$ because we may want names to be queries of a specific form. For supporting the naming service we would like a function $n: \mathcal{P}(Obj) \to Q_N$ such that for each $R \subseteq Obj$, $S(n(R)) = R$. Having such a function, we would say that $n(R)$ is an exact name for $R$. 
Note that if $S$ is an onto function and $Q_N = Q$, then the naming function $n$ coincides with the inverse relation of $S$, i.e. with the relation $S^{-1}: \mathcal{P}(Obj) \to Q$. However, this +---PAGE_BREAK--- + +is not always the case, as more often than not, *S* is not an onto function, i.e. *A* ⊂ *P*(Obj). For this reason we shall introduce two naming functions, a lower naming function *n*⁻ and an upper naming function *n*⁺. To define these functions, we first need to define an ordering over queries. Given two queries, *q* and *q'* in *Q*, we write *q* ≤ *q'* if *S(q) ⊆ S(q')*, and we write *q* ∼ *q'*, if both *q* ≤ *q'* and *q'* ≤ *q* hold. Note that ∼ is an equivalence relation over *Q*, and let *Q~* denote the set of equivalence classes induced by ∼ over *Q*. Note that ≤ is a partial order over *Q~*. + +Now we can define the function $n^-$ and $n^+$ as follows: + +$$ +\begin{align*} +n^{-}(R) &= \text{lub}\{\, q \in Q_{N} \mid S(q) \subseteq R \} \\ +n^{+}(R) &= \text{glb}\{\, q \in Q_{N} \mid S(q) \supseteq R \} +\end{align*} +$$ + +where $R$ is any subset of $Obj$. Now let $R$ be a subset of $Obj$ for which both $n^{-}(R)$ and $n^{+}(R)$ are defined (i.e. the above lub and glb exist). It is clear that in this case it holds: + +$$ +S(n^{-}(R)) \subseteq R \subseteq S(n^{+}(R)) +$$ + +and that $n^-(R)$ and $n^+(R)$ are the best "approximations" of the exact name of $R$. Note that if $S(n^-(R)) = S(n^+(R))$ then both $n^-(R)$ and $n^+(R)$ are exact names of $R$. + +If $Q_N$ is a query language that (a) supports disjunction ($\vee$) and conjunction ($\wedge$) and is closed with respect to these, and (b) has a top ($\top$) and a bottom ($\bot$) element such that $S(\top) = Obj$ and $S(\bot) = \emptyset$, then the functions $n^-$ and $n^+$ are defined for every subset $R$ of $Obj$. 
Specifically, in this case $(Q_\sim, \le)$ is a complete lattice, thus these functions are defined as: + +$$ +\begin{align*} +n^{-}(R) &= \bigvee \{ q \in Q_{N} \mid S(q) \subseteq R \} \\ +n^{+}(R) &= \bigwedge \{ q \in Q_{N} \mid S(q) \supseteq R \} +\end{align*} +$$ + +As $Q_N$ is usually an infinite language, $n^-(R)$ and $n^+(R)$ are queries of infinite length. This means that in practice we also need for a method for computing a query of finite length that is equivalent to $n^-(R)$ and another one that is equivalent to $n^+(R)$. + +If however $Q_N$ does not satisfy the above ((a) and (b)) conditions, then $n^-(R)$ and $n^+(R)$ may not exist. For example, this happens if we want to establish relationships between single terms of two taxonomy-based sources, or between atomic concepts of two Description Logics-based sources. For such cases, we can define $n^-$ and $n^+$ as follows: + +$$ +\begin{align*} +n^{-}(R) &= \max\{ q \in Q_{N} \mid S(q) \subseteq R \} \\ +n^{+}(R) &= \min\{ q \in Q_{N} \mid S(q) \supseteq R \} +\end{align*} +$$ + +where max returns the maximal element(s), and min the minimal(s). Clearly, in this case we may have several lower and upper names for a given R. + +We can now proceed and describe the ostensive articulation. Consider two sources $S_i : Q_i \to P(Obj_i)$, and $S_j : Q_j \to P(Obj_j)$. Ostensive articulation is +---PAGE_BREAK--- + +possible only if their domains are not disjoint, i.e. if $Obj_i \cap Obj_j \neq \emptyset$. Let $C$ denote their common domain, i.e. $C = Obj_i \cap Obj_j$. The method that we shall describe yields relationships that are extensionally valid in $C$. + +Suppose that $S_i$ wants to establish an articulation $a_{i,j}$ to a source $S_j$. An articulation $a_{i,j}$ can contain relationships of the form: + +(i) $q_i \geq q_j$, + +(ii) $q_i \leq q_j$ + +where $q_i \in Q_i$, $q_j \in Q_j$. 
These relationships have the following meaning: + +(i) $q_i \geq q_j$ means that $S_i(q_i) \cap C \supseteq S_j(q_j) \cap C$ + +(ii) $q_i \leq q_j$ means that $S_i(q_i) \cap C \subseteq S_j(q_j) \cap C$ + +Before describing ostensive articulation let us make a couple of remarks. The first is that the form (i or ii) of the relationships of an articulation depends on the internal structure and functioning of the source that uses the articulation. For instance, suppose that $S_i$ acts as a mediator over $S_j$. If $S_i$ wants to compute complete (with respect to $C$) answers, then it should use only relationships of type (i) during query translation. On the other hand, if $S_i$ wants to compute sound (with respect to $C$) answers then it should use relationships of type (ii) (e.g. see [21]). + +Another interesting remark is that if $S_i$ is a mediator that adopts a global-as-view modeling approach, then all $q_i$ that appear in $a_{i,j}$ are primitive concepts. On the other hand, if $S_i$ adopts a local-as-view approach then all $q_j$ that appear in $a_{i,j}$ are primitive concepts of $S_j$. + +Below we describe ostensive articulation for the more general case where $S_i$ is interested in relationships of both, (i) and (ii), types, and where $q_i, q_j$ can be arbitrary queries. Let $n_j^-$ and $n_j^+$ be the naming functions of $S_j$ as defined earlier. Also let $S_i^c(q) = S_i(q) \cap C$ and $S_j^c(q) = S_j(q) \cap C$. Now suppose that $S_i$ wants to articulate a query $q_i \in Q_i$. 
The query $q_i$ should be articulated as follows: + +$$ +\begin{aligned} +q_i &\ge n_j^-(S_i^c(q_i)) && \text{if } S_i^c(q_i) \supseteq S_j^c(n_j^-(S_i^c(q_i))) \\ +q_i &\le n_j^-(S_i^c(q_i)) && \text{if } S_i^c(q_i) \subseteq S_j^c(n_j^-(S_i^c(q_i))) \\ +q_i &\ge n_j^+(S_i^c(q_i)) && \text{if } S_i^c(q_i) \supseteq S_j^c(n_j^+(S_i^c(q_i))) \\ +q_i &\le n_j^+(S_i^c(q_i)) && \text{if } S_i^c(q_i) \subseteq S_j^c(n_j^+(S_i^c(q_i))) +\end{aligned} + $$ + +Observe the role of the naming functions. Instead of checking all queries in $Q_j$, $S_j$ just uses its naming functions in order to compute the lower and the upper name of the set $S_i(q_i) \cap C$. Recall that the naming functions (by definition) return the most precise (semantically close) mapping for $q_i$, thus this is all that we need. + +Furthermore, as we shall see below, the above relationships can be obtained without extensive communication. In fact, they can be obtained by a quite simple and efficient (in terms of exchanged messages) distributed protocol. The protocol +---PAGE_BREAK--- + +$$S_i: \begin{array}{l} (1) A := S_i(q_i); \\ (2) \text{send } A \text{ to } S_j; \end{array}$$ + +$$S_j: \begin{array}{l} (3) F := A \setminus Obj_j; \\ (4) A := A \cap Obj_j; \\ (5) down := n_j^-(A); Bdown := S_j(\text{down}); \\ (6) up := n_j^+(A); Bup := S_j(\text{up}); \\ (7) \text{send } F, (\text{down}, Bdown), (\text{up}, Bup) \text{ to } S_i \end{array}$$ + +$$S_i: \begin{array}{l} (8) \text{If } (A \setminus F) \supseteq (Bdown \cap Obj_i) \text{ then set } q_i \geq \text{down}; \\ (9) \text{If } (A \setminus F) \subseteq (Bdown \cap Obj_i) \text{ then set } q_i \leq \text{down}; \\ (10) \text{If } (A \setminus F) \supseteq (Bup \cap Obj_i) \text{ then set } q_i \geq \text{up}; \\ (11) \text{If } (A \setminus F) \subseteq (Bup \cap Obj_i) \text{ then set } q_i \leq \text{up} \end{array}$$ + +Fig. 1. The ostensive articulation protocol + +is sketched in Figure 1. Note that only two messages have to be exchanged between $S_i$ and $S_j$ for articulating the query $q_i$. 
+ +Another interesting point is that $S_i$ and $S_j$ do not have to a-priori know (or compute) their common domain $C$, as $C$ is "discovered" during the run of the protocol (this is the reason why $S_j$ stores in $F$ and sends to $S_i$ those objects that do not belong to $Obj_j$). + +In the case where $Q_N \subset Q$, the only difference is that the message that $S_j$ sends to $S_i$ may contain more than one *up* and *down* queries. + +A source can run the above protocol in order to articulate one, several or all of its terms (or queries). + +## 3 Ostensive Articulation for Taxonomy-based Sources + +Here we shall specialize ostensive articulation for the case of taxonomy-based sources. Examples of this kind of sources include Web Catalogs (like Yahoo!, Open Directory) and Classification Schemes used in Library and Information Science. + +We view a taxonomy-based source $S$ as a quadruple $S = (T, \preceq, I, Q)$ where: + +- $T$ is a finite set of names called *terms*, e.g. **Canaries**, **Birds**. + +- $\preceq$ is a reflexive and transitive binary relation over $T$ called *subsumption*, e.g. **Canaries** $\preceq$ **Birds**. + +- $I$ is a function $I: T \to P(Obj)$ called *interpretation* where *Obj* is a finite set of objects. For example *Obj* = {1, ..., 100} and $I(\mathbf{Canaries}) = \{1, 3, 4\}$. + +- $Q$ is the set of all queries defined by the grammar $q ::= t \mid q \wedge q' \mid q \vee q' \mid \neg q \mid (q)$ where $t$ is a term in $T$. + +Figure 2 shows an example of a source consisting of 8 terms and 3 objects². + +We assume that every terminology $T$ also contains two special terms, the *top term*, denoted by $\top$, and the *bottom term*, denoted by $\bot$. The top term subsumes + +² We illustrate only the Hasse diagram of the subsumption relation. +---PAGE_BREAK--- + +**Fig. 2.** Graphical representation of a source + +every other term *t*, i.e. *t* ≲ ⊤. The bottom term is strictly subsumed by every +other term *t* different than top and bottom, i.e. 
⊥ ≲ ⊥, ⊥ ≲ ⊤, and ⊥ ≺ *t*, +for every *t* such that *t* ≠ ⊤ and *t* ≠ ⊥. We also assume that *I*(⊥) = ∅ in every +interpretation *I*. + +The answer $S(q)$ of a query $q$ is defined as follows (for more see [33]): + +$$ +\begin{align*} +S(t) &= \bigcup \{ I(t') \mid t' \preceq t \} \\ +S(q \land q') &= S(q) \cap S(q') \\ +S(q \lor q') &= S(q) \cup S(q') \\ +S(\neg q) &= \mathit{Obj} \setminus S(q) +\end{align*} +$$ + +For example, in Figure 2 we have $S(\text{DB}) = \{\text{1,2}\}$, as $S(\text{DB}) = I(\text{DB}) \cup I(\text{Databases}) \cup I(\text{RDB}) = \{\text{1,2}\}$, and $S(\text{DB} \land \text{JournalArticle}) = \{\text{1}\}$. We define the *index* of an object *o* with respect to an interpretation *I*, denoted by $D_I(o)$, as follows: $D_I(o) = \bigwedge \{t \in T \mid o \in I(t)\}$. For example, in the source of Figure 2 we have $D_I(3) = \text{JournalArticle}$ and $D_I(1) = \text{RDB} \land \text{JournalArticle}$. + +Let us now define the naming functions for this kind of sources. We define the +set of names $Q_N$ as follows: $Q_N = \{ q \in Q \mid q \text{ does not contain negation "¬"} \}$. +We exclude queries with negation because, as showed in [32], if such queries +appear in articulations then we may get systems which do not have a unique +minimal model and this makes query evaluation more complicated and less effi- +cient. + +The lower and upper name of a set $R \subseteq Obj$ are defined as in the general +framework and clearly ($Q_N, \leq$) is a complete lattice. What remains is to find +finite length queries that are equivalent to $n^-(R)$ and $n^+(R)$. + +**Theorem 1.** + +$$ +\begin{align*} +n^{-}(R) &\sim \bigvee \{ D_{I}(o) \mid o \in R, S(D_{I}(o)) \subseteq R \} \\ +n^{+}(R) &\sim \bigvee \{ D_{I}(o) \mid o \in R \} +\end{align*} +$$ + +The proof is given in [34]. It is clear that the above queries have finite length, +hence they are the queries that we are looking for. 
For this purpose, hereafter +we shall use $n^-(R)$ and $n^+(R)$ to denote the above queries. Note that if the +set $\{o \in R, S(D_I(o)) \subseteq R\}$ is empty then we consider that $n(R)^- = \perp$. Some +examples from the source shown in Figure 3 follow: +---PAGE_BREAK--- + +Fig. 3. Example of a source + +$$ +\begin{align*} +n^+({1,3}) &= (\text{tomatoes} \land \text{red}) \lor (\text{apples} \land \text{green}) \\ +n^-({1,3}) &= (\text{tomatoes} \land \text{red}) \lor (\text{apples} \land \text{green}) \\ +n^+({1,3,5}) &= (\text{tomatoes} \land \text{red}) \lor (\text{apples} \land \text{green}) \lor (\text{apples} \land \text{red}) \\ +n^-({1,3,5}) &= (\text{tomatoes} \land \text{red}) \lor (\text{apples} \land \text{green}) +\end{align*} +$$ + +Let us now demonstrate the articulation protocol for taxonomy-based sources. +Consider the sources shown in Figure 4 and suppose that $S_1$ wants to articulate +its terms with queries of $S_2$. In the following examples we omit the set $F$ (from +the message of line (7) of Figure 1) as it is always empty. + +Fig. 4. 
An example of two sources S₁ and S₂ + +The steps for articulating the term **cabbages** follow: + +$$ +\begin{array}{l} +S_1 \rightarrow S_2 : \{\text{1}\} \\ +S_2 \rightarrow S_1 : (\bot, \emptyset), (\mathbf{green}, \{1,5,6\}) \\ +S_1 : \mathbf{cabbages} \preceq \mathbf{green} +\end{array} + $$ + +The steps for articulating the term apples follow: + +$$ +\begin{array}{l} +S_1 \rightarrow S_2 : \{\mathbf{4}, \mathbf{5}\} \\ +S_2 \rightarrow S_1 : (\bot, \emptyset), (\mathbf{red} \lor \mathbf{green}, \{\mathbf{1}, \mathbf{2}, \mathbf{3}, \mathbf{4}, \mathbf{5}, \mathbf{6}\}) \\ +S_1 : \mathbf{apples} \preceq \mathbf{red} \lor \mathbf{green} +\end{array} + $$ + +The steps for articulating the term foods follow: +---PAGE_BREAK--- + +$$ +\begin{array}{l@{\quad}l} +S_1 \to S_2 & : \{1,2,3,4,5,6,7\} \\ +S_2 \to S_1 & : (\text{red} \lor \text{green}, \{1,2,3,4,5,6\}), \\ +& (\text{red} \lor \text{green} \lor \text{yellow}, \{1,2,3,4,5,6,7,8\}) \\ +S_1 & : \text{foods} \succeq \text{red} \lor \text{green}, \\ +& \qquad \text{foods} \sim \text{red} \lor \text{green} \lor \text{yellow} +\end{array} +$$ + +If $S_1$ runs the protocol for each term of its taxonomy, it will infer the following relationships: + +cabbages $\preceq$ green +tomatoes $\preceq$ red +apples $\preceq$ red $\vee$ green +bananas $\preceq$ green $\vee$ yellow +vegetables $\preceq$ green $\vee$ red +fruits $\preceq$ red $\vee$ green $\vee$ yellow +foods $\succeq$ red $\vee$ green +foods $\sim$ red $\vee$ green $\vee$ yellow + +If $S_2$ runs this protocol for each term of its taxonomy, it will infer the following relationships: + +red $\succeq$ tomatoes +red $\preceq$ tomatoes $\vee$ apples +green $\succeq$ cabbages +green $\preceq$ cabbages $\vee$ apples $\vee$ bananas +yellow $\preceq$ bananas +color $\sim$ cabbages $\vee$ tomatoes $\vee$ apples $\vee$ bananas + +The protocol can be used not only for articulating single terms to queries, but also for articulating queries to queries. 
For example, the steps for articulating the query apples $\lor$ bananas follow: + +$$ +\begin{array}{l} +S_1 \to S_2 : \{4, 5, 6, 7\} \\ +S_2 \to S_1 : (\text{red} \lor \text{green} \lor \text{yellow}, \{1,2,3,4,5,6,7,8\}) \\ +S_1 : \text{apples} \lor \text{bananas} \preceq \text{red} \lor \text{green} \lor \text{yellow} +\end{array} +$$ + +Now consider the case where we do not want to articulate terms with queries, but terms with *single terms* only, i.e. consider the case where $Q_N = T$. Note that now $lub\{t \in T | S(t) \subseteq R\}$ and $glb\{t \in T | S(t) \supseteq R\}$ do not always exist. For example, consider the source shown in Figure 5.(a). Note that $n^+(\{1\}) = glb\{t, t'\}$ which does not exist. For the source shown in Figure 5.(b) note that $n^-(\{1,2\}) = lub\{t,t'\}$ which does not exist. Therefore, we can define the upper and lower names of a set $R$ as follows: $n^-(R) = max(\{t \in T | S(t) \subseteq R\})$ and $n^+(R) = min(\{t \in T | S(t) \supseteq R\})$. Consider for example the source shown in Figure 5.(c). Here we have: + +$$ n^{-}(\{1, 2, 3\}) = max(\{c, d, e, b\}) = \{b\} $$ + +$$ n^{+}(\{1, 2, 3\}) = min(\{b, a\}) = \{b\} $$ +---PAGE_BREAK--- + +Fig. 5. An example of three sources + +Certainly, the relationships obtained by the term-to-term articulation are less expressive than the relationships obtained by the term-to-queries articulation. For instance, suppose that we want to articulate the terms of the source $S_1$ in each one of the three examples that are shown in Figure 6. Table 1 shows the articulation $a_{1,2}$ that is derived by the *term-to-term* articulation and the *term-to-queries* articulation in each of these three examples. + +Fig. 6. Three examples + +
| Example | $a_{1,2}$ (term-to-term art.) | $a_{1,2}$ (term-to-query art.) |
|---|---|---|
| Figure 6.(a) | $a \succeq b$, $a \succeq b'$ | $a \sim b \lor b'$ |
| Figure 6.(b) | $a \preceq b$, $a \preceq b'$ | $a \sim b \land b'$, $a' \preceq b \lor b'$ |
| Figure 6.(c) | — | $a \preceq b \lor b'$, $a' \preceq b \lor b'$ |
+ +**Table 1.** Term-to-term vs term-to-query articulation + +# 4 Ostensive Articulation in Taxonomy-based P2P Systems + +We demonstrated how ostensive articulation can be applied on taxonomy-based +sources for constructing inter-taxonomy articulations. Ostensive articulation is +---PAGE_BREAK--- + +possible in a P2P system only if the domain of the peers is not disjoint. We also assume that every object of *Obj* has the same identity (e.g. object identifier, URI) in all sources. For domains where no accepted identity/naming standards exist, mapping tables such as the ones proposed in [18] can be employed to tackle this problem. Also techniques from the area of information fusion (that aim at recognizing different objects that represent the same reality) could be also employed for the same purpose. If however the domain of the peers is disjoint then we cannot derive any articulation. One method to tackle this problem is to employ reference collections. For instance, each peer can have its own taxonomy, but before joining the network it must first index the objects of a small object set. Consequently, peers can build automatically the desired articulations by running the articulation protocol on this reference collection. Running the protocol on the reference collection *C* means that the sources $S_1$ and $S_2$ instead of using $S_1(q_1)$ and $S_2(q_2)$, they use $S_1(q_1) \cap C$ and $S_2(q_2) \cap C$ respectively. Also note that the employment of reference collections can: (a) enhance the accuracy of the resulting articulation, and/or (b) enhance efficiency. For instance, if *C* corresponds to a well known, thus well-indexed set of objects then it can improve the quality of the obtained articulations. For example in the case where $S_1$ and $S_2$ are bibliographic sources, *C* can be a set of 100 famous papers in computer science. A reference collection can also enhance the efficiency of the protocol since a smaller number of objects go back and forth. 
This is very important, especially in P2P where involved sources are distant. + +In a P2P system of taxonomy-based sources, a source apart from object queries now accepts content-based queries, i.e. queries (e.g. boolean expressions) expressed in terms of its taxonomy. For answering a query a source may have to query the neighbor sources. The role of articulations during query evaluation has been described in [33] (for the mediator paradigm) and in [32] (for the P2P paradigm). Roughly, a source in a P2P network can serve any or all of the following roles: primary source, mediator, and query initiator. As a *primary* source it provides original content to the system and is the authoritative source of that data. Specifically, it consists of a taxonomy (i.e. a pair (*T*, $\le$)) plus an object base (i.e. an interpretation *I*) that describes a set of objects (*Obj*) in terms of the taxonomy. As a *mediator* it has a taxonomy but does not store or provide any content: its role is to provide a uniform query interface to other sources, i.e. it forwards the received queries after first selecting the sources to be queried and formulating the query to be sent to each one of them. These tasks are determined by the articulations of the mediator. As a *query initiator* it acts as client in the system and poses new queries. Figure 7 sketches graphically the architecture of a network consisting of four peers $S_1, ..., S_4$; two primary sources ($S_3$ and $S_4$), one mediator ($S_2$) and one source that is both primary and mediator ($S_1$). Triangles denote taxonomies, cylinders object bases, and circles inter-taxonomy mappings. $S_2$ is a mediator over $S_1, S_3$ and $S_4$, while $S_1$ is a mediator over $S_2$ and $S_3$. For more about this architecture and the associated semantics and query evaluation methods please refer to [32]. +---PAGE_BREAK--- + +Fig. 7. 
A P2P network based on taxonomies and inter-taxonomy mappings + +5 Conclusion + +The contribution of this paper is a formal framework for ostensive data-driven articulation. Roughly, the approaches for linking two conceptual models or tax-onomies can be broadly classified as either *model*-driven or *data*-driven. + +The model-driven approach starts with a (theoretical) model of how the two taxonomies are constructed and how they are used. Subsequently, the mapping approaches have to address the compatibility, structural and semantic differences and heterogeneities that exist. This is done using software tools (that usually rely on lexical resources) that assist the designer during the articulation process (e.g. see [25, 29, 5, 24]). + +On the other hand, in the *data-driven* approach the mappings are *discovered* by examining how terms are used in indexing the objects. The advantage of such an approach is that it does not make any assumptions on how the two taxonomies are constructed, or how they are used. All it requires is the presence of two databases that contain several objects in common. However, the data-driven approach does have inherent difficulties. First, unless one has a large collection of objects that have been indexed using *both* taxonomies, spurious correlation can result in inappropriate linking. Second, if a term is not assigned to any of the common objects, one cannot establish a link for that term. Third, rarely occurring terms can result in statistically insignificant links. Finally, the validation of data-driven approaches can only be statistical in nature. In spite of these inherent difficulties, data-driven approaches can be formalized and automated. However, most of the data-driven approaches that can be found in the literature are applicable only if the domain is a set of documents (texts) (e.g. [6, 16, 12, 20, 28]), and they cannot establish mappings between queries. 
+ +The technique described in this paper is quite general and expressive as it can be used for articulating not only single terms but also queries. Furthermore, it can be used for articulating the desired set of terms or queries (it is not obligatory to articulate the entire taxonomies). Another distinctive feature of this technique is that it can be implemented efficiently by a communication protocol, thus the involved sources do not have to reside on the same machine. Therefore it seems appropriate for automatic articulation in P2P systems which is probably the more challenging issue in P2P computing [9]. + +We also demonstrated how it can be applied to taxonomy-based sources. An interesting remark is that the proposed method can be applied not only to manually constructed taxonomies but also to taxonomies derived automatically on the basis of an inference service. For instance, it can be applied on sources +---PAGE_BREAK--- + +indexed using taxonomies of compound terms which are defined algebraically [31]. Furthermore it can be applied on concept lattices formed using Description Logics (DL) [13]. + +One issue for further research, is to investigate how a source that wants to articulate a set $F \subseteq Q$ must use the described protocol in order to obtain the desired articulation with the minimal number of exchanged messages and the less network throughput. Another issue for further research is to investigate ostensive articulation for other kinds of sources. + +## Acknowledgements + +The first author wants to thank his wife Tonia for being an endless source of happiness and inspiration. + +## References + +1. "About LEGION - The Grid OS" (www.appliedmeta.com/legion/about.html), 2000. + +2. "How Entropia Works" (www.entropia.com/how.asp), 2000. + +3. "Groove" (www.groove.net), 2001. + +4. "Napster" (www.naptster.com), 2001. + +5. Bernd Amann and Irini Fundulaki. "Integrating Ontologies and Thesauri to Build RDF Schemas". 
In *Proceedings of the Third European Conference for Digital Libraries ECDL '99*, Paris, France, 1999. + +6. S. Amba. "Automatic Linking of Thesauri". In *Proceeding of SIGIR '96*, Zurich, Switzerland, 1996. ACM Press. + +7. T.E. Anderson, M. Dahlin, J. M. Neefe, D. A. Patterson, D. S. Roselli, and R. Wang. "Serveless Network File Systems". *SOSP*, 29(5), 1995. + +8. Tim Berners-Lee, James Hendler, and Ora Lassila. "The Semantic Web". *Scientific American*, May 2001. + +9. Philip A. Bernstein, F. Giunchiglia, A. Kementsietsidis, J. Mylopoulos, L. Serafini, and I. Zaihrayeu. "Data Management for Peer-to-Peer Computing: A Vision". In *Proceedings of WebDB02*, Madison, Wisconsin, June 2002. + +10. W. J. Bolosky, J. R. Douceur, D. Ely, and M. Theimer. "Feasibility of a Serveless Distributed File System Deployed on an Existing Set of Desktop PCs". In *Proceedings of Measurement and Modeling of Computer Systems*, June 2000. + +11. Diego Calvanese, Giuseppe De Giacomo, and Maurizio Lenzerini. A framework for ontology integration. In *Proc. of the 2001 Int. Semantic Web Working Symposium (SWWS 2001)*, pages 303-316, 2001. + +12. A. Doan, J. Madhavan, P. Domingos, and A. Halevy. "Learning to Map between Ontologies on the Semantic Web". In *Proceedings of the World-Wide Web Conference (WWW-2002)*, 2002. + +13. F.M. Donini, M. Lenzerini, D. Nardi, and A. Schaerf. "Reasoning in Description Logics", chapter 1. CSLI Publications, 1997. + +14. Steven Gribble, Alon Halevy, Zachary Ives, Maya Rodrig, and Dan Suiu. "What can Databases do for Peer-to-Peer?". In *Proceedings of WebDB01*, Santa Barbara, CA, 2001. +---PAGE_BREAK--- + +15. Alon Halevy, Zachary Ives, Peter Mork, and Igor Tatarinov. "Piazza: Data Management Infrastructure for Semantic Web Applications". In *Proceedings of WWW'2003*, May 2003. + +16. Heiko Helleg, Jurgen Krause, Thomas Mandl, Jutta Marx, Matthias Muller, Peter Mutschke, and Robert Strogen. "Treatment of Semantic Heterogeneity in Information Retrieval". 
Technical Report 23, Social Science Information Centre, May 2001. (http://www.gesis.org/en/publications/reports/iz working papers/). + +17. Vipul Kashyap and Amit Sheth. "Semantic Heterogeneity in Global Information Systems: the Role of Metadata, Context and Ontologies". In *Cooperative Information Systems: Trends and Directions*. Academic Press, 1998. + +18. A. Kementsietsidis, Marcelo Arenas, and Rene J. Miller. "Mapping Data in Peer-to-Peer Systems: Semantics and Algorithmic Issues". In *Int. Conf. on Management of Data, SIGMOD'2003*, San Diego, California, June 2003. + +19. J. Kubiatowicz, D. Bindel, Y. Chen, S. Czerwinski, P. Eaton, D. Geels, R. Gum-madi, S. Rhea, H. Weatherspoon, W. Weimer, C. Wells, and B. Zhao. "Oceanstore: An Architecture for Global-Scale Persistent Storage". In *ASPLOS*, November 2000. + +20. M. Lacher and G. Groh. "Facilitating the Exchange of Explicit Knowledge Through Ontology Mappings". In *Proceedings of the 14th Int. FLAIRS Conference*, 2001. + +21. Maurizio Lenzerini. Data integration: A theoretical perspective. In *Proc. ACM PODS 2002*, pages 233–246, Madison, Wisconsin, USA, June 2002. + +22. Alon Y. Levy. "Answering Queries Using Views: A Survey". *VLDB Journal*, 2001. + +23. Bo Ling, Zhiguo Lu, Wee Siong Ng, BengChin Ooi, Kian-Lee Tan, and Aoying Zhou. "A Content-Based Resource Location Mechanism in PeerIS". In *Proceedings of the 3rd International Conference on Web Information Systems Engineering, WISE 2002*, Singapore, December 2002. + +24. Bernardo Magnini, Luciano Serafini, and Manuela Speranza. "Making Explicit the Hidden Semantics of Hierarchical Classification". In *Atti dell'Ottavo Congresso Nazionale dell'Associazione Italiana per l'Intelligenza Artificiale, LNCS. Springer Verlag*, 2003. + +25. P. Mitra, G. Wiederhold, and J. Jannink. "Semi-automatic Integration of Knowledge sources". In *Proc. of the 2nd Int. Conf. On Information FUSION*, 1999. + +26. Ruben Prieto-Diaz. 
"Implementing Faceted Classification for Software Reuse". *Communications of the ACM*, 34(5), 1991. + +27. S. R. Ranganathan. "The Colon Classification". In Susan Artandi, editor, *Vol IV of the Rutgers Series on Systems for the Intellectual Organization of Information*. New Brunswick, NJ: Graduate School of Library Science, Rutgers University, 1965. + +28. I. Ryutaro, T. Hideaki, and H. Shinichi. "Rule Induction for Concept Hierarchy Allignment". In *Proceedings of the 2nd Workshop on Ontology Learning at the 17th Int. Conf. on AI (IJCAI)*, 2001. + +29. Marios Sintichakis and Panos Constantopoulos. "A Method for Monolingual The-sauri Merging". In *Proceedings of 20th International Conference on Research and Development in Information Retrieval, ACM SIGIR'97*, Philadelphia, PA, USA, July 1997. + +30. Nicolas Spyratos, Yannis Tzitzikas, and Vassilis Christophides. "On Personaliz-ing the Catalogs of Web Portals". In *15th International FLAIRS Conference, FLAIRS'02*, Pensacola, Florida, May 2002. + +31. Yannis Tzitzikas, Anastasia Analyti, Nicolas Spyratos, and Panos Constantopou-los. "An Algebraic Approach for Specifying Compound Terms in Faceted Tax-onomies". In *13th European-Japanese Conference on Information Modelling and Knowledge Bases*, Kitakyushu, Japan, June 2003. +---PAGE_BREAK--- + +32. Yannis Tzitzikas, Carlo Meghini, and Nicolas Spyratos. "Taxonomy-based Conceptual Modeling for Peer-to-Peer Networks". In *Proceedings of 22th Int. Conf. on Conceptual Modeling, ER'2003*, Chicago, Illinois, October 2003. + +33. Yannis Tzitzikas, Nicolas Spyratos, and Panos Constantopoulos. "Mediators over Ontology-based Information Sources". In *Second International Conference on Web Information Systems Engineering, WISE 2001*, Kyoto, Japan, December 2001. + +34. Yannis T. Tzitzikas. "*Collaborative Ontology-based Information Indexing and Retrieval*". PhD thesis, Department of Computer Science - University of Crete, September 2002. 
\ No newline at end of file diff --git a/samples_new/texts_merged/1836869.md b/samples_new/texts_merged/1836869.md new file mode 100644 index 0000000000000000000000000000000000000000..c8e060cb0a3791cdf4ba3f1024fb10eabd4f4e28 --- /dev/null +++ b/samples_new/texts_merged/1836869.md @@ -0,0 +1,606 @@ + +---PAGE_BREAK--- + +# Exact and Efficient Inference for Collective Flow +Diffusion Model via Minimum Convex Cost Flow Algorithm + +Yasunori Akagi,¹ Takuya Nishimura,¹ Yusuke Tanaka,¹ Takeshi Kurashima,¹ Hiroyuki Toda¹ + +¹NTT Service Evolution Laboratories, NTT Corporation, +1-1 Hikari-no-oka, Yokosuka-Shi, Kanagawa, 239-0847, Japan +{yasunori.akagi.cu, takuya.nishimura.fk, yusuke.tanaka.rh, takeshi.kurashima.uf, hiroyuki.toda.xb}@hco.ntt.co.jp + +## Abstract + +Collective Flow Diffusion Model (CFDM) is a general framework to find the hidden movements underlying aggregated population data. The key procedure in CFDM analysis is MAP inference of hidden variables. Unfortunately, existing approaches fail to offer exact MAP inferences, only approximate versions, and take a lot of computation time when applied to large scale problems. In this paper, we propose an exact and efficient method for MAP inference in CFDM. Our key idea is formulating the MAP inference problem as a combinatorial optimization problem called Minimum Convex Cost Flow Problem (C-MCFP) with no approximation or continuous relaxation. On the basis of this formulation, we propose an efficient inference method that employs the C-MCFP algorithm as a subroutine. Our experiments on synthetic and real datasets show that the proposed method is effective both in single MAP inference and people flow estimation with EM algorithm. + +## 1. Introduction + +With recent advances in GPS, Wi-Fi, and various sensors, the importance of location information has grown and is being utilized in various fields. 
However, it is often difficult to obtain data about individual movements because of privacy concerns or the difficulty of tracking individuals over time. Instead, aggregated count data is relatively easy to obtain as it does not include individual movement information. For example, mobile spatial statistics (Terada, Nagata, and Kobayashi 2013), which is the hourly population data of fixed size square grids calculated from mobile network operational data, are available for purchase in Japan. As another example, traffic data is often obtained not in the form of tracking data of individual cars, but in the form of count data acquired by cameras or sensors installed on road networks (Yang and Zhou 1998; Morimura, Osogami, and Idé 2013). + +Although there are various uses for these aggregated count data, their applicability is limited because they do not contain explicit information about people movements. In order to utilize such data, Collective Graphical Model + +(CGM)(Sheldon and Dietterich 2011), which enables us to conduct learning and inference with aggregated count data, was proposed. In particular, Collective Flow Diffusion Model (CFDM) (Kumar, Sheldon, and Srivastava 2013), which is a special case of CGM, has been proposed to infer people flows between the areas by modeling individual movements via a Markov chain approach; it has been applied to the analysis of the hidden movements behind observed count data in a traffic network (Kumar, Sheldon, and Srivastava 2013), urban space (Iwata et al. 2017; Akagi et al. 2018; Iwata and Shimizu 2019), amusement park (Du, Kumar, and Varakantham 2014) and exhibition halls (Tanaka et al. 2018). + +An important function in CFDM analysis is MAP (maximum a posteriori) inference of the number of moving people from observed population data and parameters of the probabilistic model. This process is mainly used in two ways: (i) As a method for recovering people flow given observed population data and a human mobility model. 
Even if we can design a probabilistic model of human mobility using domain knowledge or estimate the model using another small set of movement (trajectory) data, we have to conduct MAP inference in order to know the number of people moving between areas. (ii) As a method for conducting E-step in the EM (Expectation Maximization) algorithm to estimate people flow and parameters of the probabilistic model simultaneously. Although E-step was implemented by the well-designed MCMC (Sheldon and Dietterich 2011) in the original CFDM proposal, its scalability was problematic. In order to address this issue, a method that uses MAP inference as an alternative to the regular expectation operation was widely used in subsequent research (Iwata et al. 2017; Akagi et al. 2018; Tanaka et al. 2018). + +Although methods for realizing MAP inference for CFDM are very important, previous proposals have several crucial drawbacks. (i) They do not provide exact MAP inference because they use continuous relaxation and Stirling's approximation. (ii) Each optimum solution element is non-integer because of continuous relaxation. As a result, the optimum solutions are dense with many non-zero elements and each solution occupies a lot of memory. (iii) When we deal with large scale problems, a lot of computation time is still + +Copyright © 2020, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved. +---PAGE_BREAK--- + +needed to solve the optimization problem. + +In this paper, we propose a novel method for MAP inference in CFDM that overcomes the shortcomings of previous approaches. Our key idea is formulating the MAP inference problem in CFDM as a combinatorial optimization problem called (non-linear) Minimum Cost Flow Problem (MCFP). Moreover, we prove that all cost functions of the MCFP are "discrete convex functions", discrete analogues of the continuous convex function. 
This fact indicates that the formulated MCFP is a Minimum Convex Cost Flow Problem (C-MCFP) variant, which is an efficiently solvable subclass of MCFP. On the basis of this formulation, we propose an efficient inference method that employs the C-MCFP algorithm as a subroutine. The proposal has the following advantages: + +1. It offers exact MAP inference as no approximation is used. + +2. Optimum solution elements are integers, which is consistent with the number of moving people. Moreover, the solution tends to be sparse and we can hold it with less memory by use of the sparse matrix data structure. + +3. By utilizing efficient algorithms for C-MCFP, fast estimation is possible. In addition, it is easy to use in practice because it is not necessary to set hyperparameters, and the calculation time is relatively insensitive to the probabilistic models and the optimum solutions. + +Our results are significant in that they bridge two distinct research topics, graph algorithms and CFDM inference. This work is the first to regard CFDM inference as a discrete optimization problem on a graph (all efficient existing methods transform the inference problem into a continuous optimization problem via approximation). Our non-trivial finding of the discrete convexity of the cost function is an important key in revealing the hidden relationship between graph algorithms and inference in collective flow diffusion. + +Experiments on synthetic and real datasets show that the proposed method is effective for MAP inference in terms of both running time and solution quality such as sparsity. Of particular note, running time is accelerated 10 times or more and sparsity of optimum solution is dramatically increased in most cases. Moreover, we use the proposal to conduct people flow estimation via the EM algorithm and confirm its effectiveness. + +## 2. Problem Setting + +For positive integer $k$, we denote $[k] := \{1, \dots, k\}$. 
Suppose that the target space is discretized into $n$ distinct areas. The people who were in area $i \in [n]$ at timestep $t$ will stay in $i$ or move to another area to be observed in area $j \in \Gamma_i$ at timestep $t+1$, where $\Gamma_i \subseteq [n]$ is the set of areas that can be moved to from area $i$. This process will be repeated for each $t \in [T-1]$, where $T$ is the total number of timesteps. + +The problem we address in this paper is formulated as follows. Suppose we are given the population of area $i$ at timestep $t$, $\dot{N}_{t,i}$ ($i \in [n], t \in [T]$). Our goal is to estimate the number of people who leave area $i$ at time $t$ and whose next area is $j$ at time $t+1$, $M_{tij}$ ($i \in [n], j \in [n], t \in [T-1]$). Figure 1 shows an example of this problem setting. + +Figure 1: An example of the problem setting where the number of areas $n = 3$ and the number of total timesteps $T = 3$. + +# 3. Background + +## 3.1 Collective Flow Diffusion Model (CFDM) + +Let $\theta_i = \{\theta_{ij}\}_{j \in \Gamma_i} (\sum_{j \in \Gamma_i} \theta_{ij} = 1)$ be the transition probability from area $i$ to other areas (including $i$ itself). We here assume $\theta_i$ does not depend on timestep $t$. Given population $N_{t,i}$ and transition probability $\theta_i$, the transition population $M_{ti} = \{M_{tij}\}_{j \in \Gamma_i} (t \in [T-1], i \in [n])$ is assumed to be decided by the following multinomial distribution: $M_{ti} \sim \text{Multi}(N_{t,i}, \theta_i)$. Given $\mathcal{N} = \{N_{t,i} | t \in [T], i \in [n]\}$ and $\mathcal{M} = \{M_{ti} | t \in [T-1], i \in [n]\}$, the likelihood function of $\theta = \{\theta_i | i \in [n]\}$ is given by + +$$ P(\mathcal{M} | N, \theta) \propto \prod_{t=1}^{T-1} \prod_{i \in [n]} \left( \frac{N_{t,i}!}{\prod_{j \in \Gamma_i} M_{tij}!} \prod_{j \in \Gamma_i} \theta_{ij}^{M_{tij}} \right). 
\quad (1) $$ + +In addition, the population in each area, $N_{t,i}$, and the transition population between areas, $M_{ti}$, satisfy the following two relationships $N_{t,i} = \sum_{j \in \Gamma_i} M_{tij}$, $N_{t+1,i} = \sum_{j \in \Gamma_i} M_{tji}$ ($t \in [T-1], i \in [n]$), which represent the law of conservation in the number of people. + +Our purpose is to estimate the true number of people moving between areas. We consider two problems: (i) estimation of $\mathcal{M}$ given $\mathcal{N}$ and $\theta$, and (ii) estimation of $\mathcal{M}$ and $\theta$ given only $\mathcal{N}$. The first problem, includes, for example, the case where it is possible to design a human movement model (i.e. $\theta$) in the target space based on domain knowledge, geographical information, or other data related to people movement such as a small amount of trajectory data. The second problem corresponds to the case that there is no clue as to $\theta$ and it is necessary to estimate everything from $\mathcal{N}$. + +In any case, an important subroutine in achieving our pur- +---PAGE_BREAK--- + +pose is solving the following MAP inference problem: + +$$ +\begin{align} +\max_{M} \quad & P(M | N, \theta) \nonumber \\ +\text{s.t.} \quad & N_{t,i} = \sum_{j \in \Gamma_i} M_{tij} \quad (t \in [T-1], i \in [n]), \tag{2} \\ +& N_{t+1,i} = \sum_{j \in \Gamma_i} M_{tji} \quad (t \in [T-1], i \in [n]), \nonumber \\ +& M_{tij} \in \mathbb{Z}_{\ge 0} \quad (t \in [T-1], i \in [n], j \in \Gamma_i). \nonumber +\end{align} +$$ + +In the first problem, the optimum solution of (2) is the de- +sired answer. A common approach to solving the second +problem is to estimate, alternatively, *M* and *θ* by the EM +algorithm considering *M* as a hidden variable and *θ* as pa- +rameter of a probabilistic model. 
Since high computational +cost is incurred in calculating the expected value of hidden +variable *M* by MCMC, a method to replace the expected +value with the solution of the MAP inference problem has +already been proposed (Sheldon et al. 2013) and is being +widely used to conduct E-step. This approach solves the opt- +imization problem (2) iteratively. + +**3.2 Minimum Cost Flow Problems** + +(Non-linear) Minimum Cost Flow Problem (MCFP) is a +combinatorial optimization problem defined as follows. Let +$G = (V, E)$ be a directed graph, where each node $i \in V$ has +supply value $b_i \in \mathbb{Z}$, and each edge $(i, j) \in E$ has capac- +ity $l_{ij} \in \mathbb{Z}_{\ge 0}$ and cost function $c_{ij}: \mathbb{Z}_{\ge 0} \rightarrow \mathbb{R}$. If $b_i > 0$ +we call node $i$ to be source, and if $b_i < 0$ we call sink. +MCFP is the problem of finding a minimum cost flow on $G$ +that satisfies the supply constraints at all nodes and capacity +constraints at all edges. MCFP can be described as follows: + +$$ +\begin{align} +\min_{x \in \mathbb{Z}^{|E|}} \quad & \sum_{(i,j) \in E} c_{ij}(x_{ij}) \notag \\ +\text{s.t.} \quad & \sum_{j:(i,j) \in E} x_{ij} - \sum_{j:(j,i) \in E} x_{ji} = b_i \quad (i \in V), \tag{3} \\ +& 0 \le x_{ij} \le l_{ij} \quad ((i,j) \in E). \notag +\end{align} +$$ + +Note that this paper considers the problems that restrict fea- +sible **x** to integer values i.e. **x** ∈ Z^{|E|}. Generally, MCFP (3) +is NP-hard and difficult to solve efficiently. However, spe- +cial cost functions make it possible to derive efficient opti- +mization algorithms. For example, MCFP with linear cost +functions, which is the most famous special case of MCFP, +is polynomial-time solvable and many efficient algorithms +have been developed (Kiraly and Kovacs 2012). 
Moreover, +Minimum Convex Cost Flow Problem (C-MCFP), in which +every cost function $c_{ij}$ satisfies “discrete convexity” $c_{ij}(x + 1) + c_{ij}(x - 1) \ge 2 \cdot c_{ij}(x)$ ($x = 1, 2, ...$), is known to be +an efficiently solvable subclass of MCFP (Ahuja, Magnanti, +and Orlin 1993). + +4. Proposed Method + +4.1 Formulation as C-MCFP + +We show that the optimization problem (2) can be +formulated as C-MCFP. After taking the logarithm of + +Figure 2: An example of MCFP formulation when the num- +ber of areas *n* = 3. *o* is the source and *d* is the sink of the +flow network. The capacity of edge (*o*, *u*ᵢ) equals to *N*ᵡᵢ and +the capacity of edge (*v*ᵢ, *d*) equals to *N*ᵡ₁ᵢ. + +the objective function (1) and omitting terms that +do not depend on M, the objective function equals +∑t∈[T-1]i∈[n]j∈Γi (-log Mtij! + Mtij log θij). Since +we can split (2) into independently solvable T - 1 subprob- +lems by t, all we have to do is solve the minimization prob- +lems described as follows for each t ∈ [T - 1]: + +$$ +\begin{equation} +\begin{array}{ll} +\min_{M_t} & \displaystyle \sum_{i \in [n]} \sum_{j \in \Gamma_i} (\log M_{tij}! - M_{tij} \log \theta_{ij}) \\ +\text{s.t.} & N_{t,i} = \sum_{j \in \Gamma_i} M_{tij} \quad (i \in [n]), \\ +& N_{t+1,i} = \sum_{j \in \Gamma_i} M_{tji} \quad (i \in [n]), \\ +& M_{tij} \in \mathbb{Z}_{\ge 0} \quad (i \in [n], j \in \Gamma_i). +\end{array} +\tag{4} +\end{equation} +$$ + +In order to formulate the problem (4) as MCFP, we con- +struct an instance by the procedure described below (an ex- +ample is shown in Figure 2): + +1. Let $V = \{u_i\}_{i \in [n]} \cup \{v_i\}_{i \in [n]} \cup \{o, d\}$. $u_i$ and $v_i$ correspond to area $i$ at timestep $t$ and timestep $t+1$, respectively. $o$ is the source node and $d$ is the sink node of the flow network. + +2. For $i \in [n]$, add edge $(o, u_i)$ with cost function $0$ (constant function) and capacity $N_{t,i}$. + +3. 
For $i \in [n]$, add edge $(v_i, d)$ with cost function $0$ and capacity $N_{t+1,i}$. + +4. For $i \in [n]$ and $j \in \Gamma_i$, add edge $(u_i, v_j)$ with cost function $f_{ij}(x) := \log x! - x \cdot \log \theta_{ij}$ and capacity $+\infty$. + +5. Set $b_o = \sum_{i \in [n]} N_{t,i}$, $b_d = -b_o = -\sum_{i \in [n]} N_{t,i}$ and $b_{u_i} = b_{v_i} = 0$ ($i \in [n]$). + +For the MCFP instance constructed above, the following +holds. + +**Proposition 1.** For $M_i^*$ defined by $M_{tij}^* = x_{ui,v_j}^* (i \in [n], j \in \Gamma_i)$ where $\boldsymbol{x}^*$ is the optimum solution of the MCFP +---PAGE_BREAK--- + +instance constructed above, $M_t^*$ is an optimum solution of +the optimization problem (4). + +**Proof of Proposition 1.** Let $\boldsymbol{x}$ to be a feasible solution of the constructed MCFP. From the non-negativity of $x_{ij}$ and flow conservation constraints at node $o$ and $d$, $x_{ou_i} = N_{ti}$ and $x_{v_{id}} = N_{t+1,i}$ ($\forall i \in [n]$) must be satisfied. From these facts and flow conservation constraints at node $u_i$ and $v_i$, $N_{t,i} = \sum_{j \in \Gamma_i} x_{u_i v_j}$ and $N_{t+1,i} = \sum_{j \in \Gamma_i} x_{v_j u_i}$ ($\forall i \in [n]$) hold. Since we restrict $\boldsymbol{x}$ to integer values and total MCFP cost is $\sum_{i \in [n]} \sum_{j \in \Gamma_i} (\log x_{u_i v_j}! - x_{u_i v_j} \log \theta_{ij})$, the constructed MCFP is equivalent to the optimization problem (4), so the proposition holds. $\square$ + +**Proposition 2.** For the MCFP instance constructed above, all cost functions satisfy discrete convexity, i.e. $c_{ij}(x+1)+c_{ij}(x-1) \ge 2 \cdot c_{ij}(x)$ ($x=1,2,\dots$). + +*Proof of Proposition 2.* It is clear that a constant function satisfies discrete convexity, so it is sufficient to check for $f_{ij}$. We have $f_{ij}(x+1) + f_{ij}(x-1) - 2 \cdot f_{ij}(x) = \log(x+1)! + \log(x-1)! - 2 \cdot \log x! = \log(x+1) - \log x \ge 0$. This confirms the discrete convexity of $f_{ij}$. 
$\square$ + +Proposition 1 says that by solving MCFP we can get an +optimum solution of problem (4). Proposition 2 shows that +the constructed MCFP instance belongs to C-MCFP. Since +C-MCFP is an efficiently solvable subclass of MCFP as de- +scribed in 3.2, we can design efficient algorithms to tackle +the original MAP inference problem (4). + +Note that problem (4) may not have any feasible solution if $\sum_{i \in [n]} N_{t,i} \neq \sum_{i \in [n]} N_{t+1,i}$ holds or $|\Gamma_i|$ ($i \in [n]$) is small. Such cases occur frequently when dealing with noisy real data. Even in such cases, our method with slight modification can output reasonable solutions. We describe this modification in Section 4.3. + +## 4.2 Algorithm + +We describe here an algorithm that can find exact optimum +solutions of C-MCFP, called Capacity Scaling algorithm +(CS) (Minoux 1986). CS is an algorithm that successively +augments flow along the shortest path from source to sink +in a residual graph, which is an auxiliary graph calculated +from the current flow. By maintaining a scalar value, called +potential, on each node and modifying edge costs to ensure +that they are non-negative, we can utilize Dijkstra's algo- +rithm (Dijkstra 1959), which is a fast algorithm for shortest +path search in graphs with non-negative edge costs. In or- +der to reduce the number of shortest path searches, CS is +designed to carry sufficiently large number of flows in each +path augmentation. The algorithm utilized in our work is the +one described in Chapter 14.5 of (Ahuja, Magnanti, and Or- +lin 1993). Although this algorithm is based on the idea of +(Minoux 1986), some changes have been made, so its com- +putation complexity differs from that of (Minoux 1986). 
+ +Given a C-MCFP instance with graph $G = (V, E)$, The- +orem 14.1 of (Ahuja, Magnanti, and Orlin 1993) claims that +CS runs in $O(|E| \cdot \log U \cdot S)$, where $U := \max_{i \in V} |b_i|$ is the +maximum absolute value of flow demand and $S$ is the time +complexity for solving a shortest path problem in graph $G$ + +**Algorithm 1** Algorithm for solving MAP inference problem (2) via capacity scaling algorithm + +**Require:** Population of each area and time *N*, transition matrix $\theta$ +**for all** *t* ∈ [*T* − 1] **do** + Construct C-MCFP instance based on *N**t*, *N**t*+1, θ by the procedure described in Section 4.1 + Get optimum solution *x** of constructed C-MCFP by capacity scaling algorithm + **for all** *i* ∈ [*n*] **do** + **for all** *j* ∈ Γ*i* **do** + *M**tij** ← *x**u*_i*v*_j* + **end for** + **end for** + **end for** + **return** *M** + +with non-negative edge costs. According to Dijkstra's algo- +rithm with binary heap, *S* is bounded by *O*(*|E|* · log *|V|*), so +the total time complexity is *O*(*|E|*2 · log *|V|* · log *U*). When +this algorithm is used to solve problem (4), its time complex- +ity is *O*(*m*2 · log *n* · log *F*), where *n* is the number of areas, +*m* is the number of edges of the adjacency graph between +the areas determined by Γ*i* (*i* ∈ [*n*]) and *F* := ∑*i*∈[*n*] *N**t*,i* +is the total population of targeted areas. Note that, the to- +tal complexity does not depend on the maximum value of +edge capacity, and it is guaranteed that the algorithm runs +efficiently even if the graph contains an edge with infinite +capacity. + +CS is a suitable algorithm for solving our problem in +the following sense: When dealing with real-world datasets, +sometimes *F* is extremely large (for example, in mobile spa- +tial statistics in the Greater Tokyo Area, which consists of +population distribution data by time and area, *F* is about +106–107). 
Therefore, the algorithm used to solve the formu- +lated C-MCFP should have sub-linear time complexity with +respect to *F*. Accordingly, CS is appropriate since its time +complexity is proportional to log *F*. + +The overall algorithm for solving the original MAP infer- +ence problem (2) is summarized in Algorithm 1. + +## 4.3 Handling with Infeasible Cases + +As mentioned in Section 4.1, when dealing with real-world +data, there may not be feasible solution to problem (4). To +address this problem and output a reasonable solution, we +add a few more steps in the instance construction procedure +described in Section 4.1. + +First, we add edge (o,d) with linear cost function Cx, +where C is a sufficiently large constant, and capacity +∞. +Next, we set b_o = S, b_d = -S, b_{u_i} = b_{v_i} = 0 (i ∈ [n]), +where S := max(∑_{i∈[n]} N_{t,i}, ∑_{i∈[n]} N_{t+1,i}). This newly +formulated MCFP always has a feasible solution and still +belongs to C-MCFP, so we can solve this by CS. + +In this case, $M_t^*$ calculated from the optimum solu- +tion of the MCFP does not necessarily satisfy the pop- +ulation conservation law $N_{t,i} = \sum_{j \in \Gamma_i} M_{tij}^*, N_{t+1,i} = \sum_{j \in \Gamma_i} M_{tji}^*(i \in [n])$, which are the constraints of the origi- +---PAGE_BREAK--- + +nal problem (4). We can interpret these discrepancies as fol- +lows: $N_{t,i} - \sum_{j \in \Gamma_i} M_{tij}^*$ is outflow from area $i$ to some- +where outside the targeted areas, and $N_{t+1,i} - \sum_{j \in \Gamma_i} M_{tji}^*$ is inflow from somewhere outside the targeted areas to area +$i$ between timesteps $t$ and $t + 1$. + +5. Experimental results + +Here, we use numerical experiments to demonstrate the practical utility of the proposed method. All experiments are conducted on a 64-bit CentOS 7.3 machine with Xeon(R) Gold 6126 CPU(2.60GHz)x2 and 512 GB memory. 
The capacity scaling algorithm is implemented in C++ (g++ 4.8.5 with the -O3 option); other codes were written in python 2.7.12 with SciPy (Jones et al. 2001). + +5.1 Compared methods + +We compare the proposed method with commonly used ones used in CFDM inference (Iwata et al. 2017; Akagi et al. 2018; Tanaka et al. 2018). In this method, we solve an optimization problem that has the following objective function $f(M_t) + \frac{\lambda}{2} \cdot g(M_t)$ under constraints $M_{tij} \in \mathbb{R}_{\ge 0}$, where + +$$ +f(\mathbf{M}_t) = \sum_{i \in [n], j \in \Gamma_i} (M_{tij} \log M_{tij} - M_{tij}(1 + \log \theta_{ij})), +$$ + +$$ +g(\mathbf{M}_t) = \sum_{i \in [n]} \left[ (N_{t,i} - \sum_{j \in \Gamma_i} M_{tij})^2 + (N_{t+1,i} - \sum_{j \in \Gamma_i} M_{tji})^2 \right] +$$ + +and $\lambda$ is a hyperparameter. This problem is derived by applying Stirling’s approximation and continuous relaxation to the objective function of (4), and adding constraints of people conservation law to objective function as penalty terms. $\lambda$ controls the strength of penalty terms. This optimization problem has a convex objective function and bound constraints, so we can get the global optimum by L-BFGS-B method (Byrd et al. 1995), which is implemented in scipy\.optimize. Our experiments explored three methods with $\lambda$ values of \{1, 10, 100\}. + +5.2 MAP inference: Synthetic data + +First, we compare running times and characteristics of the +optimum solutions of MAP inference problem (2) obtained +by each method using synthetic data. We randomly generate +synthetic instances of the MAP inference problem (2). We +consider an L × L grid space, where each cell corresponds +to one area. Γᵢ is set to be [n] for ∀i ∈ [n] (i.e. we consider +the “fully connected” situation). We set T = 2 and Nₜ,ᵢ ~ +Multi(F, p_t) (t = 1, 2), where F is the total population in +the grid space and p₁, p₂ ~ Dirichlet(1). θ is generated in +two ways as follows. + +1. 
$\theta_i \sim \text{Dirichlet}(1)$ for each $i \in [n]$ independently. We call this generation procedure "Dirichlet". + +2. $\theta_{ij} = \exp(-\text{dist}(i, j)) / \sum_{j \in \Gamma_i} \exp(-\text{dist}(i, j))$, where $\text{dist}(i, j)$ is the Euclidean distance between cell $i$ and $j$. We call this procedure "Exponential decay". This procedure reflects the characteristics typical of movements that people are likely to take over short distances rather than long ones. + +To clarify the dependence of computation time on the num- +ber of areas, L², and total population, F, we solve the MAP +inference problem for L = 10, 20, 30 fixing F to 10⁴, and +for F = 10⁴, 10⁵, 10⁶ fixing L to 20. We generate 10 ran- +dom instances for each evaluation. + +The average running times (seconds) for 10 instances by +each algorithm are summarized in Table 1. Each experiment +is executed with the time limit of 1000 seconds. If run- +ning time exceeds the time limit, running time of the trial +is recorded as 1000 seconds. In such a case, the averaged +value is underestimated. To clarify this, we tag average run- +ning time in the table with "> " if the time limit is exceeded +in even one instance. In the parentheses, standard deviation +of running times are shown if all 10 trials are completed in +the time limit. L-BFGS-B methods have longer running time +than the proposed method and varies with parameter settings +and instances. This unstable behavior will be problematic in +practical usage. The proposed method outperforms all other +methods in all settings. In particular, it offers the advantage +that it can solve problems with small computational time and +work stably even when L and F are large. 
+ +In order to compare the characteristics of optimum solu- +tions output by the proposed method and L-BFGS-B ($\lambda$ = +1), we solve two examples with $L = 5$, $F = 10^2$, “Expo- +nential decay” and $L = 5$, $F = 10^3$, “Exponential decay” +instances and checked the solutions in detail. The results are +shown in Figure 3. In this figure, the $L^2 \times L^2$ optimum so- +lution matrix obtained by each method are presented as a + heatmap. To investigate the sparsity structure of the solution, +the maximum value of heatmap is set to 1 and minimum +value to 0. While the solution obtained by L-BFGS-B is +blurred and contains a lot of small but non-zero elements (el- +ements with light colors) because of continuous relaxation, +proposed method is able to produce sparse solutions. We cal- +culated the sparseness of each solution by (# of near-zero +(< 10⁻⁴) elements)/(# of whole elements); the yielded val- +ues are 90%, 67% with proposed method and 0%, 0 % with +L-BFGS-B. This implies that the memory needed to hold the +solution can be reduced significantly by using sparse matrix +structure. Although we can get sparse solutions by rounding +the solutions of existing methods, this operation violates the +constraint of population conservation and degrades solution +quality. + +**5.3 MAP inference: Real data** + +We evaluate running times and characteristics of the opti- +mum solutions using real-world spatio-temporal population +data. We use mobile spatial statistics (Terada, Nagata, and +Kobayashi 2013), which is the hourly population data for +fixed size square grids calculated from mobile network op- +erational data. We use Tokyo and Kanagawa prefecture data, +which is the main part of the capital region of Japan, on +April 1st, 2015 (weekday) and April 5th, 2014 (holiday). +*N**t* is the population of each area at the clock time of *t*- +hour for *t* ∈ {0, 1, ..., 22} on each day. 
In order to com- +pare the performances of the methods at different cell width, +we aggregate population data of each cell and made datasets +with cell sizes of 5km × 5km, 2km × 2km, and 1km × +1km. The resulting datasets contain 200, 1017, 3711 cells, +---PAGE_BREAK--- + +Table 1: The average running time (seconds) of 10 synthetic instances when *F* is fixed to 10⁴ (above) and when *L* is fixed to 20 (below). The best running time is highlighted for each problem size. Values with "> " are underestimates due to the time limit. Standard deviation is shown in parentheses if all 10 trials are completed in the time limit. + +
type of θ
L
DirichletExponential decay
102030102030
Proposed0.05 (0.00)0.61 (0.01)4.54 (0.16)0.03 (0.00)0.46 (0.03)6.29 (2.60)
L-BFGS-B (λ = 1)6.51 (0.91)132.86 (15.46)357.32 (39.76)13.51 (2.00)273.25 (18.86)>911.22 (-)
L-BFGS-B (λ = 10)7.40 (1.27)143.14 (13.25)387.09 (56.31)13.87 (1.69)281.40 (19.18)>936.14 (-)
L-BFGS-B (λ = 100)9.65 (2.01)169.83 (17.19)440.77 (69.87)15.79 (1.36)297.40 (20.42)>975.64 (-)
+ +
type of θ
F
DirichletExponential decay
104105106104105106
Proposed0.71 (0.09)4.19 (0.85)14.25 (1.56)0.68 (0.22)2.44 (0.58)4.93 (0.94)
L-BFGS-B (λ = 1)140.16 (15.34)434.25 (114.80)>804.52 (-)323.87 (30.86)>1000.00 (-)>1000.00 (-)
L-BFGS-B (λ = 10)149.29 (14.35)503.72 (117.16)>880.68 (-)340.96 (41.54)>1000.00 (-)>1000.00 (-)
L-BFGS-B (λ = 100)175.65 (18.26)793.54 (146.68)>899.83 (-)356.24 (48.56)>1000.00 (-)>887.22 (-)
+ +Table 2: The average running time (seconds) for real data. The best running time is highlighted for each cell width. Values with "> " are underestimates due to the time limit. Standard deviation is shown in parentheses if all 10 trials are completed in the time limit. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
dataset
cell width
April 1st, 2015April 5th, 2015
5km2km1km5km2km1km
Proposed0.84 (0.16)9.16 (1.49)59.40 (22.38)0.41 (0.01)6.52 (1.15)54.00 (10.70)
L-BFGS-B (λ = 1)196.46 (139.61)>1000.00 (-)>1000.00 (-)68.76 (25.43)>940.84 (-)>1000.00 (-)
L-BFGS-B (λ = 10)14.96 (34.63)>1000.00 (-)>1000.00 (-)10.90 (19.85)>1000.00 (-)>1000.00 (-)
L-BFGS-B (λ = 100)2.04 (0.73)>811.94 (-)>1000.00 (-)0.99 (0.89)>697.78 (-)>1000.00 (-)
+ +respectively. + +We construct $\theta$ by the same procedure as “Exponen- +tial decay” in the synthetic data experiment and set +$\Gamma_i = \{j \mid j \in [n], \text{dist}(i, j) \le 5\}$, where $\text{dist}(i, j)$ is Eu- +clidean distance between cell $i$ and cell $j$ in the grid space. + +The results are summarized in Table 2. Time limit is set to be 1000 seconds, and average running time standard deviation are calculated in the same way as in the experiment on synthetic data. + +As shown, proposed method is able to solve +all instances in about 60 seconds. + +On the other hand, com- +pared methods fail to process 2km × 2km and 1km × 1km +datasets regardless of the value of λ. + +This shows the effec- +tiveness of the proposed method. + +**5.4 EM algorithm: Synthetic data** + +As mentioned, MAP inference is used for conducting E-step of EM algorithm to estimate the number of moving people and probabilistic model parameters. + +Here, we compare EM algorithm performance achieved with the proposed method and with the existing method using simulation data. + +We consider people movement in an *L* × *L* sized grid space (*L* = 10, 12). We construct transition matrix $\theta^{\text{true}}$ by $\theta_{ij} \propto s_i \cdot \exp(-\beta \cdot \text{dist}(i, j))$, where $s_i > 0$ ($i \in [n]$) is a parameter that represents how likely people are to gather at area *j*, and $\beta$ is a parameter that controls the decay of transition probability with increasing distance between *i* and *j*. This transition matrix is a variant of the one used in (Akagi et al. 2018). + +We set $\beta^{\text{true}} = 0.5$ and $s_i^{\text{true}}$ as follows: first, we randomly selected 3 areas from $[n]$ and set $s_i^{\text{true}} = 10$. For other areas, we set $s_i^{\text{true}} = 1$. 
We generate the population + +of each area, *N*, and number of moving people between areas, *M*, by simulating people movement following the procedure written in Section 3.1 until timestep *T* = 10 using transition matrix $\theta^{\text{true}}$. We set initial population $N_{1,i}$ to $10^4$ ($i \in [n]$). + +Our task is to estimate the number of moving peo- +ple, M, from observed population N by the EM algo- +rithm. For details of the EM algorithm, please see (Ak- +agi et al. 2018). In the algorithms, Γᵢ is set to be [n] for +∀i ∈ [n]. We evaluate algorithm performance by Normal- +ized Absolute Error (NAE) of M, which is calculated by +∑t,i,j |Mtijtrue - Mtijestimated| / ∑t,i,j Mtijtrue. EM algorithm +is iterated 200 times for each method. + +Figure 4 plots NAE versus the elapsed time for the EM algorithm with proposed method and previous method. + +It can be seen that the proposed method yields better NAE values more quickly than the previous method, especially at large *L*. For example, in the case of *L* = 12, it took the L-BFGS-B method about 9657 seconds to reach 1.15 for NAE (the dashed line in Figure 4). The proposed method, on the other hand, took only 24 seconds or so, which is about 400 times faster. + +**6. Related Work** + +Several methods have been proposed to realize MAP in- +ference efficiently in CGM, which is a general framework +including CFDM, (Sheldon et al. 2013; Sun, Sheldon, and +Kumar 2015; Nguyen et al. 2016; Vilnis et al. 2015). Note +that existing methods provide non-exact MAP inference and +output non-integer solutions. + +In (Akagi et al. 2018), an ef- +---PAGE_BREAK--- + +Figure 3: Comparison of optimum solution matrix in an $L \times L$ grid space obtained by proposed method and L-BFGS-B ($\lambda = 1$) with $\theta$ type of “Exponential decay”. The left is when $(L, F) = (5, 10^2)$ and the right is when $(L, F) = (5, 10^3)$, where F is the total population of the targeted areas. 
Sparsity pattern of obtained $L^2 \times L^2$ solution matrix is presented as a heatmap. $(i, j)$-element of solution matrix represents the number of moving people from area *i* to area *j*. In order to investigate sparsity structure of solutions, maximum value of color map is set to be 1 and minimum value is 0. The output of L-BFGS-B method is blurred and contains a lot of small but non-zero elements. In contrast, solution by proposed method is noticeably sparse. + +ficient optimization method for CFDM is proposed, but it +can be used only under a specially factorized probabilistic +model, which is designed to model human movements in ur- +ban spaces. In contrast, the proposal of this paper is widely +available and poses no excessive constraints on the underly- +ing transition model structure. + +There is a lot of work on people flow estimation via CFDM. For example, (Iwata et al. 2017; Akagi et al. 2018; Iwata and Shimizu 2019) deal with the estimation of people flows in urban spaces by utilizing variational inference, a factorized probabilistic model, or neural networks. In (Kumar, Sheldon, and Srivastava 2013) and (Tanaka et al. 2018), the inflow and outflow of each area at each timestep are assumed to be available, while (Tanaka et al. 2018) considers a time delay between before and after movement. Thus, there are many variations in terms of the observation model and the probabilistic model underlying movement. The method proposed herein can be used as a subroutine in any of these approaches by appropriately constructing instances of MCFP to suit the problem. + +Attempts to estimate human movement from aggregated + +Figure 4: NAE (Normalized Absolute Error) as a function of elapsed time for EM algorithm with each MAP inference method. + +count data have received a lot of attention. As a particularly +relevant study, Xue et al. 
proposed an algorithm for recov- +ering personal trajectories from aggregated count data for +the purpose of evaluating privacy risk for publishing such +data (Xu et al. 2017). Sheldon et al. proposed a method +to reconstruct sample paths of a Markov chain from par- +tial observations for the purpose of analyzing bird migra- +tion patterns (Sheldon, Elmohamed, and Kozen 2008). Al- +though those methods are similar to our method in the sense +of solving combinatorial assignment problems to recover +movement from aggregated data, there are two distinct dif- +ferences: (i) Those methods focus on recovering each indi- +vidual trajectory, not the collective movement of targets. (ii) +Those method do not have a mechanism to estimate the pa- +rameters of movement models. + +Many studies on another direction, predicting population +or people flow in cities, have been published (Konishi et al. +2016; Zhang et al. 2019; Jiang et al. 2019). Their approach is +to forecast future city dynamics at each area from past data +or other features in a supervised way, using classical regres- +sion models or deep learning architecture, etc. Our purpose +is estimating people flows between areas from only popu- +lation snapshots at incremental timesteps in a unsupervised +way, which is a totally different task from future prediction. + +**7. Conclusion** + +In this paper, we proposed a novel method for MAP infer- +ence in collective flow diffusion model. First, we showed +that the MAP inference problem can be formulated as a min- +imum convex cost flow problem. Based on this formulation, +we proposed an efficient algorithm for MAP inference prob- +---PAGE_BREAK--- + +lem using capacity scaling algorithm. Extensive evaluations on both real and synthetic datasets showed that our algorithm outperforms previous alternatives in terms of running time and optimum solution quality. + +## References + +Ahuja, R. K.; Magnanti, T. L.; and Orlin, J. B. 1993. 
*Network Flows: Theory, Algorithms, and Applications*. Prentice-Hall, Inc. + +Akagi, Y.; Nishimura, T.; Kurashima, T.; and Toda, H. 2018. A fast and accurate method for estimating people flow from spatiotemporal population data. In *IJCAI*, 3293–3300. + +Byrd, R. H.; Lu, P.; Nocedal, J.; and Zhu, C. 1995. A limited memory algorithm for bound constrained optimization. *SIAM Journal on Scientific Computing* 16(5):1190–1208. + +Dijkstra, E. W. 1959. A note on two problems in connexion with graphs. *Numerische mathematik* 1(1):269–271. + +Du, J.; Kumar, A.; and Varakantham, P. 2014. On understanding diffusion dynamics of patrons at a theme park. In *AAMAS*, 1501–1502. + +Iwata, T., and Shimizu, H. 2019. Neural collective graphical models for estimating spatio-temporal population flow from aggregated data. In *AAAI*, 3935–3942. + +Iwata, T.; Shimizu, H.; Naya, F.; and Ueda, N. 2017. Estimating people flow from spatiotemporal population data via collective graphical mixture models. *ACM Transactions on Spatial Algorithms and Systems* 3(1):1–18. + +Jiang, R.; Song, X.; Huang, D.; Song, X.; Xia, T.; Cai, Z.; Wang, Z.; Kim, K.-S.; and Shibasaki, R. 2019. Deepurban-event: A system for predicting citywide crowd dynamics at big events. In *KDD*, 2114–2122. ACM. + +Jones, E.; Oliphant, T.; Peterson, P.; et al. 2001–. SciPy: Open source scientific tools for Python. + +Kiraly, Z., and Kovacs, P. 2012. Efficient implementations of minimum-cost flow algorithms. *Acta Univ. Sapientiae* 4(1):67–118. + +Konishi, T.; Maruyama, M.; Tsubouchi, K.; and Shimosaka, M. 2016. Cityprophet: City-scale irregularity prediction using transit app logs. In *Ubicomp*, 752–757. ACM. + +Kumar, A.; Sheldon, D.; and Srivastava, B. 2013. Collective diffusion over networks: Models and inference. In *UAI*. + +Minoux, M. 1986. Solving integer minimum cost flows with separable convex cost objective polynomially. In *Netflow at Pisa*. Springer. 237–239. + +Morimura, T.; Osogami, T.; and Idé, T. 2013. 
Solving inverse problem of Markov chain with partial observations. In *NIPS*, 1655–1663. + +Nguyen, T.; Kumar, A.; Lau, H. C.; and Sheldon, D. 2016. Approximate inference using DC programming for collective graphical models. In *AISTATS*, 685–693. + +Sheldon, D. R., and Dietterich, T. G. 2011. Collective graphical models. In *NIPS*, 1161–1169. + +Sheldon, D.; Sun, T.; Kumar, A.; and Dietterich, T. 2013. Approximate inference in collective graphical models. In *ICML*, 1004–1012. + +Sheldon, D.; Elmohamed, M.; and Kozen, D. 2008. Collective inference on markov models for modeling bird migration. In *NIPS*, 1321–1328. + +Sun, T.; Sheldon, D.; and Kumar, A. 2015. Message passing for collective graphical models. In *ICML*, 853–861. + +Tanaka, Y.; Iwata, T.; Kurashima, T.; Toda, H.; and Ueda, N. 2018. Estimating latent people flow without tracking individuals. In *IJCAI*, 3556–3563. + +Terada, M.; Nagata, T.; and Kobayashi, M. 2013. Population estimation technology for mobile spatial statistics. *NTT DOCOMO Technical Journal* 14(3):10–15. + +Vilnis, L.; Belanger, D.; Sheldon, D.; and McCallum, A. 2015. Bethe projections for non-local inference. In *UAI*, 892–901. + +Xu, F.; Tu, Z.; Li, Y.; Zhang, P.; Fu, X.; and Jin, D. 2017. Trajectory recovery from ash: User privacy is not preserved in aggregated mobility data. In *WWW*, 1241–1250. + +Yang, H., and Zhou, J. 1998. Optimal traffic counting locations for origin-destination matrix estimation. *Transportation Research Part B: Methodological* 32(2):109–126. + +Zhang, J.; Zheng, Y.; Sun, J.; and Qi, D. 2019. Flow prediction in spatio-temporal networks based on multitask deep learning. *IEEE Transactions on Knowledge and Data Engineering*. 
\ No newline at end of file diff --git a/samples_new/texts_merged/1885128.md b/samples_new/texts_merged/1885128.md new file mode 100644 index 0000000000000000000000000000000000000000..ebb58b75e04a660da7d2ae98afeca1b09f22bc4e --- /dev/null +++ b/samples_new/texts_merged/1885128.md @@ -0,0 +1,507 @@ + +---PAGE_BREAK--- + +We are IntechOpen, +the world's leading publisher of +Open Access books +Built by scientists, for scientists + +5,300 +Open access books available + +131,000 +International authors and editors + +160M +Downloads + +Our authors are among the +TOP 1% +most cited scientists + +154 +Countries delivered to + +12.2% +Contributors from top 500 universities + +WEB OF SCIENCE™ + +Selection of our books indexed in the Book Citation Index +in Web of Science™ Core Collection (BKCI) + +Interested in publishing with us? +Contact book department@intechopen.com + +Numbers displayed above are based on latest data collected. +For more information visit www.intechopen.com +---PAGE_BREAK--- + +Low Sampling Rate Time Acquisition Schemes +and Channel Estimation Algorithms of +Ultra-Wideband Signals + +Wei Xu and Jiaxiang Zhao + +Nankai University +China + +# 1. Introduction + +Ultra-wideband (UWB) communication is a viable technology to provide high data rates over broadband wireless channels for applications, including wireless multimedia, wireless Internet access, and future-generation mobile communication systems (Karaoguz, 2001; Stoica et al., 2005). Two of the most critical challenges in the implementation of UWB systems are the timing acquisition and channel estimation. The difficulty in them arises from UWB signals being the ultra short low-duty-cycle pulses operating at very low power density. The Rake receiver (Turin, 1980) as a prevalent receiver structure for UWB systems utilizes the high diversity in order to effectively capture signal energy spread over multiple paths and boost the received signal-to-noise ratio (SNR). 
However, to perform maximal ratio combining (MRC), the Rake receiver needs the timing information of the received signal and the knowledge of the channel parameters, namely, gains and tap delays. Timing errors as small as fractions of a nanosecond could seriously degrade the system performance (Lovelace & Townsend, 2002; Tian & Giannakis, 2005). Thus, accurate timing acquisition and channel estimation is very essentially for UWB systems. + +Many research efforts have been devoted to the timing acquisition and channel estimation of UWB signals. However, most reported methods suffer from the restrictive assumptions, such as, demanding a high sampling rates, a set of high precision time-delay systems or invoking a line search, which severally limits their usages. In this chapter, we are focusing on the low sampling rate time acquisition schemes and channel estimation algorithms of UWB signals. First, we develop a novel optimum data-aided (DA) timing offset estimator that utilizes only symbol-rate samples to achieve the channel delay spread scale timing acquisition. For this purpose, we exploit the statistical properties of the power delay profile of the received signals to design a set of the templates to ensure the effective multipath energy capture at any time. Second, we propose a novel optimum data-aided channel estimation scheme that only relies on frame-level sampling rate data to derive channel parameter estimates from the received waveform. The simulations are provided to demonstrate the effectiveness of the proposed approach. +---PAGE_BREAK--- + +## 2. The channel model + +From the channel model described in (Foerster, 2003), the impulse response of the channel is + +$$h(t) = X \sum_{n=1}^{N} \sum_{k=1}^{K(n)} \alpha_{nk} \delta(t - T_n - \tau_{nk}) \quad (1)$$ + +where $X$ is the log-normal shadowing effect. $N$ and $K(n)$ represent the total number of the clusters, and the number of the rays in the $n$th cluster, respectively. 
$T_n$ is the time delay of the $n$th cluster relative to a reference at the receiver, and $\tau_{nk}$ is the delay of the $k$th multipath component in the $n$th cluster relative to $T_n$. From (Foerster, 2003), the multipath channel coefficient $\alpha_{nk}$ can be expressed as $\alpha_{nk} = p_{nk}\beta_{nk}$ where $p_{nk}$ assumes either +1 or -1 with equal probability, and $\beta_{nk} > 0$ has log-normal distribution. + +The power delay profile (the mean square values of $\{\beta_{nk}^2\}$) is exponential decay with respect to $\{T_n\}$ and $\{\tau_{nk}\}$, i.e., + +$$\langle \beta_{nk}^2 \rangle = \langle \beta_{00}^2 \rangle \exp(-\frac{T_n}{\Gamma}) \exp(-\frac{\tau_{nk}}{\gamma}) \quad (2)$$ + +where $\langle \beta_{00}^2 \rangle$ is the average power gain of the first multipath in the first cluster. $\Gamma$ and $\gamma$ are power-delay time constants for the clusters and the rays, respectively. + +The model (1) is employed to generate the impulse responses of the propagation channels in our simulation. For simplicity, an equivalent representation of (1) is + +$$h(t) = \sum_{l=0}^{L-1} \alpha_l \delta(t - \tau_l) \quad (3)$$ + +where $L$ represents the total number of the multipaths, $\alpha_l$ includes log-normal shadowing and multipath channel coefficients, and $\tau_l$ denotes the delay of the $l$th multipath relative to the reference at the receiver. Without loss of generality, we assume $\tau_0 < \tau_1 < \dots < \tau_{L-1}$. Moreover, the channel only allows to change from burst to burst but remains invariant (i.e., $\{\alpha_l, \tau_l\}_{l=0}^{L-1}$ are constants) over one transmission burst. + +## 3. Low sampling rate time acquisition schemes + +One of the most acute challenges in realizing the potentials of the UWB systems is to develop the time acquisition scheme which relies only on symbol-rate samples. Such a low sampling rate time acquisition scheme can greatly lower the implementation complexity. 
In addition, the difficulty in UWB synchronization also arises from UWB signals being the ultrashort low-duty-cycle pulses operating at very low power density. Timing errors as small as fractions of a nanosecond could seriously degrade the system performance (Lovelace & Townsend, 2002; Tian & Giannakis, 2005). + +A number of timing algorithms are reported for UWB systems recently. Some of the timing algorithms(Tian & Giannakis, 2005; Yang & Giannakis, 2005; Carbonelli & Mengali, 2006; He & Tepedelenlioglui, 2008) involve the sliding correlation that usually used in traditional narrowband systems. However, these approaches inevitably require a searching procedure and are inherently time-consuming. Too long synchronization time will affect +---PAGE_BREAK--- + +symbol detection. Furthermore, implementation of such techniques demands very fast +and expensive A/D converters and therefore will result in high power consumption. +Another approach (Carbonelli & Mengali, 2005; Furusawa et al., 2008; Cheng & Guan, 2008; +Sasaki et al., 2010) is to synchronize UWB signals through the energy detector. The merits +of using energy detectors are that the design of timing acquisition scheme could benefit +from the statistical properties of the power delay profile of the received signals. Unlike +the received UWB waveforms which is unknown to receivers due to the pulse distortions, +the statistical properties of the power delay profile are invariant. Furthermore, as shown +in (Carbonelli & Mengali, 2005), an energy collection based receiver can produce a low +complexity, low cost and low power consumption solution at the cost of reduced channel +spectral efficiency. + +In this section, a novel optimum data-aided timing offset estimator that only relies on +symbol-rate samples for frame-level timing acquisition is derived. 
For this purpose, we +exploit the statistical properties of the power delay profile of the received signals to design +a set of the templates to ensure the effective multipath energy capture at any time. We show +that the frame-level timing offset acquisition can be transformed into an equivalent amplitude +estimation problem. Thus, utilizing the symbol-rate samples extracted by our templates and +the ML principle, we obtain channel-dependent amplitude estimates and optimum timing +offset estimates. + +**3.1 The signal model** + +During the acquisition stage, a training sequence is transmitted. Each UWB symbol is transmitted over a time-interval of $T_s$ seconds that is subdivided into $N_f$ equal size frame-intervals of length $T_f$. A single frame contains exactly one data modulated ultrashort pulse $p(t)$ of duration $T_p$. And the transmitted waveform during the acquisition has the form as + +$$s(t) = \sqrt{E_f} \sum_{j=0}^{NN_f-1} d_{[j]_{N_{ds}}} p(t - jT_f - a_{\lfloor \frac{j}{N_f} \rfloor} \Delta) \quad (4)$$ + +where {$d_l$}$_{l=0}^{{N_{ds}}-1}$ with $d_l \in \{\pm 1\}$ is the DS sequence. The time shift $\Delta$ is chosen to be $T_h/2$ with $T_h$ being the delay spread of the channel. The assumption that there is no inter-frame interference suggests $T_h \le T_f$. For the simplicity, we assume $T_h = T_f$ and derive the acquisition algorithm. Our scheme can easily be extended to the case where $T_f \ge T_h$. The training sequence {$a_n$}$_{n=0}^{N-1}$ is designed as + +$$\{\underbrace{0, 0, 0, \dots, 0}_{n=0,1,\dots,N_0-1}, \underbrace{1, 0, 1, 0, \dots, 1, 0}_{n=N_0,N_0+1,\dots,N-1}\}$$ + +(5) + +i.e., the first $N_0$ consecutive symbols are chosen to be 0, and the rest symbols alternately switch between 1 and 0. + +The transmitted signal propagates through an L-path fading channel as shown in (3). Using the first arriving time $\tau_0$, we define the relative time delay of each multipath as $\tau_{l,0} = \tau_l - \tau_0$ +---PAGE_BREAK--- + +Fig. 1. 
The block diagram of acquisition approach. + +for $1 \le l \le L - 1$. Thus the received signal is + +$$r(t) = \sqrt{E_f} \sum_{j=0}^{NN_f-1} d_{[j]_{N_{ds}}} p_R(t-jT_f - a_{\lfloor \frac{j}{N_f} \rfloor} \Delta - \tau_0) + n(t) \quad (6)$$ + +where $n(t)$ is the zero-mean additive white Gaussian noise (AWGN) with double-side power spectral density $\sigma_n^2/2$ and $p_R(t) = \sum_{l=0}^{L-1} \alpha_l p(t - \tau_{l,0})$ represents the convolution of the channel impulse response (3) with the transmitted pulse $p(t)$. + +The timing information of the received signal is contained in the delay $\tau_0$ which can be decomposed as + +$$\tau_0 = n_s T_s + n_f T_f + \zeta \quad (7)$$ + +with $n_s = \lfloor \frac{\tau_0}{T_s} \rfloor$, $n_f = \lfloor \frac{\tau_0 - n_s T_s}{T_f} \rfloor$ and $\zeta \in [0, T_f)$. + +In the next section, we present an DA timing acquisition scheme based on the following assumptions: 1) There is no interframe interference, i.e., $\tau_{L-1,0} \le T_f$. 2) The channel is assumed to be quasi-static, i.e., the channel is constant over a block duration. 3) Since the symbol-level timing offset $n_s$ can be estimated from the symbol-rate samples through the traditional estimation approach, we assumed $n_s = 0$. In this chapter, we focus on acquiring timing with frame-level resolution, which relies on only symbol-rate samples. + +## 3.2 Analysis of symbol-rate sampled data $Y_0[n]$ + +As shown in Fig. 1, the received signal (6) first passes through a square-law detector. Then, the resultant output is separately correlated with the pre-devised templates $W_0(t)$, $W_1(t)$ and $W_2(t)$, and sampled at $nT_s$ which yields $\{Y_0[n]\}_{n=1}^{N-1}$, $\{Y_1[n]\}_{n=1}^{N-1}$ and $\{Y_2[n]\}_{n=1}^{N-1}$. Utilizing these samples, we derive an optimal timing offset estimator $\hat{n}_f$. 
+ +In view of (6), the output of the square-law detector is + +$$ \begin{aligned} R(t) &= r^2(t) = (r_s(t) + n(t))^2 = r_s^2(t) + m(t) \\ &= E_f \sum_{j=0}^{NN_f-1} p_R^2(t - jT_f - a_{\lfloor \frac{j}{N_f} \rfloor} \Delta - \tau_0) + m(t) \end{aligned} \quad (8) $$ +---PAGE_BREAK--- + +where $m(t) = 2r_s(t)n(t) + n^2(t)$. When the template $W(t)$ is employed, the symbol rate sampled data $Y[n]$ is + +$$ Y[n] = \int_{0}^{T_s} R(t+nT_s)W(t)dt. \quad (9) $$ + +Now we derive the decomposition of $Y_0[n]$, i.e., the symbol-rate samples when the template $W_0(t)$ defined as + +$$ W_0(t) = \sum_{k=0}^{N_f-1} w(t-kT_f), \quad w(t) = \begin{cases} 1, & 0 \le t < \frac{T_f}{2} \\ -1, & \frac{T_f}{2} \le t < T_f \\ 0, & \text{others} \end{cases} \quad (10) $$ + +is employed. Substituting $W_0(t)$ for $W(t)$ in (9), we obtain symbol-rate sampled data $Y_0[n]$. Recalling (5), we can derive the following proposition of $Y_0[n]$. + +**Proposition 1:** 1) For $1 \le n < N_0$, $Y_0[n]$ can be expressed as + +$$ Y_0[n] = N_f I_{\xi,0} + M_0[n], \quad (11) $$ + +2) For $N_0 \le n \le N-1$, $Y_0[n]$ can be represented as + +$$ Y_0[n] = \begin{cases} (2\Psi - N_f)I_{\xi,a_{n-1}} + M_0[n], & \zeta \in [0, T_\eta) \\ (2\Psi - N_f + 1)I_{\xi,a_{n-1}} + M_0[n], & \zeta \in [T_\eta, T_\eta + \frac{T_f}{2}) \\ (2\Psi - N_f + 2)I_{\xi,a_{n-1}} + M_0[n], & \zeta \in [T_\eta + \frac{T_f}{2}, T_f) \end{cases} \quad (12) $$ + +where $\Psi \triangleq n_f - \frac{1}{2}\epsilon$, $\epsilon \in [-\frac{1}{2}, \frac{1}{2}]$ and $T_\eta \in [\frac{T_f}{4}, \frac{T_f}{2}]$. $M_0[n]$ is the sampled noise, and $I_{\xi,a_n}$ is defined as + +$$ I_{\xi,a_n} \triangleq E_f \int_0^{T_f} \sum_{m=0}^2 p_R^2(t+mT_f-a_n\Delta-\xi)w(t)dt. \quad (13) $$ + +We prove the Proposition 1 and the fact that the sampled noise $M_0[n]$ can be approximated by a zero mean Gaussian variable in (Xu et al., 2009) in Appendix A and Appendix B respectively. 
There are some remarks on the Proposition 1: + +1) The fact of $a_{n-1} \in \{0, 1\}$ suggests that $I_{\xi,a_{n-1}}$ in (12) is equal to either $I_{\xi,0}$ or $I_{\xi,1}$. Furthermore, $I_{\xi,0}$ and $I_{\xi,1}$ satisfy $I_{\xi,1} = -I_{\xi,0}$ whose proof is contained in *Fact 1* of Appendix A. + +2) Equation (12) suggests that the decomposition of $Y_0[n]$ varies when $\zeta$ falls in different subintervals, so correctly estimating $n_f$ requires determining to which region $\zeta$ belongs. + +3) *Fact 2* of Appendix A which states + +$$ \left\{ \begin{array}{ll} I_{\xi,0} > 0, & \zeta \in [0, T_{\eta}) \cup [T_{\eta} + \frac{T_f}{2}, T_f] \\ I_{\xi,0} < 0, & \zeta \in [T_{\eta}, T_{\eta} + \frac{T_f}{2}) \end{array} \right. \quad (14) $$ + +suggests that it is possible to utilize the sign of $I_{\xi,0}$ to determine to which subinterval $\zeta$ belongs. However, when $I_{\xi,0} > 0$, $\zeta$ could belong to either $[0, T_{\eta})$ or $[T_{\eta} + \frac{T_f}{2}, T_f)$. To resolve this difficulty, we introduce the second template $W_1(t)$ in the next section. +---PAGE_BREAK--- + +### 3.3 Analysis of symbol-rate sampled data $Y_1[n]$ + +The symbol-rate sampled data $Y_1[n]$ is obtained when the template $W_1(t)$ is employed. $W_1(t)$ is a delayed version of $W_0(t)$ with the delayed time $T_d$ where $T_d \in [0, \frac{T_f}{2}]$. Our simulations show that we obtain the similar performance for the different choices of $T_d$. For the simplicity, we choose $T_d = \frac{T_f}{4}$ for the derivation. Thus, we have + +$$ +\begin{aligned} +Y_1[n] &= \int_{\frac{T_f}{4}}^{T_s+\frac{T_f}{4}} R(t+nT_s)W_0\left(t-\frac{T_f}{4}\right)dt \\ +&= \int_0^{T_s} R(t+nT_s+\frac{T_f}{4})W_0(t)dt. +\end{aligned} +\quad (15) $$ + +Then we can derive the following proposition of $Y_1[n]$. + +**Proposition 2:** 1) For $1 \le n < N_0$, $Y_1[n]$ can be expressed as + +$$ Y_1[n] = N_f J_{\zeta,0} + M_1[n]. 
\quad (16) $$ + +2) For $N_0 \le n \le N-1$, $Y_1[n]$ can be decomposed as + +$$ Y_1[n] = \begin{cases} (2\Psi - N_f - 1)J_{\zeta, a_{n-1}} + M_1[n], & \zeta \in [0, T_\eta - \frac{T_f}{4}) \\ (2\Psi - N_f)J_{\zeta, a_{n-1}} + M_1[n], & \zeta \in [T_\eta - \frac{T_f}{4}, T_\eta + \frac{T_f}{4}) \\ (2\Psi - N_f + 1)J_{\zeta, a_{n-1}} + M_1[n], & \zeta \in [T_\eta + \frac{T_f}{4}, T_f) \end{cases} \quad (17) $$ + +where $J_{\zeta,0}$ satisfies + +$$ \left\{ +\begin{array}{ll} +J_{\zeta,0} < 0, & \zeta \in [0, T_{\eta} - \frac{T_f}{4}) \cup [T_{\eta} + \frac{T_f}{4}, T_f) \\ +J_{\zeta,0} > 0, & \zeta \in [T_{\eta} - \frac{T_f}{4}, T_{\eta} + \frac{T_f}{4}). +\end{array} +\right. +\quad (18) $$ + +Equation (14) and (18) suggest that the signs of $I_{\zeta,0}$ and $J_{\zeta,0}$ can be utilized jointly to determine the range of $\zeta$, which is summarized as follows: + +**Proposition 3:** $\zeta \in [0, T_f]$ defined in (7) satisfies + +1. If $I_{\zeta,0} > 0$ and $J_{\zeta,0} > 0$, then $\zeta \in (T_{\eta} - \frac{T_f}{4}, T_{\eta})$. + +2. If $I_{\zeta,0} < 0$ and $J_{\zeta,0} > 0$, then $\zeta \in (T_{\eta}, T_{\eta} + \frac{T_f}{4})$. + +3. If $I_{\zeta,0} < 0$ and $J_{\zeta,0} < 0$, then $\zeta \in (T_{\eta} + \frac{T_f}{4}, T_{\eta} + \frac{T_f}{2})$. + +4. If $I_{\zeta,0} > 0$ and $J_{\zeta,0} < 0$, then $\zeta \in (0, T_{\eta} - \frac{T_f}{4}) \cup (T_{\eta} + \frac{T_f}{2}, T_f)$. + +The last case of Proposition 3 suggests that using the signs of $I_{\zeta,0}$ and $J_{\zeta,0}$ is not enough to determine whether we have $\zeta \in (0, T_{\eta} - \frac{T_f}{4})$ or $\zeta \in (T_{\eta} + \frac{T_f}{2}, T_f)$. To resolve this difficulty, the third template $W_2(t)$ is introduced. $W_2(t)$ is an auxiliary template and is defined as + +$$ W_2(t) = \sum_{k=0}^{N_f-1} v(t-kT_f), \quad v(t) = \begin{cases} 1, & T_f - 2T_v \le t < T_f - T_v \\ -1, & T_f - T_v \le t < T_f \\ 0, & \text{others} \end{cases} \quad (19) $$ + +where $T_v \in (0, T_f/10]$. 
Similar to the proof of (14), we can prove that in this case, either $K_{\zeta,0} > 0$ for $0 < \zeta < T_{\eta} - \frac{T_f}{4}$ or $K_{\zeta,0} < 0$ for $T_{\eta} + \frac{T_f}{2} < \zeta < T_f$ is valid, which yields the information to determine which region $\zeta$ belongs to. +---PAGE_BREAK--- + +### 3.4 The computation of the optimal timing offset estimator $\hat{n}_f$ + +To seek the estimate of $n_f$, we first compute the optimal estimates of $I_{\xi,0}$ and $J_{\xi,0}$ using (11) and (16). Then, we use the estimate $\hat{I}_{\xi,0}, \hat{J}_{\xi,0}$ and Proposition 3 to determine the region to which $\xi$ belongs. The estimate $\hat{\Psi}$ therefore can be derived using the proper decompositions of (12) and (17). Finally, recalling the definition in (12) $\Psi = n_f - \frac{\epsilon}{2}$ with $\epsilon \in [-\frac{1}{2}, \frac{1}{2}]$, we obtain $\hat{n}_f = [\hat{\Psi}]$, where $[\cdot]$ stands for the round operation. + +According to the signs of $\hat{I}_{\xi,0}$ and $\hat{J}_{\xi,0}$, we summarize the ML estimate $\hat{\Psi}$ as follows: + +**Proposition 4:** + +* When $\hat{I}_{\xi,0} > 0$ and $\hat{J}_{\xi,0} > 0$, $\hat{\Psi} = \frac{1}{A} \sum_{n=N_0}^{N-1} [Z_n + N_f(I_{\xi,0}^2 + J_{\xi,0}^2)]$. + +* When $\hat{I}_{\xi,0} < 0$ and $\hat{J}_{\xi,0} > 0$, $\hat{\Psi} = \frac{1}{A} \sum_{n=N_0}^{N-1} [Z_n + (N_f - 1)I_{\xi,0}^2 + N_f J_{\xi,0}^2]$. + +* When $\hat{I}_{\xi,0} < 0$ and $\hat{J}_{\xi,0} < 0$, $\hat{\Psi} = \frac{1}{A} \sum_{n=N_0}^{N-1} [Z_n + (N_f - 1)(I_{\xi,0}^2 + J_{\xi,0}^2)]$. + +* When $\hat{I}_{\xi,0} > 0$ and $\hat{J}_{\xi,0} < 0$, $\hat{\Psi} = \begin{cases} \frac{1}{A} \sum_{n=N_0}^{N-1} [Z_n + N_f I_{\xi,0}^2 + (N_f + 1) J_{\xi,0}^2] & , \hat{K}_{\xi,0} > 0 \\ \frac{1}{A} \sum_{n=N_0}^{N-1} [Z_n + (N_f - 2) I_{\xi,0}^2 + (N_f - 1) J_{\xi,0}^2] & , \hat{K}_{\xi,0} < 0 \end{cases}$ + +where $A \triangleq 2(N - N_0)(I_{\xi,0}^2 + J_{\xi,0}^2)$ and $Z_n \triangleq Y_0[n]I_{\xi,a_{n-1}} + Y_1[n]J_{\xi,a_{n-1}}$. 
The procedures of computing the optimal ML estimate $\hat{\Psi}$ in Proposition 4 are identical. Therefore, we only present the computation steps when $\hat{I}_{\xi,0} > 0$ and $\hat{J}_{\xi,0} > 0$. + +1. Utilizing (11) and (16), we obtain the ML estimates + +$$ \hat{I}_{\xi,0} = \frac{1}{(N_0-1)N_f} \sum_{n=1}^{N_0-1} Y_0[n], \quad \hat{J}_{\xi,0} = \frac{1}{(N_0-1)N_f} \sum_{n=1}^{N_0-1} Y_1[n]. \qquad (20) $$ + +2. From (1) of Proposition 3, it follows that $T_\eta - \frac{T_f}{4} < \zeta < T_\eta$ when $\hat{I}_{\xi,0} > 0$ and $\hat{J}_{\xi,0} > 0$. + +3. According to the region of $\zeta$, we can select the right equations from (12) and (17) as + +$$ Y_0[n] = (2\Psi - N_f)I_{\zeta,a_{n-1}} + M_0[n] \qquad (21) $$ + +$$ Y_1[n] = (2\Psi - N_f)J_{\zeta,a_{n-1}} + M_1[n]. \qquad (22) $$ + +Thus the log-likelihood function $\ln p(y; \Psi, I_{\zeta,a_{n-1}}, J_{\zeta,a_{n-1}})$ is + +$$ \sum_{n=N_0}^{N-1} \left\{ [Y_0[n] - (2\Psi - N_f) I_{\zeta,a_{n-1}}]^2 + [Y_1[n] - (2\Psi - N_f) J_{\zeta,a_{n-1}}]^2 \right\}. $$ + +It follows the ML estimate $\hat{\Psi} = \frac{1}{A}\sum_{n=N_0}^{N-1}[Z_n + N_f(I_{\zeta,0}^2 + J_{\zeta,0}^2)]$. + +### 3.5 Simulation + +In this section, computer simulations are performed. We use the second-order derivative of the Gaussian pulse to represent the UWB pulse. The propagation channels are generated +---PAGE_BREAK--- + +Fig. 2. MSE performance under CM2 with $d = 4m$.. + +Fig. 3. BER performance under CM2 with $d = 4m$.. + +by the channel model CM2 described in (Foerster, 2003). Other parameters are selected as follows: $T_p = 1$ns, $N_f = 25$, $T_f = 100$ns, $T_v = T_f/10$ and the transmitted distance $d = 4m$. In all the simulations, we assume that $n_f$ and $\zeta$ are uniformly distributed over $[0, N_f - 1]$ and $[0, T_f]$ respectively. 
To evaluate the effect of the estimate $\hat{n}_f$ on the bit-error-rates (BERs) performance, we assume there is an optimal channel estimator at the receiver to obtain the perfect template for tracking and coherent demodulation. The signal-to-noise ratios (SNRs) +---PAGE_BREAK--- + +in all figures are computed through $E_s/\sigma_n^2$ where $E_s$ is the energy spread over each symbol at the transmitter and $\sigma_n^2$ is the power spectral density of the noise. + +Fig. 2 presents the normalized mean-square error (MSE: $E\{(|\hat{n}_f - n_f|/N_f)^2\}$) of the proposed algorithm in contrast to the approach using noisy template proposed in (Tian & Giannakis, 2005). The figure shows that the proposed algorithm (blue curve) outperforms that in (Tian & Giannakis, 2005) (red curve) when the SNR is larger than 10dB. For both algorithms, the acquisition performance improves with an increase in the length of training symbols $N$, as illustrated by the performance gap between $N = 12$ and $N = 30$. Fig. 3 illustrates the BER performance for both algorithms. The BERs corresponding to perfect timing (green curve) and no timing (magenta curve) are also plotted for comparisons. + +## 4. Low sampling rate channel estimation algorithms + +The channel estimation of UWB systems is essential to effectively capture signal energy spread over multiple paths and boost the received signal-to-noise ratio (SNR). The low sampling rate channel estimation algorithms have the merits that can greatly lower the implementation complexity and reduce the costs. However, the development of low sampling rate channel estimation algorithms is extremely challenging. This is primarily due to the facts that the propagation models of UWB signals are frequency selective and far more complex than traditional radio transmission channels. + +Classical approaches to this problem use the maximum likelihood (ML) method or approximate the solutions of the ML problem. 
The main drawback of these approaches is that the computational complexity could be prohibitive since the number of parameters to be estimated in a realistic UWB channel is very high (Lottici et al., 2002). Other approaches reported are the minimum mean-squared error schemes which have the reduced complexity at the cost of performance (Yang & Giannakis, 2004). Furthermore, sampling rate of the received UWB signal is not feasible with state-of-the-art analog-to-digital converters (ADC) technology. Since UWB channels exhibit clusters (Cramer et al., 2002), a cluster-based channel estimation method is proposed in (Carbonelli & Mitra, 2007). Different methods such as subspace approach (Xu & Liu, 2003), first-order cyclostationary-based method (Wang & Yang, 2004) and compressed sensing based method (Paredes et al., 2007; Shi et al., 2010) proposed for UWB channel estimation are too complex to be implemented in actual systems. + +In this section, we develop a novel optimum data-aided channel estimation scheme that only relies on frame-level sampling rate data to derive channel parameter estimates from the received waveform. To begin with, we introduce a set of especially devised templates for the channel estimation. The received signal is separately correlated with these pre-devised templates and sampled at frame-level rate. We show that each frame-level rate sample of any given template can be decomposed to a sum of a frequency-domain channel parameter and a noise sample. The computation of time-domain channel parameter estimates proceeds through the following two steps: In step one, for each fixed template, we utilize the samples gathered at this template and the maximum likelihood criterion to compute the ML estimates of the frequency-domain channel parameters of these samples. In step two, utilizing the computed frequency-domain channel parameters, we can compute the time-domain channel parameters via inverse fast transform (IFFT). 
As demonstrated in the simulation example, +---PAGE_BREAK--- + +Fig. 4. The block diagram of channel estimation scheme. + +when the training time is fixed, more templates used for the channel estimation yield the better (BER) performance. + +## 4.1 The signal model + +During the channel estimation process, a training sequence is transmitted. Each UWB symbol is transmitted over a time-interval of $T_s$ seconds that is subdivided into $N_f$ equal size frame-intervals of length $T_f$, i.e., $T_s = N_f T_f$. A frame is divided into $N_c$ chips with each of duration $T_c$, i.e., $T_f = N_c T_c$. A single frame contains exactly one data modulated ultrashort pulse $p(t)$ (so-called monocycle) of duration $T_p$ which satisfies $T_p \le T_c$. The pulse $p(t)$ normalized to satisfy $\int p(t)^2 dt = 1$ can be Gaussian, Rayleigh or other. Then the waveform for the training sequence can be written as + +$$s(t) = \sqrt{E_f} \sum_{n=0}^{N_s-1} \sum_{j=0}^{N_f-1} b_n p(t - nT_s - jT_f) \quad (23)$$ + +where $E_f$ represents the energy spread over one frame and $N_s$ is the length of the training sequence; $b_n$ denotes data, which is equal to 1 during training phase. +Our goal is to derive the estimate of the channel parameter sequence $\mathbf{h} = [h_0, h_1, \dots, h_{L-1}]$. Since from the assumption $L$ is unknown, we define a $N_c$-length sequence $\mathbf{p}$ as + +$$\mathbf{p} = [h_0, h_1, \dots, h_{L-1}, h_L, h_{L+1}, \dots, h_{N_c-1}] \quad (24)$$ + +where $h_l = 0$ for $l \ge L$. The transmitted signal propagates through an $L$-path fading channel as shown in (3). Thus the received signal is + +$$r(t) = \sqrt{E_f} \sum_{n=0}^{N_s-1} \sum_{j=0}^{N_f-1} \sum_{l=0}^{N_c-1} h_l p(t - nT_s - jT_f - lT_c) + n(t) \quad (25)$$ + +where $n(t)$ is the zero-mean additive white Gaussian noise (AWGN) with double-side power spectral density $\sigma_n^2/2$. 
+---PAGE_BREAK--- + +## 4.2 The choices of templates + +In this section, a novel channel estimation method that relies on symbal-level samples is derived. As shown in Fig. 4, the received signal (25) is separately correlated with the pre-devised templates $W_0(t), W_1(t), \dots, W_S(t)$, and sampled at $nT_m$ where sampling period $T_m$ is on the order of $T_f$. Let $Y_i[n]$ denote the n-th sample corresponding to the template $W_i(t)$, that is, + +$$ Y_i[n] = \int_0^{T_m} r(t + nT_m)W_i(t)dt \quad (26) $$ + +with $i = 0, 1, \dots, S$. Utilizing these samples, we derive the ML estimate of the channel parameter sequence **p** in (24). + +First we introduce a set of $S+1$ templates used for the channel estimation. The number $S$ is chosen as a positive integer factor of $N_c/2$ by assuming that $N_c$ which represents the number of chips $T_c$ in each frame is an even number. That is, we have $N_c = 2SM$ with $M$ also being defined as a positive integer factor of $N_c/2$. The $i$-th template is defined as + +$$ W_i(t) = \sqrt{E_f} \sum_{k=0}^{N_o-1} \omega_{N_o}^{ik} [p(t - kT_c) + p(t - T_f - kT_c)] \quad (27) $$ + +with $N_o = 2S = N_c/M$, $\omega_{N_o}^{ik} = e^{-j\frac{2\pi ik}{N_o}}$ and $i \in \{0, 1, \dots, S\}$. The duration of each template $W_i(t)$ is equal to the sampling period $T_m$ which can be expressed as + +$$ T_m = (N_c + N_o)T_c = T_f + N_o T_c. \quad (28) $$ + +## 4.3 The computation of the channel parameter sequence p + +In this section, we derive the channel estimation scheme that only relies on frame-level sampling rate data. To begin with, let us introduce some notations. Recalling the equation $N_o = N_c/M$ following (27), we divide the $N_c$-length sequence **p** into $M$ blocks each of size $N_o$. 
Therefore, equation (24) becomes + +$$ \mathbf{p} = [\mathbf{h}_0, \mathbf{h}_1, \dots, \mathbf{h}_m, \dots, \mathbf{h}_{M-1}] \quad (29) $$ + +where the *m*-th block $\mathbf{h}_m$ is defined as + +$$ \mathbf{h}_m = [h_{mN_o}, h_{mN_o+1}, \dots, h_{mN_o+N_o-1}] \quad (30) $$ + +with $m \in \{0, 1, \dots, M-1\}$. Let $\mathbf{F}_i$ denote the $N_o$-length coefficient sequence of the $i$-th template $W_i(t)$ in (27), i.e., + +$$ \mathbf{F}_i = [\omega_{N_o}^0 \omega_{N_o}^i \omega_{N_o}^{2i} \dots \omega_{N_o}^{(N_o-1)i}] . \quad (31) $$ + +The discrete Fourier transform (DFT) of the $N_o$-length sequence $\mathbf{h}_m = [h_{mN_o}, h_{mN_o+1}, \dots, h_{mN_o+N_o-1}]$ is denoted as + +$$ \mathbf{H}_m = [H_m^0, H_m^1, \dots, H_m^i, \dots, H_m^{N_o-1}] \quad (32) $$ +---PAGE_BREAK--- + +where the frequency-domain channel parameter $H_m^i$ is + +$$ H_m^i = \mathbf{F}_i \mathbf{h}_m^T = \sum_{k=0}^{N_o-1} \omega_{N_o}^{ik} h_{mN_o+k} \quad (33) $$ + +with $m \in \{0, 1, \dots, M-1\}$ and $i \in \{0, 1, \dots, S\}$. + +Our channel estimation algorithm proceeds through the following two steps. + +**Step 1:** Utilizing the set of frame-level samples $\{Y_i[n]\}_{n=1}^N$ generated from the i-th template, we compute the ML estimates of the frequency-domain channel parameters $\{H_m^i\}_{m=1}^M$ for $i \in \{0, 1, \dots, S\}$. To do this, we show that the samples $\{Y_i[n]\}_{n=0}^{N-1}$ from the i-th template has the following decomposition. + +**Proposition 1:** Every sample in the set $\{Y_i[n]\}_{n=0}^{N-1}$ can be decomposed into the sum of a frequency-domain channel parameter and a noise sample, that is, + +$$ \left\{ \begin{array}{l} Y_i[qM] = 2E_f H_0^i + Z_i[qM] \\ Y_i[qM+1] = 2E_f H_1^i + Z_i[qM+1] \\ \vdots \\ Y_i[qM+m] = 2E_f H_m^i + Z_i[qM+m] \\ \vdots \\ Y_i[qM+M-1] = 2E_f H_{M-1}^i + Z_i[qM+M-1] \end{array} \right. \qquad (34) $$ + +where $Z_i[n]$ represents the noise sample. 
The parameter $q$ belongs to the set $\{0, 1, \dots, Q-1\}$ with $Q = \lfloor \frac{N}{M} \rfloor$. + +Performing ML estimation to the $(m+1)$-th equation in (34) for $q=0, 1, \dots, Q-1$, we can compute the ML estimate $\hat{H}_m^i$ for the frequency-domain channel parameter $H_m^i$ as + +$$ \hat{H}_m^i = \frac{1}{2E_f Q} \sum_{q=0}^{Q-1} Y_i[qM+m] \quad (35) $$ + +with $m \in \{0, 1, \dots, M-1\}$ and $i \in \{0, 1, \dots, S\}$. + +**Step 2:** Utilizing the computed frequency-domain channel parameters $\{\hat{H}_m^i\}_{i=0}^S$ from Step 1, we derive the estimate of the time-domain channel sequence $\mathbf{h}_m$ for $m \in \{0, 1, \dots, M-1\}$. Since the time-domain channel parameter sequence $\mathbf{h}_m = [h_{mN_o} \ h_{mN_o+1} \ \dots \ h_{mN_o+N_o-1}]$ is real valued, the conjugate symmetry of the DFT implies that the DFT of $\mathbf{h}_m$ satisfies + +$$ H_m^{N_o-i} = (H_m^i)^* \quad (36) $$ + +with $i \in \{0, 1, \dots, S\}$ and $S = N_o/2$. + +Utilizing equation (36), we obtain the estimate for the $N_o$-point DFT of $\mathbf{h}_m$ as + +$$ \hat{\mathbf{H}}_m = [\hat{H}_m^0, \hat{H}_m^1, \dots, \hat{H}_m^S, (\hat{H}_m^{S-1})^*, \dots, (\hat{H}_m^2)^*, (\hat{H}_m^1)^*] \quad (37) $$ +---PAGE_BREAK--- + +The estimate of the time-domain channel parameter $\hat{h}_m$ can be computed via $N_o$-point IFFT. In view of equation (29), the estimated channel parameter sequence **p** in (24) is given by + +$$ \hat{\mathbf{p}} = [\hat{\mathbf{h}}_0, \hat{\mathbf{h}}_1, \dots, \hat{\mathbf{h}}_{M-1}]. \quad (38) $$ + +Fig. 5. MSE performance of the algorithm proposed in (Wang & Ge, 2007) and the proposed algorithm with different number of templates ($S = 4, 8, 16$), when the length of the training sequence $N_s$ is 30. + +## 4.4 Simulation + +In this section, computer simulations are performed to test the proposed algorithm. The propagation channels are generated by the channel model CM 4 described in (Foerster, 2003). 
We choose the second-order derivative of the Gaussian pulse as the transmitted pulse with duration $T_p = 1$ ns. Other parameters are selected as follows: $T_f = 64$ ns, $T_c = 1$ ns, $N_c = 64$ and $N_f = 24$. + +Fig. 5 presents the normalized mean-square error (MSE) of our channel estimation algorithm with different number of templates ($S = 4, 8, 16$) when the length of the training sequence $N_s$ is 30. As a comparison, we also plot the MSE curve of the approach in (Wang & Ge, 2007) which needs chip-level sampling rate. Fig. 6 illustrates the bit-error-rates (BERs) performance for the both algorithms. The BERs corresponding to the perfect channel estimation (Perfect CE) is also plotted for comparisons. From these figures, the MSE and BER performances of our algorithm improve as the number of templates increases. In particular, as shown in Fig. 5 and Fig. 6, the MSE and BER performances of our algorithm that relies only on the frame-level sampling period $T_f = 64$ ns is comparable to that of the approach proposed in (Wang & Ge, 2007) which requires chip-level sampling period $T_c = 1$ ns. +---PAGE_BREAK--- + +Fig. 6. BER performance of Perfect CE, the algorithm proposed in (Wang & Ge, 2007) and the proposed algorithm with different number of templates ($S = 4, 8, 16$), when the length of the training sequence $N_s$ is 30. + +## 5. Conclusion + +In this chapter, we are focusing on the low sampling rate time acquisition schemes and channel estimation algorithms of UWB signals. First, we develop a novel optimum data-aided (DA) timing offset estimator that utilizes only symbol-rate samples to achieve the channel delay spread scale timing acquisition. For this purpose, we exploit the statistical properties of the power delay profile of the received signals to design a set of the templates to ensure the effective multipath energy capture at any time. 
Second, we propose a novel optimum data-aided channel estimation scheme that only relies on frame-level sampling rate data to derive channel parameter estimates from the received waveform. + +## 6. References + +* Karaoguz, J. (2001). High-rate wireless personal area networks, *IEEE Commun. Mag.*, vol. 39, pp. 96-102. + +* Lovelace, W. M. & Townsend, J. K. (2002). The effect of timing jitter and tracking on the performance of impulse radio, *IEEE J. Sel. Areas Commun.*, vol. 20, no. 9, pp. 1646-1651. + +* Tian, Z. & Giannakis, G. B. (2005). BER sensitivity to mistiming in ultrawideband impulse radios-part I: modeling, *IEEE Trans. Signal Processing*, vol. 53, no. 4, pp. 1550-1560. + +* Tian, Z. & Giannakis, G. B. (2005). A GLRT approach to data-aided timing acquisition in UWB radios-Part I: Algorithms, *IEEE Trans. Wireless Commun.*, vol. 53, no. 11, pp. IV.2956-2967. + +* Yang, L. & Giannakis, G. B. (2005). Timing Ultra-wideband Signals with Dirty Templates, *IEEE Trans. on Commun.*, vol. 53, pp. 1952-1963. +---PAGE_BREAK--- + +Carbonelli, C. & Mengali, U. (2006). Synchronization algorithms for UWB signals, *IEEE Trans. on Commun.*, vol. 54, no. 2, pp. 329-338. + +He, N. & Tepedelenlioglui, C. (2008). Joint Pulse and Symbol Level Acquisition of UWB Receivers, *IEEE Trans. on Wireless Commun.*, vol. 7, no. 1, pp. 6-14. + +Carbonelli, C. & Mengali, U. (2005). Low complexity synchronization for UWB noncoherent receivers, in *Proc. 2005 Vehicular Technology Conf.*, vol. 2, pp. 1350-1354. + +Furusawa, K.; Sasaki, M.; Hioki, J.; Itami, M.; (2008). Schemes of optimization of energy detection receivers for UWB-IR communication systems under different channel model, *IEEE International Conference on Ultra-Wideband*, pp.157 - 160, Leibniz Universitat Hannover, Germany. + +Cheng, X. & Guan, Y. (2008). Effects of synchronization errors on energy detection of UWB signals, *IEEE International Conference on Ultra-Wideband*, pp.161 - 164, Leibniz Universitat Hannover, Germany. 
+ +Sasaki, M.; Ohno, J.; Ohno, H.; Ohno, K.; Itami, M. (2010). A study on multi-user access in energy detection UWB-IR receiver, *2010 IEEE 11th International Symposium on Spread Spectrum Techniques and Applications (ISITA)* pp.141 - 146, Taichung, Taiwan. + +Xu, W.; Zhao,J.; Wang, D. (2009). A Frame-Level Timing Acquisition Scheme of Ultra-wideband Signals Using Multi-templates, *The 6th International Symposium on Wireless Communication Systems*, pp.61 - 65, Tuscany, Italy. + +J. Foerster, Channel modeling sub-committee report final, *IEEE P802.15-02/490*. + +Stoica, L.; Rabbachin, A.; Repo, H.; Tiuraniemi,T.; Oppermann, I. (2005). An ultra-wideband system architecture for tag based wireless sensor networks, *IEEE Trans. on Veh. Technol.*, vol. 54, no. 5, pp. 1632-1645. + +Turin, G. L. (1980). Introduction to spread-spectrum antimultipath techniques and their application to urban digital radio, *Proc. IEEE*, vol. 68, pp. 328-353. + +Lottici, V; D'Andrea, A. N.; Mengali, U. (2002). Channel estimation for ultra-wideband communications, *IEEE J. Select. Areas Commun.*, vol. 20, no. 9, pp. 1638-1645. + +Yang, L. & Giannakis, G. B. (2004). Optimal pilot waveform assisted modulation for ultra-wideband communications, *IEEE Trans. Wireless Commun.*, vol. 3, no. 4, pp. 1236-1249. + +Cramer, R. J. M.; Scholtz, R. A.; Win, M. Z. (2002). Evaluation of an ultra wideband propagation channel, *IEEE Trans. Antennas Propagat.*, vol. 50, No. 5. + +Carbonelli, C. & Mitra, U. (2007). Clustered ML Channel Estimation for Ultra-Wideband Signals, *IEEE Trans. Wireless Commun.*, vol. 6, No. 7,pp.2412 - 2416. + +Paredes, J.L.; Arce, G.R.; Wang, Z. (2007). Ultra-Wideband Compressed Sensing: Channel Estimation, *IEEE Journal of Selected Topics in Signal Processing*, vol. 1, No. 3,pp.383 - 395. + +Shi, L.; Zhou, Z.; Tang, L.; Yao, H.; Zhang, J. (2010). 
Ultra-wideband channel estimation based on Bayesian compressive sensing, *2010 International Symposium on Communications and Information Technologies (ISCIT)*, pp.779 - 782, Tokyo, Japan. + +Wang, X. & Ge, H. (2007). On the CRLB and Low-Complexity Channel Estimation for UWB Communications. *IEEE 41st Annual Conference on Information Sciences and Systems*, Baltimore, pp. 151-153. +---PAGE_BREAK--- + +Xu, Z. & Liu, P. (2003). A subspace approach to blind estimation of ultrawideband channels, in *Proc. IEEE Thirty-Seventh Asilomar Conference on Signals, Systems & Computers*. vol. 2, pp. 1249-1253. + +Wang, Z. & Yang, X. (2004). Ultra wide-band communications with blind channel estimation based on first-order statistics, in *Proc. IEEE (ICASSP-04)*. vol. 4, pp. iv-529 - iv-532, Montreal, Canada. +---PAGE_BREAK--- + +ULTRA WIDEBAND +COMMUNICATIONS + +NOVEL TRENDS - SYSTEM, ARCHITECTURE +AND IMPLEMENTATION + +Edited by Mohammad A. Matin + +Ultra Wideband Communications: Novel Trends - System, +Architecture and Implementation + +Edited by Dr. Mohammad Matin + +ISBN 978-953-307-461-0 + +Hard cover, 348 pages + +Publisher InTech + +Published online 27, July, 2011 + +Published in print edition July, 2011 + +This book has addressed few challenges to ensure the success of UWB technologies and covers several research areas including UWB low cost transceiver, low noise amplifier (LNA), ADC architectures, UWB filter, and high power UWB amplifiers. It is believed that this book serves as a comprehensive reference for graduate students in UWB technologies. + +## How to reference + +In order to correctly reference this scholarly work, feel free to copy and paste the following: + +Wei Xu and Jiaxiang Zhao (2011). Low Sampling Rate Time Acquisition Schemes and Channel Estimation Algorithms of Ultra-Wideband Signals, Ultra Wideband Communications: Novel Trends - System, Architecture and Implementation, Dr. 
Mohammad Matin (Ed.), ISBN: 978-953-307-461-0, InTech, Available from: http://www.intechopen.com/books/ultra-wideband-communications-novel-trends-system-architecture-and-implementation/low-sampling-rate-time-acquisition-schemes-and-channel-estimation-algorithms-of-ultra-wideband-signa + +## INTECH + +open science | open minds + +### InTech Europe + +University Campus STeP Ri +Slavka Krautzeka 83/A +51000 Rijeka, Croatia +Phone: +385 (51) 770 447 +Fax: +385 (51) 686 166 +www.intechopen.com + +### InTech China + +Unit 405, Office Block, Hotel Equatorial Shanghai +No.65, Yan An Road (West), Shanghai, 200040, China +中国上海市延安西路65号上海国际贵都大饭店办公楼405单元 +Phone: +86-21-62489820 +Fax: +86-21-62489821 +---PAGE_BREAK--- + +© 2011 The Author(s). Licensee IntechOpen. This chapter is distributed under the terms of the [Creative Commons Attribution-NonCommercial-ShareAlike-3.0 License](http://creativecommons.org/licenses/by-nc-nd/3.0/), which permits use, distribution and reproduction for non-commercial purposes, provided the original is properly cited and derivative works building on this content are distributed under the same license. \ No newline at end of file diff --git a/samples_new/texts_merged/1973835.md b/samples_new/texts_merged/1973835.md new file mode 100644 index 0000000000000000000000000000000000000000..b668e27e8280cf1a2ac3da799a5dd3904ce9dca3 --- /dev/null +++ b/samples_new/texts_merged/1973835.md @@ -0,0 +1,940 @@ + +---PAGE_BREAK--- + +# Representation Dependence in Probabilistic Inference + +**Joseph Y. 
Halpern** +*Cornell University, Computer Science Department* +*Ithaca, NY 14853* + +http://www.cs.cornell.edu/home/halpern + +HALPERN@CS.CORNELL.EDU + +**Daphne Koller** +*Stanford University, Computer Science Department* +*Stanford, CA 94035* + +http://www.cs.stanford.edu/koller + +KOLLER@CS.STANFORD.EDU + +## Abstract + +Non-deductive reasoning systems are often *representation dependent*: representing the same situation in two different ways may cause such a system to return two different answers. Some have viewed this as a significant problem. For example, the *principle of maximum entropy* has been subjected to much criticism due to its representation dependence. There has, however, been almost no work investigating representation dependence. In this paper, we formalize this notion and show that it is not a problem specific to maximum entropy. In fact, we show that any representation-independent probabilistic inference procedure that ignores irrelevant information is essentially entailment, in a precise sense. Moreover, we show that representation independence is incompatible with even a weak default assumption of independence. We then show that invariance under a restricted class of representation changes can form a reasonable compromise between representation independence and other desiderata, and provide a construction of a family of inference procedures that provides such restricted representation independence, using relative entropy. + +## 1. Introduction + +It is well known that the way in which a problem is represented can have a significant impact on the ease with which people solve it, and on the complexity of an algorithm for solving it. We are interested in what is arguably an even more fundamental issue: the extent to which the *answers* that we get depend on how our input is represented. 
Here too, there is well known work, particularly by Tversky and Kahneman (see, for example, (Kahneman, Slovic, & Tversky, 1982)), showing that the answers given by people can vary significantly (and in systematic ways) depending on how a question is framed. This phenomenon is often viewed as indicating a problem with human information processing; the implicit assumption is that although people do make mistakes of this sort, they shouldn't. On the other hand, there is a competing intuition that suggests that representation does (*should*) matter; representation dependence is just a natural consequence of this fact. + +Here we consider one type of reasoning, probabilistic inference, and examine the extent to which answers depend on the representation. The issue of representation dependence is of particular interest in this context because of the interest in using probability for knowledge representation (e.g., (Pearl, 1988)) and because probabilistic inference has been the source +---PAGE_BREAK--- + +of many of the concerns expressed regarding representation. However, our approach should +be applicable far more generally. + +We begin by noting that the notion of “probabilistic inference” has two quite different interpretations. In one interpretation, which forms the basis for the Bayesian paradigm, probabilistic inference consists basically of conditioning: We start out with a prior distribution over some event space, and then condition on whatever observations are obtained. In the other interpretation, we are given only a set of probabilistic assertions, and our goal is to reach conclusions about the probabilities of various events. For most of this paper, we focus on the latter interpretation, although we discuss the relationship to the Bayesian approach in Section 7.2. + +Suppose that we have a procedure for making inferences from a probabilistic knowledge base. How sensitive is it to the way knowledge is represented? 
Consider the following examples, which use perhaps the best-known non-deductive notion of probabilistic inference, *maximum entropy* (Jaynes, 1978).¹ + +**Example 1.1:** Suppose that we have no information whatsoever regarding whether an object is colorful. What probability should we assign to the proposition *colorful*? Symmetry arguments might suggest 1/2. Since we have no information, it seems that an object should be just as likely to be colorful as non-colorful. This is also the conclusion reached by maximum entropy provided that the language has only the proposition *colorful*. But now suppose we know about the colors red, blue, and green, and have propositions corresponding to each of these colors. Moreover, by *colorful* we actually mean *red ∨ blue ∨ green*. In this case, maximum entropy dictates that the probability of *red ∨ blue ∨ green* is 7/8. Note that, in both cases, the only conclusion that follows from our constraints is the trivial one: that the probability of the query is somewhere between 0 and 1. ■ + +**Example 1.2:** Suppose that we are told that half of the birds fly. There are two reasonable ways to represent this information. One is to have propositions *bird* and *fly*, and use a knowledge base $KB_1^{fly} \stackrel{\text{def}}{=}$ [Pr(*fly* | *bird*) = 1/2]. A second might be to have as basic predicates *bird* and *flying-bird*, and use a knowledge base $KB_2^{fly} \stackrel{\text{def}}{=}$ [(flying-bird ⇒ bird) ∧ Pr(flying-bird | bird) = 1/2]. Although the first representation may appear more natural, it seems that both representations are intuitively adequate insofar as representing the information that we have been given. But if we use an inference method such as maximum entropy, the first representation leads us to infer Pr(bird) = 1/2, while the second leads us to infer Pr(bird) = 2/3. ■ + +Examples such as these are the basis for the frequent criticisms of maximum entropy on the grounds of representation dependence.
But other than pointing out these examples, there has been little work on this problem. In fact, other than the work of Salmon (1961, 1963) and Paris (1994), there seems to have been no work on formalizing the notion of representation dependence. One might say that the consensus was: “whatever representation independence is, it is not a property enjoyed by maximum entropy.” But are there any + +1. Although much of our discussion is motivated by the representation-dependence problem encountered by maximum entropy, an understanding of maximum entropy and how it works is not essential for understanding our discussion. +---PAGE_BREAK--- + +other inference procedures that have it? In this paper we attempt to understand the notion of representation dependence, and to study the extent to which it is achievable. + +To study representation dependence, we must first understand what we mean by a "representation". The real world is complex. In any reasoning process, we must focus on certain details and ignore others. At a semantic level, the relevant distinctions are captured by using a space $X$ of possible alternatives or states (possible worlds). In Example 1.1, our first representation focused on the single attribute *colorful*. In this case, we have only two states in the state space, corresponding to *colorful* being true and false, respectively. The second representation, using *red*, *blue*, and *green*, has a richer state space. Clearly, there are other distinctions that we could make. + +We can also interpret a representation as a syntactic entity. In this case, we typically capture relevant distinctions using some formal language. For example, if we use propositional logic as our basic knowledge representation language, our choice of primitive propositions characterizes the distinctions that we have chosen to make. We can then take the states to be truth assignments to these propositions. 
Similarly, if we use a probabilistic representation language such as *belief networks* (Pearl, 1988) as our knowledge representation language, we must choose some set of relevant random variables. The states are then the possible assignments of values to these variables. + +What does it mean to shift from a representation (i.e., state space) $X$ to another representation $Y$? Roughly speaking, we want to capture at the level of the state space a shift from, say, feet to meters. Thus, in $X$ distances might be described in terms of feet where in $Y$ they might be described in terms of meters. We would expect there to be a constraint relating feet to meters. This constraint would not give any extra information about $X$; it would just relate worlds in $X$ to worlds in $Y$. Thus, we first attempt to capture representation independence somewhat indirectly, by requiring that adding constraints relating $X$ to $Y$ that place no constraints on $X$ itself should not result in different conclusions about $X$. The resulting notion, called *robustness*, turns out to be surprisingly strong. We can show that every robust inference procedure must behave essentially like logical entailment. + +We then try to define representation independence more directly, by using a mapping $f$ from one representation to another. For example, $f$ could map a world where an individual is 6 feet tall to the corresponding world where the individual is 1.83 meters tall. Some obvious constraints on $f$ are necessary to ensure that it corresponds to our intuition of a representation shift. We can then define a *representation-independent* inference procedure as one that preserves inferences under every legitimate mapping $f$; i.e., for any KB and $\theta$, $KB \vdash \theta$ iff $f(KB) \vdash f(\theta)$.
However, it is still a strong notion. In particular, any representation-independent inference procedure must act essentially like logical entailment for a knowledge base with only *objective* information (i.e., essentially non-probabilistic information). Moreover, we show that representation independence is incompatible with even the simplest default assumption of independence. Even if we are told nothing about the propositions $p$ and $q$, representation independence does not allow us to jump to the conclusion that $p$ and $q$ are independent. + +These results suggest that if we want inference procedures that are capable of jumping to nontrivial conclusions, then we must accept at least some degree of representation de- +---PAGE_BREAK--- + +pendence. They add support to the claim that the choice of language does carry a great +deal of information, and that complete representation independence is too much to expect. +On a more positive note, we show that we can use the intuition that the choice of language +carries information to get limited forms of representation independence. The idea is that the +language should put further constraints on what counts as an “appropriate” representation +shift. For example, suppose that certain propositions represent colors while others represent +birds. While we may be willing to transform *colorful* to red ∨ blue ∨ green, we may not be +willing to transform red to sparrow. There is no reason to demand that an inference pro- +cedure behave the same way if we suddenly shift to a wildly inappropriate representation, +where the symbols mean something completely different. We provide a general approach to +constructing inference procedures that are invariant under a specific class of representation +shifts. This construction allows us to combine some degree of representation independence +with certain non-deductive properties that we want of our inference procedure. 
In partic- +ular, we present an inference method that supports a default assumption of independence, +and yet is invariant under a natural class of representation shifts. + +The rest of this paper is organized as follows. In Section 2, we define probabilistic inference procedures and characterize them. In Section 3, we define robust inference procedures and show that every robust inference procedure is essentially entailment. In Section 4, we define representation independence, and show that representation independence is a very strong requirement. In particular, we show that a representation-independent inference procedure essentially acts like logical entailment on objective knowledge bases and that representation independence is incompatible with a default assumption of independence. Section 5 contains some general discussion of the notion of representation independence and how reasonable it is to assume that the choice of language should affect inference. While it may indeed seem reasonable to assume that the choice of language should affect inference, we point out that this assumption has some consequences that some might view as unfortunate. In Section 6, we discuss how limited forms of representation independence can be achieved. We discuss related work in Section 7, and conclude in Section 8. + +**2. Probabilistic Inference** + +We begin by defining probabilistic inference procedures. As we discussed in the introduction, there are two quite different ways in which this term is used. In one, we are given a prior distribution over some probability space; our “knowledge” then typically consists of events in that space, which can be used to condition that distribution and obtain a posterior. In the other, which is the focus of our work, a probabilistic inference procedure takes as input a probabilistic knowledge base and returns a probabilistic conclusion.
+ +We take both the knowledge base and the conclusion to be assertions about the proba- +bilities of events in some measurable space (X, $\mathcal{F}_X$), where a measurable space consists of a +set X and an algebra $\mathcal{F}_X$ of subsets of X (that is, $\mathcal{F}_X$ is a set of subsets of X closed under +union and complementation, containing X itself).² Formally, these assertions can be viewed +as statements about (or constraints on) probability measures on (X, $\mathcal{F}_X$). For example, if + +2. If $X$ is infinite, we may want to consider countably-additive probability measures and take $\mathcal{F}_X$ to be closed under countable unions. This issue does not play a significant role in this paper. For simplicity, we restrict to finite additivity and require only that $\mathcal{F}_X$ be closed under finite unions. +---PAGE_BREAK--- + +$S \in \mathcal{F}_X$, a statement $\Pr(S) \ge 2/3$ holds only for distributions where $S$ has probability at least 2/3. Therefore, if $\Delta_{(X,\mathcal{F}_X)}$ is the set of all probability measures on $(X, \mathcal{F}_X)$ (that is, all probability measures with domain $\mathcal{F}_X$), we can view a knowledge base as a set of constraints on $\Delta_{(X,\mathcal{F}_X)}$. When $\mathcal{F}_X$ is clear from context, we often omit it from the notation, writing $\Delta_X$ rather than $\Delta_{(X,\mathcal{F}_X)}$. + +We place very few restrictions on the language used to express the constraints. We assume that it includes assertions of the form $\Pr(S) \ge \alpha$ for all subsets $S \in \mathcal{F}_X$ and rational $\alpha \in [0,1]$, and that it is closed under conjunction and negation, so that if *KB* and *KB'* are knowledge bases expressing constraints, then so are *KB* ∧ *KB'* and ¬*KB*. (However, the language could include many assertions besides those obtained by starting with assertions of the form $\Pr(S) \ge \alpha$ and closing off under conjunction and negation.)
Since the language puts constraints on probability measures, we cannot directly say that $S \in \mathcal{F}_X$ must hold. The closest approximation in the language is the assertion $\Pr(S) = 1$. Thus, we call such constraints *objective*. A knowledge base consisting of only objective constraints is called an *objective knowledge base*. Since $\Pr(T_1) = 1 \land \Pr(T_2) = 1$ is equivalent to $\Pr(T_1 \cap T_2) = 1$, without loss of generality, an objective knowledge base consists of a single constraint of the form $\Pr(T) = 1$. Given a knowledge base *KB* placing constraints on $\Delta_X$, we write $\mu \models KB$ if $\mu$ is a measure in $\Delta_X$ that satisfies the constraints in *KB*. We use $[\![KB]\!]_X$ to denote all the measures satisfying these constraints. + +In practice, our knowledge is typically represented syntactically, using some logical language to describe the possible states. Typical languages include propositional logic, first-order logic, or a language describing the values for some set of random variables. In general, a base logic $\mathcal{L}$ defines a set of formulas $\mathcal{L}(\Phi)$ for a given vocabulary $\Phi$. In propositional logic, the vocabulary $\Phi$ is simply a set of propositional symbols. In probability theory, the vocabulary can consist of a set of random variables. In first-order logic, the vocabulary is a set of constant symbols, function symbols, and predicate symbols. To facilitate comparison between vocabularies, we assume that for each base logic all the vocabularies are finite subsets of one fixed infinite vocabulary $\Phi^*$. + +When working with a language, we assume that each state in the state space defines an interpretation for the symbols in $\Phi$ and hence for the formulas in $\mathcal{L}(\Phi)$. In the case of propositional logic, we thus assume that we can associate with each state a truth assignment to the primitive propositions in $\Phi$. 
For first-order logic, we assume that we can associate with each state a domain and an interpretation of the symbols in $\Phi$. In the probabilistic setting, we assume that we can associate with each state an assignment of values to the random variables. It is often convenient to assume that the state space is in fact some subset $W$ of $W(\Phi)$, the set of all interpretations for (or assignments to) the vocabulary $\Phi$. Note that the truth of any formula $\varphi$ in $\mathcal{L}(\Phi)$ is determined by a state. If $\varphi$ is true in some state $w$, we write $w \models \varphi$. + +The probabilistic extension $\mathcal{L}^{pr}(\Phi)$ of a base logic $\mathcal{L}(\Phi)$ is simply the set of probability formulas over $\mathcal{L}(\Phi)$. Formally, for each $\varphi \in \mathcal{L}(\Phi)$, $\Pr(\varphi)$ is a numeric term. The formulas in $\mathcal{L}^{pr}(\Phi)$ are defined to be all the Boolean combinations of arithmetic expressions involving numeric terms. For example, $\Pr(fly | bird) \ge 1/2$ is a formula in $\mathcal{L}^{pr}(\{\text{fly}, \text{bird}\})$ (where we interpret a conditional probability expression $\Pr(\varphi | \psi)$ as $\Pr(\varphi \wedge \psi)/\Pr(\psi)$ and then multiply to clear the denominator). By analogy with constraints, a formula of the form $\Pr(\varphi) = 1$ is called an *objective formula*. +---PAGE_BREAK--- + +Given a set $W \subseteq W(\Phi)$, assume that $\mathcal{F}_W$ is the algebra consisting of all sets of the form $[[\varphi]]_W = \{w : w \models \varphi\}$, for $\varphi \in \mathcal{L}(\Phi)$. (In the case of propositional logic, where $\Phi$ consists of a finite set of primitive propositions, $\mathcal{F}_W = 2^W$. In the case of first-order logic, not all sets are necessarily definable by formulas, so $\mathcal{F}_W$ may be a strict subset of $2^W$.) Let $\mu$ be a probability measure on $(W, \mathcal{F}_W)$. 
We can then ascribe semantics to $\mathcal{L}^{pr}(\Phi)$ in the probability space $(W, \mathcal{F}_W, \mu)$ in a straightforward way. In particular, we interpret the numeric term $\text{Pr}(\varphi)$ as $\mu(\{w \in W : w \models \varphi\})$. Since a formula $\varphi \in \mathcal{L}(\Phi)$ describes an event in the space $W$, a formula $\theta$ in $\mathcal{L}^{pr}(\Phi)$ is clearly a constraint on measures on $W$. We write $\mu \models \theta$ if the measure $\mu \in \Delta_W$ satisfies the formula $\theta$. + +A syntactic knowledge base $KB \in \mathcal{L}^{pr}(\Phi)$ can be viewed as a constraint on $\Delta_W$ in an obvious way. Formally, $KB$ represents the set of probability measures $[[KB]]_{\Phi} \subseteq \Delta_W$, which consists of all measures $\mu$ on $W$ such that $\mu \models KB$. + +We say that $KB$ (whether syntactic or semantic) is *consistent* if $[[KB]]_X \neq \emptyset$, i.e., if the constraints are satisfiable. Finally, we say that $KB$ *entails* $\theta$ (where $\theta$ is another set of constraints on $\Delta_X$), written $KB \models_X \theta$, if $[[KB]]_X \subseteq [[\theta]]_X$, i.e., if every measure that satisfies $KB$ also satisfies $\theta$. We write $\models_X \theta$ if $\theta$ is satisfied by every measure in $\Delta_X$. We omit the subscript $X$ from $\models$ if it is clear from context. + +Entailment is well-known to be a very weak method of drawing conclusions from a knowledge base, in particular with respect to its treatment of irrelevant information. Consider the knowledge base consisting only of the constraint $\Pr(\textit{fly} \mid \textit{bird}) \ge 0.9$. Even though we know nothing to suggest that red is at all relevant, entailment will not allow us to reach any nontrivial conclusion about $\Pr(\textit{fly} \mid \textit{bird} \land \textit{red})$. + +One way to get more powerful conclusions is to consider, not all the measures that satisfy $KB$, but a subset of them.
Intuitively, given a knowledge base $KB$, an inference procedure picks a subset of the measures satisfying $KB$, and infers $\theta$ if $\theta$ holds in this subset. Clearly, more conclusions hold for every measure in the subset than hold for every measure in the entire set. + +**Definition 2.1:** An $(X, \mathcal{F}_X)$-inference procedure is a partial function $I : 2^{\Delta(X, \mathcal{F}_X)} \to 2^{\Delta(X, \mathcal{F}_X)}$ such that $I(A) \subseteq A$ for $A \subseteq \Delta(X, \mathcal{F}_X)$ and $I(A) = \emptyset$ iff $A = \emptyset$ for all $A \in 2^{\Delta(X, \mathcal{F}_X)}$ in the domain of $I$ (i.e., for all $A$ for which $I$ is defined). We write $KB \Vdash_I \theta$ if $I([KB]_X) \subseteq [[\theta]]_X$. + +When $\mathcal{F}_X$ is clear from context or irrelevant, we often speak of X-inference procedures. We remark that Paris (1994) considers what he calls *inference processes*. These are just inference procedures as we have defined them that, given a set $A$ of probability measures, return a unique probability measure in $A$ (rather than an arbitrary subset of $A$). Paris gives a number of examples of inference processes. He also considers various properties that an inference process might have. Some of these are closely related to various properties of representation independence that we consider. We discuss Paris's work in Section 7. + +Entailment is the X-inference procedure defined on all sets determined by taking I to be the identity. Maximum entropy is also an inference procedure in this sense. + +**Definition 2.2:** Given a probability measure $\mu$ on a finite space $X$ (where all sets are measurable), its entropy $H(\mu)$ is defined as $-\sum_{x \in X} \mu(x) \log \mu(x)$. (The log is taken to the base 2 here.) 
Given a set $A$ of measures in $\Delta_X$, let $I_X^{me}(A)$ consist of the measures in $A$ +---PAGE_BREAK--- + +that have the highest entropy if there are measures in A whose entropy is at least as high +as that of any measure in A; if there are no such measures, $I^{me}(A)$ is undefined. ■ + +It is easy to see that $I^{me}(A)$ is defined if $A$ is closed (in the topological sense; i.e., if $\mu_n$ is a sequence of probability measures in $A$ and $\mu_n$ converges to $\mu$, then $\mu \in A$). Thus, we could take the domain of $I_X^{me}$ to consist only of the closed sets of measures in $\Delta_X$. There are also open sets $A$ for which $I^{me}(A)$ is defined, although it is not defined for all open sets $A$. For example, suppose $X = \{x_1, x_2\}$ and let $A = \{\mu : \mu(x_1) < 1/2\}$. Let $\mu_0$ be such that $\mu_0(x_1) = 1/2$. It is easy to check that $H(\mu_0) = 1$, and $H(\mu) < 1$ for $\mu \in A$. However, for all $\epsilon$, there is some $\mu \in A$ such that $H(\mu) > 1 - \epsilon$. It follows that there is no measure in $A$ whose entropy is higher than that of any other measure in $A$, so $I^{me}(A)$ is undefined. On the other hand, if $A' = \{\mu : \mu(x_1) < 2/3\}$, then there is a measure whose entropy is maximum in the open set $A'$, namely the measure $\mu_0$. + +There are, of course, many inference procedures besides entailment and maximum en- +tropy that can be defined on a measurable space. In fact, as the following proposition shows, +any binary relation $\Vdash$ satisfying certain reasonable properties is an inference procedure of +this type. + +**Proposition 2.3:** If *I* is an *X*-inference procedure then the following properties hold for every *KB*, *KB'*, θ, ψ over *X* such that *KB* is in the domain of *I*. + +• Reflexivity: $KB \Vdash_I KB$. + +* Left Logical Equivalence: if KB is logically equivalent to KB', i.e., if $\models KB \Leftrightarrow KB'$, then for every $\theta$ $KB \Vdash_I \theta$ iff $KB' \Vdash_I \theta$.
+ +• Right Weakening: if $KB \Vdash_I \theta$ and $\models \theta \Rightarrow \psi$ then $KB \Vdash_I \psi$. + +* And: if $KB \Vdash_I \theta$ and $KB \Vdash_I \psi$, then $KB \Vdash_I \theta \wedge \psi$. + +* Consistency: if KB is consistent then $KB \nVdash_I false$. ■ + +**Proof:** Straightforward from the definitions. ■ + +Interestingly, these properties are commonly viewed as part of a core of reasonable +properties for a nonmonotonic inference relation (Kraus, Lehmann, & Magidor, 1990). + +We would like to also prove a converse, showing that any relation $\vdash$ over probabilistic constraints on some space X that satisfies the five properties above must have the form $\vdash_{I_X}$. This is not quite true, as the following example shows. + +**Example 2.4:** Fix a measurable space $(X, \mathcal{F}_X)$. Let the language consist of all (finite) Boolean combinations of statements of the form $\text{Pr}(S) \ge \alpha$, where $S \in \mathcal{F}_X$. Now fix one nonempty strict subset $S_0$ of $X$, and let $\varphi_n$ be the statement $\text{Pr}(S_0) \le 1/n$. Define an inference procedure $\Vdash$ as follows. If KB is not equivalent to true (i.e., if $[KB]_X \neq \Delta_X$), then KB $\Vdash$ $\theta$ iff $KB \models \theta$. On the other hand, true $\Vdash$ $\theta$ iff $\varphi_n \models \theta$ for all sufficiently large $n$. That is, true $\Vdash$ $\theta$ if there exists an $N$ such that for all $n \ge N$, we have $\varphi_n \models \theta$. It is easy to check that all five properties in Proposition 2.3 hold for $\Vdash$. However, $\Vdash$ is not $\Vdash_I$ for an $X$-inference procedure $I$. For suppose it were. Note that $\varphi_n \models \varphi_m$ for all $n \ge m,$ +---PAGE_BREAK--- + +so true $\vdash \varphi_m$ for all $m$. Thus, we must have $I(\Delta_X) \subseteq [[\varphi_n]]_X$ for all $n$. It follows that $I_X(\Delta_X) \subseteq [[\Pr(S_0) = 0]]_X$, and so true $\vdash_I \Pr(S_0) = 0$.
However, $\varphi_n \not\models \Pr(S_0) = 0$ for any $n$, so we do not have true $\vdash \Pr(S_0) = 0$. This contradicts the assumption that $\vdash = \vdash_I$. $\blacksquare$ + +Essentially what we need to get the converse to Proposition 2.3 is an infinitary version of +the And Rule, which would say that if KB $\vdash_I \theta_i$ for all $i$, then KB $\vdash_I \bigwedge_i \theta_i$. If the language +were closed under infinite conjunctions, then this rule would in fact be just what we need. +Since we have not assumed that the language is closed under infinite conjunctions, we use +a variant of this rule. + +* *Infinitary And:* For any set $\Sigma$ of statements, if $KB \vdash_I \theta$ for all $\theta \in \Sigma$ and $\Sigma \models \psi$, then $KB \vdash_I \psi$. + +**Proposition 2.5:** Let $\vdash$ be a relation over probabilistic constraints on $X$ for which the properties Reflexivity, Left Logical Equivalence, Right Weakening, Infinitary And, and Consistency hold for all $KB$ in the domain of $\vdash$. (That is, if $KB$ is in the domain of $\vdash$ in that $KB \vdash \theta$ for some $\theta$, then $KB \vdash KB$, and so on.) Then $\vdash$ is $\vdash_I$ for some $X$-inference procedure $I$. + +**Proof:** See Appendix A.1. $\blacksquare$ + +We are typically interested not just in an inference procedure defined on one space X, +but in a family of related inference procedures, defined on a number of spaces. For example, +entailment is an inference procedure that is defined on all spaces X; maximum entropy is +defined on all finite measurable spaces (X, 2^X). + +**Definition 2.6:** If $\mathcal{X}$ is a set of measurable spaces, an $\mathcal{X}$-inference procedure is a set $\{I_{(X,\mathcal{F}_X)} : (X,\mathcal{F}_X) \in \mathcal{X}\}$, where $I_{(X,\mathcal{F}_X)}$ is an $(X,\mathcal{F}_X)$-inference procedure for $(X,\mathcal{F}_X) \in \mathcal{X}$.
+ +We sometimes talk about an $\mathcal{X}$-inference procedure $I$, and write $KB \vdash_I \theta$ when $(X, \mathcal{F}_X) \in \mathcal{X}$ is clear from context. However, it should be stressed that, formally, an $\mathcal{X}$-inference procedure is really a set of inference procedures (typically related in some natural way). + +Clearly entailment is an $\mathcal{X}$-inference procedure for any $\mathcal{X}$, where $I_X$ is simply the identity function for $X \in \mathcal{X}$. If $\mathcal{X}$ consists of finite measurable spaces where all sets are measurable, then maximum entropy is an $\mathcal{X}$-inference procedure. We typically denote this inference procedure $\vdash_{me}$. Thus, $KB \vdash_{me} \theta$ if $\theta$ holds for all the probability measures of maximum entropy satisfying $KB$. + +**Important assumptions:** For the remainder of this paper, we deal only with $\mathcal{X}$-inference procedures $I$ for which $\mathcal{X}$ satisfies two richness assumptions. These assumptions hold for all the standard inference procedures that have been considered. + +* We assume that $\mathcal{X}$ is closed under crossproducts, so that if $(X, \mathcal{F}_X), (Y, \mathcal{F}_Y) \in \mathcal{X}$, +then $(X \times Y, \mathcal{F}_{X\times Y}) \in \mathcal{X}$, where $\mathcal{F}_{X\times Y}$ is the algebra formed by taking finite unions +of disjoint sets of the form $S \times T$, for $S \in \mathcal{F}_X$ and $T \in \mathcal{F}_Y$. It is easy to see that +this is an algebra, since $\overline{S \times T} = \overline{S} \times T \cup S \times \overline{T} \cup \overline{S} \times \overline{T}$ +and $(S \times T) \cap (S' \times T') = (S \cap S') \times (T \cap T')$ (from which it also follows that any union of such sets can be +---PAGE_BREAK--- + +written as a disjoint union). Note that if $X$ and $Y$ are finite sets, $\mathcal{F}_X = 2^X$, and $\mathcal{F}_Y = 2^Y$, then $\mathcal{F}_{X\times Y} = 2^{X\times Y}$.
As we shall see, having $(X \times Y, \mathcal{F}_{X\times Y}) \in \mathcal{X}$ if each of $(X, \mathcal{F}_X)$ and $(Y, \mathcal{F}_Y)$ is in $\mathcal{X}$ allows us to relate constraints on $X$ to constraints on $Y$ in a natural way. + +* We assume that $\mathcal{X}$ contains sets of all finite cardinalities; more precisely, for all $n \ge 2$, there exists a set $(X, \mathcal{F}_X) \in \mathcal{X}$ such that $|X| = n$ and $\mathcal{F}_X = 2^X$. This assumption is not actually needed for any of our results, since the assumption that $\mathcal{X}$ is closed under crossproducts already implies that, for any finite $n$, there exists a measurable space $(X, \mathcal{F}_X) \in \mathcal{X}$ such that $|X| \ge n$; this already suffices to prove all the results of the paper. However, assuming that $\mathcal{X}$ has sets of all cardinalities does make the proofs easier. + +We also want the domain of $I$ to satisfy certain assumptions, but we defer stating these assumptions until we have introduced some additional definitions and notation. + +## 3. Robustness + +In order to define robustness to representation shifts, we must first define the notion of a representation shift. Our first attempt at this definition is based on the idea of using constraints that specify the relationship between the two vocabularies. For example, in Example 1.1, we might have $X = \{\text{colorful}, \text{colorless}\}$ and $Y = \{\text{red}, \text{blue}, \text{green}, \text{colorless}\}$. We can specify the relationship between $X$ and $Y$ via a constraint that asserts that $\text{colorful} \Leftrightarrow (\text{red} \vee \text{blue} \vee \text{green})$. + +Of course, not every constraint is a legitimate mapping between representations. For example, a formula that asserted $\neg\text{colorful}$ is obviously not a legitimate representation shift. At a minimum, we must assume that the constraint does not give any additional information about $X$ as far as logical inference goes. 
At a syntactic level, we can use the following definition. Given a knowledge base $KB \in \mathcal{L}^{pr}(\Phi)$, we say that $\psi \in \mathcal{L}^{pr}(\Phi \cup \Phi')$ is $\Phi$-conservative over $KB$ if, for all formulas $\varphi \in \mathcal{L}^{pr}(\Phi)$, we have $KB \models \varphi$ iff $KB \wedge \psi \models \varphi$. Thus, adding $\psi$ to the knowledge base does not permit any additional logical inferences in the vocabulary $\Phi$. An inference procedure $I$ is robust if it is unaffected by conservative extensions; that is, if $KB, \varphi \in \mathcal{L}^{pr}(\Phi)$, then $KB \Vdash_I \varphi$ iff $KB \wedge \psi \Vdash_I \varphi$ for all $\psi$ that are $\Phi$-conservative over $KB$. Roughly speaking, this says that getting new information that is uninformative as far as logical inference goes does not affect default conclusions. + +The formal definition of robustness, which uses semantic rather than syntactic concepts, extends these intuitions to arbitrary constraints on measures (not just ones that can be expressed in the language $\mathcal{L}^{pr}$). + +**Definition 3.1:** For $\mu \in \Delta_{X_1 \times \dots \times X_n}$, define $\mu_{X_i} \in \Delta_{X_i}$ by taking $\mu_{X_i}(A) = \mu(X_1 \times \dots \times X_{i-1} \times A \times X_{i+1} \times \dots \times X_n)$. A constraint $\varphi$ on $\Delta_{X_i}$ can be viewed as a constraint on $\Delta_{X_1 \times \dots \times X_n}$ by taking $[\varphi]_{X_1 \times \dots \times X_n} = \{\mu \in \Delta_{X_1 \times \dots \times X_n} : \mu_{X_i} \models \varphi\}$. We frequently identify constraints on $X_i$ with constraints on $X_1 \times \dots \times X_n$ in this way. For $B \subseteq \Delta_{X_1 \times \dots \times X_n}$, define $\text{proj}_{X_i}(B) = \{\mu_{X_i} : \mu \in B\}$. A constraint $\psi$ on $\Delta_{X_1 \times \dots \times X_n}$ is said to be $X_i$-conservative over the constraint $KB$ on $\Delta_{X_i}$ if $\text{proj}_{X_i}([KB \wedge \psi]_{X_1 \times \dots \times X_n}) = [KB]_{X_i}$. 
■ +---PAGE_BREAK--- + +To see that this definition generalizes the earlier language-oriented definition, note that if $\varphi$ and $KB$ are constraints on $\Delta_X$ and $\psi$ is a constraint on $\Delta_{X\times Y}$, then $KB \wedge \psi \models \varphi$ iff $\mathrm{proj}_1(\llbracket KB \wedge \psi \rrbracket_{X\times Y}) \subseteq \llbracket \varphi \rrbracket_X$, while $KB \models \varphi$ iff $\llbracket KB \rrbracket_X \subseteq \llbracket \varphi \rrbracket_X$. + +**Definition 3.2:** $\{I_X : X \in \mathcal{X}\}$ is a robust $\mathcal{X}$-inference procedure if for all spaces $X, Y \in \mathcal{X}$, constraints $KB$ and $\varphi$ on $\Delta_X$, and constraints $\psi$ on $\Delta_{X\times Y}$ that are $X$-conservative over $KB$, we have $KB \Vdash_{I_X} \varphi$ iff $KB \wedge \psi \Vdash_{I_{X\times Y}} \varphi$. (Note that this definition implicitly assumes that $X \times Y \in \mathcal{X}$ if $X, Y \in \mathcal{X}$, an assumption we made explicit earlier.) $\blacksquare$ + +At first glance, robustness might seem like a reasonable desideratum. After all, why should adding a constraint on $\Delta_{X\times Y}$ that places no restrictions on $\Delta_X$ change the conclusions that we might reach about $X$? Unfortunately, it turns out that this definition is deceptively strong, and disallows any “interesting” inference procedures. In particular, one property we may hope for in an inference procedure is to draw nontrivial conclusions about probabilities of events, that is, conclusions that do not follow from entailment. For example, maximum entropy (or any inference procedure based on symmetry) will conclude $\Pr(p) = 1/2$ from the empty knowledge base. We can show that inference procedures that are robust do not really allow much in the way of nontrivial conclusions about the probabilities of events. 
+ +**Definition 3.3:** An $(X, \mathcal{F}_X)$-inference procedure $I$ is essentially entailment for the knowledge base $KB \subseteq \Delta_X$ if for all $S \in \mathcal{F}_X$, if $KB \Vdash_I \alpha < \Pr(S) < \beta$ then $KB \models \alpha \leq \Pr(S) \leq \beta$. $I$ is essentially entailment for $X$ if it is essentially entailment for all knowledge bases $KB$ in the domain of $I_X$. $\blacksquare$ + +Thus, when entailment lets us conclude $\Pr(S) \in [\alpha, \beta]$, an inference procedure that is essentially entailment lets us draw only the slightly stronger conclusion $\Pr(S) \in (\alpha, \beta)$. To prove this, we need to make three assumptions about the domain of $I$. (For other results, we need other assumptions about the domain of $I$.) + +DI1. $\alpha \leq \Pr(S) \leq \beta$ is in the domain of $I_{(X,\mathcal{F}_X)}$ for all $S \in \mathcal{F}_X$, $\alpha, \beta \in \mathbb{R}$. + +DI2. If $KB$ is in the domain of $I_X$, then it is also in the domain of $I_{X\times Y}$ (when $KB$ is viewed as a constraint on $\Delta_{X\times Y}$.) + +DI3. If $KB_1$ and $KB_2$ are in the domain of $I_X$, then so is $KB_1 \wedge KB_2$. + +Note that sets of the form $\alpha \leq \Pr(S) \leq \beta$ are closed sets. It certainly seems reasonable to require that such sets be in the domain of an inference procedure; they correspond to the most basic observations. DI2 seems quite innocuous; as observed earlier, we do want to be able to view constraints on $\Delta_X$ as constraints on $\Delta_{X\times Y}$, and doing so should not prevent them from being in the domain of $I$. DI3 also seems to be a reasonable assumption, since if $KB_1$ and $KB_2$ correspond to possible observations, we want to be able to draw conclusions from combining the observations. DI3 holds if the domain of $I$ consists of closed sets. But note that it does not hold for $I^{\mathrm{me}}$ if we take its domain to consist of all sets that have a measure whose entropy is maximum. 
For example, if $X = \{x_1, x_2\}$, $A = \{\mu_0\} \cup \{\mu : \mu(x_1) > 3/4\}$, and $B = \{\mu : \mu(x_1) \geq 2/3\}$, where $\mu_0(x_1) = 1/2$, then each of $A$ and $B$ has a measure whose entropy is maximum, but $A \cap B$ does not have a measure whose entropy is maximum. +---PAGE_BREAK--- + +**Theorem 3.4:** If {$I_X : X \in \mathcal{X}$} is a robust $\mathcal{X}$-inference procedure that satisfies DI1, DI2, and DI3, then $I_X$ is essentially entailment for all $X \in \mathcal{X}$. + +**Proof:** See Appendix A.2. ■ + +It is possible to construct robust inference procedures that are almost but not quite entailment, simply by “strengthening” some conclusions from $\Pr(S) \in [\alpha, \beta]$ to $\Pr(S) \in (\alpha, \beta)$. Clearly, however, any robust inference procedure is extremely limited in its ability to jump to conclusions. In the next section, we look at a definition that seems closer to the intuitive notion of representation independence, and has somewhat more reasonable consequences. + +# 4. Representation Independence + +## 4.1 Representation shifts + +If $X$ and $Y$ are two different representations of the same phenomena then, intuitively, there should be a way of relating states in $X$ to corresponding states in $Y$. We want this correspondence to respect the logical structure of events. Formally, we require that it be a homomorphism with respect to complementation and union. + +**Definition 4.1:** An $(X, \mathcal{F}_X)-(Y, \mathcal{F}_Y)$ embedding $f$ is a function $f : \mathcal{F}_X \to \mathcal{F}_Y$ such that $f(S \cup T) = f(S) \cup f(T)$ and $f(\overline{S}) = \overline{f(S)}$ for all $S, T \in \mathcal{F}_X$. ■ + +As elsewhere, we talk about $X-Y$ embeddings rather than $(X, \mathcal{F}_X)-(Y, \mathcal{F}_Y)$ embeddings if $\mathcal{F}_X$ and $\mathcal{F}_Y$ do not play a significant role. + +Our goal is to consider the effect of a transformation on probabilistic formulas. 
Hence, we are interested in sets of states and their probabilities. + +**Definition 4.2:** If $f$ is an $X-Y$ embedding, $\mu \in \Delta_X$, and $\nu \in \Delta_Y$, then $\mu$ and $\nu$ correspond under $f$ if $\mu(S) = \nu(f(S))$ for all events $S \in \mathcal{F}_X$. We define a mapping $f^* : 2^{\Delta_X} \to 2^{\Delta_Y}$ as follows. We first define $f^*$ on singleton sets (except that, for convenience, we write $f^*(\mu)$ rather than $f^*(\{\mu\})$) by taking $f^*(\mu) = \{\nu \in \Delta_Y : \nu(f(S)) = \mu(S) \text{ for all } S \in \mathcal{F}_X\}$. Thus, $f^*(\mu)$ consists of all measures in $\Delta_Y$ that correspond to $\mu$ under $f$. If $\mathcal{D}$ is an arbitrary subset of $\Delta_X$, define $f^*(\mathcal{D}) = \bigcup_{\mu \in \mathcal{D}} f^*(\mu)$. ■ + +If $\theta$ is a constraint on $\Delta_X$ expressed in some language, we typically write $f^*(\theta)$ rather than $f^*([\theta]_X)$. We implicitly assume that the language is such that the constraint $f^*(\theta)$ is also expressible. It is not hard to see that $f^*(\theta)$ is the constraint that results by replacing every set $S \in \mathcal{F}_X$ that appears in $\theta$ by $f(S)$. + +**Example 4.3:** In Example 1.1, we might have $X = \{\text{colorful}, \text{colorless}\}$ and $Y = \{\text{red}, \text{blue}, \text{green}, \text{colorless}\}$. In this case, we might have $f(\text{colorful}) = \{\text{red}, \text{blue}, \text{green}\}$ and $f(\text{colorless}) = \{\text{colorless}\}$. Consider the measure $\mu \in \Delta_X$ such that $\mu(\text{colorful}) = 0.7$ and $\mu(\text{colorless}) = 0.3$. 
Then $f^*(\mu)$ is the set of measures $\nu$ such that the total probability assigned to the set of states $\{\text{red}, \text{blue}, \text{green}\}$ by $\nu$ is 0.7. Note that there are uncountably many such measures. It is easy to check that if $\theta$ is a constraint on $\Delta_X$ such as $\Pr(\text{colorful}) > 3/4$, then $f^*(\theta)$ is $\Pr(\{\text{red}, \text{blue}, \text{green}\}) > 3/4$. ■ +---PAGE_BREAK--- + +Embeddings can be viewed as the semantic analogue to the syntactic notion of interpretation defined in (Enderton, 1972, pp. 157–162), which has also been used in the recent literature on abstraction (Giunchiglia & Walsh, 1992; Nayak & Levy, 1995). Essentially, an interpretation maps formulas in a vocabulary Φ to formulas in a different vocabulary Ψ by mapping the primitive propositions in Φ (e.g., *colorful*) to formulas over Ψ (e.g., *red* ∨ *blue* ∨ *green*) and then extending to complex formulas in the obvious way. The representation shift in Example 1.2 can also be captured in terms of an interpretation, this one taking *flying-bird* to *fly* ∧ *bird*. + +**Definition 4.4:** Let $\Phi$ and $\Psi$ be two vocabularies. In the propositional case, an *interpretation of $\Phi$ into $\Psi$* is a function $i$ that associates with every primitive proposition $p \in \Phi$ a formula $i(p) \in \mathcal{L}(\Psi)$. A more complex definition in the same spirit applies to first-order vocabularies. For example, if $R$ is a $k$-ary predicate, then $i(R)$ is a formula with $k$ free variables. ■ + +Given an interpretation $i$, we get a syntactic translation from formulas in $\mathcal{L}(\Phi)$ to formulas in $\mathcal{L}(\Psi)$ using $i$ in the obvious way; for example, $i((p \wedge \neg q) \vee r) = (i(p) \wedge \neg i(q)) \vee i(r)$ (see (Enderton, 1972) for the details). 
Clearly an interpretation $i$ from $\Phi$ to $\Psi$ induces an embedding $f$ from $W_1 \subseteq W(\Phi)$ to $W_2 \subseteq W(\Psi)$: we map $[\varphi]_{W_1}$ to $[i(\varphi)]_{W_2}$. + +Of course, not all embeddings count as legitimate representation shifts. For example, consider an embedding $f$ defined in terms of an interpretation that maps both the propositions $p$ and $q$ to the proposition $r$. Then the process of changing representations using $f$ gives us the information that $p$ and $q$ are equivalent, information that we might not have had originally. Intuitively, $f$ gives us new information by telling us that a certain situation—that where $p \wedge \neg q$ holds—is not possible. More formally, the embedding $f$ has the following undesirable property: it maps the set of states satisfying $p \wedge \neg q$ to the empty set. This means a state where $p \wedge \neg q$ holds does not have an analogue in the new representation. We want to disallow such embeddings. + +**Definition 4.5:** An X-Y embedding $f$ is *faithful* if, for all $S,T \in \mathcal{F}_X$, we have $S \subseteq T$ iff $f(S) \subseteq f(T)$. ■ + +This definition has the desired consequence of disallowing embeddings that give new +information as far as logical consequence goes. + +**Lemma 4.6:** An X-Y embedding $f$ is faithful if and only if for all constraints $KB$ and $\theta$, +we have $KB \models \theta$ iff $f^*(KB) \models f^*(\theta)$. + +**Proof:** See Appendix A.3. ■ + +It is clear that our embedding from Example 4.3 is faithful: $f(\text{colorful}) = \{\text{red}, \text{blue}, \text{green}\}$ and $f(\text{colorless}) = \{\text{colorless}\}$. The following proposition gives further insight into faithful embeddings. + +**Proposition 4.7:** Let $f$ be a faithful $X-Y$ embedding. 
Then the following statements are equivalent: + +(a) $\mu$ and $\nu$ correspond under *f*; +---PAGE_BREAK--- + +(b) for all formulas $\theta$, $\mu \models \theta$ iff $\nu \models f^*(\theta)$. + +**Proof:** See Appendix A.3. ■ + +If the embedding $f$ is a “reasonable” representation shift, we would like an inference procedure to return the same answers if we shift representations using $f$. + +**Definition 4.8:** If $X, Y \in \mathcal{X}$, then the $\mathcal{X}$-inference procedure $\{I_X : X \in \mathcal{X}\}$ is invariant under the $X-Y$ embedding $f$ if for all constraints $KB$ and $\theta$ on $\Delta_X$, we have $KB \vdash_{I_X} \theta$ iff $f^*(KB) \vdash_{I_Y} f^*(\theta)$. (Note that, in particular, this means that $KB$ is in the domain of $\vdash_{I_X}$ iff $f^*(KB)$ is in the domain of $\vdash_{I_Y}$.) ■ + +**Definition 4.9:** The $\mathcal{X}$-inference procedure $\{I_X : X \in \mathcal{X}\}$ is *representation independent* if it is invariant under all faithful $X-Y$ embeddings for all $X, Y \in \mathcal{X}$. ■ + +Since the embedding for Example 4.3 is faithful, any representation-independent inference procedure would return the same answers for $\Pr(\textcolorful)$ as for $\Pr(\text{red} \vee \text{blue} \vee \text{green})$. The issue is somewhat more subtle for Example 1.2. There, we would like to have an embedding $f$ generated by the interpretation $i(\text{flying-bird}) = \text{fly} \wedge \text{bird}$ and $i(\text{bird}) = \text{bird}$. This is not a faithful embedding, since $\text{flying-bird} \Rightarrow \text{bird}$ is not a valid formula, while $i(\text{flying-bird} \Rightarrow \text{bird})$ is $(\text{fly} \wedge \text{bird}) \Rightarrow \text{bird}$ which is valid. Looking at this problem semantically, we see that the state corresponding to the model where $\text{flying-bird} \wedge \neg\text{bird}$ holds is mapped to $\emptyset$. But this is clearly the source of the problem. 
According to our linguistic intuitions for this domain, this is not a “legitimate” state. Rather than considering all the states in $\mathcal{W}(\{\text{flying-bird}, \text{bird}\})$, it is perhaps more appropriate to consider the subset $X$ consisting of the truth assignments characterized by the formulas $\{\text{flying-bird} \wedge \text{bird}, \neg\text{flying-bird} \wedge \text{bird}, \neg\text{flying-bird} \wedge \neg\text{bird}\}$. If we now use $i$ to embed $X$ into $\mathcal{W}(\{\text{fly}, \text{bird}\})$, the resulting embedding is indeed faithful. So, as for the previous example, invariance under this embedding would guarantee that we get the same answers under both representations. + +## 4.2 Representation-independent inference procedures + +Although the definition of representation independence seems natural, so did the definition of robustness. How do the two definitions relate to each other? First, we show that representation independence is a weaker notion than robustness. For this result, we need to consider inference procedures that satisfy two further assumptions. + +DI4. If $f$ is a faithful $X-Y$ embedding, then $KB$ is in the domain of $I_X$ iff $f^*(KB)$ is in the domain of $I_Y$. + +DI5. If $KB$ is in the domain of $I_{X\times Y}$, $f$ is a faithful $X-Y$ embedding, and $\varphi_1$ is a constraint on $\Delta_X$, then $KB \wedge (\varphi_1 \Leftrightarrow f^*(\varphi_1))$ is in the domain of $I_{X\times Y}$. + +DI4 is very natural and is satisfied by all the standard inference procedures. It is easy to check that if $KB$ is closed iff $f^*(KB)$ is closed. While DI5 may not appear so natural, it does hold for domains consisting of closed sets, since it is not hard to check that $\varphi \Leftrightarrow f^*(\varphi_1)$ is closed. DI5 would follow from DI3 and the assumption that $\varphi \Leftrightarrow f^*(\varphi_1)$ is in the domain of $I_{X\times Y}$, but it is actually weaker than the combination of these two assumptions. 
In particular, it holds for the domain consisting of all sets on which there is a measure of maximum entropy. +---PAGE_BREAK--- + +**Theorem 4.10:** If a robust $\mathcal{X}$-inference procedure satisfies DI2, DI4, and DI5, then it is representation independent. + +**Proof:** See Appendix A.3. ■ + +We have already shown that any robust inference procedure must be almost trivial. Are there any interesting representation-independent inference procedures? As we shall see, the answer is mixed. There are nontrivial representation-independent inference procedures, but they are not very interesting. + +Our first result shows that representation independence, like robustness, trivializes the +inference procedure, but only for some knowledge bases. + +**Theorem 4.11:** If {$I_X : X \in \mathcal{X}$} is a representation-independent $\mathcal{X}$-inference procedure then, for all $X \in \mathcal{X}$, $I_X$ is essentially entailment for all objective knowledge bases in its domain.³ + +**Proof:** See Appendix A.3. ■ + +**Corollary 4.12:** If {$I_X : X \in \mathcal{X}$} is a representation-independent $\mathcal{X}$-inference procedure, $KB$ is objective, and $KB \Vdash_I \alpha < \text{Pr}(S) < \beta$ for some $\alpha \ge 0$ and $\beta \le 1$, then $\alpha = 0$ and $\beta = 1$. + +This result tells us that from an objective knowledge base Pr($T$) = 1, we can reach only three possible conclusions about a set $S$. If $T \subseteq S$, then we can conclude that Pr($S$) = 1; if $T \subseteq \bar{S}$, then we can conclude that Pr($S$) = 0; otherwise, the strongest conclusion we can make about Pr($S$) is that it is somewhere between 0 and 1. + +We can construct a representation-independent inference procedure that is not entailment and has precisely this behavior if we restrict attention to countable state spaces. Suppose that $X$ is countable. 
Given an objective knowledge base $KB$ of the form $\Pr(T) = 1$, where $T \in \mathcal{F}_X$, let $KB^+$ consist of all formulas of the form $0 < \Pr(S) < 1$ for all nonempty strict subsets $S$ of $T$ in $\mathcal{F}_X$.⁴ We now define an $X$-inference procedure $I_X^0$ as follows: If $KB$ is equivalent to an objective knowledge base, then $KB \Vdash_{I^0} \varphi$ if $KB \wedge KB^+ \models \varphi$; if $KB$ is not equivalent to an objective knowledge base, then $KB \Vdash_{I^0} \varphi$ if $KB \models \varphi$. It follows easily from Proposition 2.5 that $I_X^0$ is indeed an inference procedure. Moreover, it is not equivalent to the standard notion of entailment; for example, we have true $\Vdash_{I^0} 0 < \Pr(p) < 1$, while true $\not\models 0 < \Pr(p) < 1$. Nevertheless, we can prove that $I^0$ is representation independent. + +**Lemma 4.13:** Let $\mathcal{X}$ consist of only countable sets. Then {$I_X^0 : X \in \mathcal{X}$} is a representation-independent $\mathcal{X}$-inference procedure. + +3. In an earlier version of this paper (Halpern & Koller, 1995), we claimed that any representation-independent inference procedure that satisfied a minimal irrelevance property (implied by robustness, but not equivalent to it) is essentially entailment for all knowledge bases. As Jaeger (1996) shows, an inference procedure along the lines of $I^1$ described below can be constructed to show that this result is not correct. We seem to need the full strength of robustness. + +4. The requirement that $X$ be countable is necessary here. If $X$ is uncountable and every singleton is in $\mathcal{F}_X$, then $KB^+$ is inconsistent if both $T$ and $\bar{T}$ are uncountable. It is impossible that each of an uncountable collection of points has positive measure. +---PAGE_BREAK--- + +**Proof:** See Appendix A.3. 
■ + +While objective knowledge bases may not appear so interesting if we restrict to propositional languages, for languages that include first-order and statistical information they become quite interesting. Indeed, as shown in (Bacchus, 1990; Bacchus, Grove, Halpern, & Koller, 1996), knowledge bases with first-order and (objective) statistical information allow us to express a great deal of the information that we naturally encounter. For example, we can express the fact that “90% of birds fly” as an objective statement about the number of flying birds in our domain relative to the overall number of birds. Of course, Theorem 4.11 applies immediately to such knowledge bases. + +Theorem 4.11 also implies that various inference procedures cannot be representation independent. In particular, since true $\Vdash_{me} \Pr(p) = 1/2$ for a primitive proposition $p$, it follows that maximum entropy is not essentially entailment. This observation provides another proof that maximum entropy is not representation independent. + +It is consistent with Theorem 4.11 that there are representation-independent inference procedures that are not almost entailment for probabilistic knowledge bases. For example, consider the X-inference procedure $I_X^1$ defined as follows. Given $A \subseteq \Delta_X$, if there exists some $S \in \mathcal{F}_X$ such that $A = \{\mu \in \Delta_X : \mu(S) \ge 1/4\}$, then $I_X^1(A) = \{\mu \in \Delta_X : \mu(S) \ge 1/3\}$; otherwise, $I_X^1(A) = A$. Thus, $\Pr(S) \ge 1/4 \Vdash_{I1} \Pr(S) \ge 1/3$. Clearly, $I_X^1$ is not essentially entailment. Yet, we can prove the following result. + +**Lemma 4.14:** Suppose that $\mathcal{X}$ consists only of measure spaces of the form $(X, 2^X)$, where $X$ is finite. Then $\{I_X^1 : X \in \mathcal{X}\}$ is a representation-independent $\mathcal{X}$-inference procedure. + +**Proof:** See Appendix A.3. ■ + +Note that it follows from Theorem 3.4 that $I^1$ cannot be robust. 
Thus, we have shown that representation independence is a strictly weaker notion than robustness. + +This example might lead us to believe that there are representation-independent inference procedures that are “interesting” for probabilistic knowledge bases. However, as we now show, a representation-independent inference procedure cannot satisfy one key desideratum: the ability to conclude independence by default. For example, an important feature of the maximum-entropy approach to nonmononotic reasoning (Goldszmidt, Morris, & Pearl, 1993) has been its ability to ignore “irrelevant” information, by implicitly assuming independence. Of course, maximum entropy does not satisfy representation independence. Our result shows that no approach to probabilistic reasoning can simultaneously assure representation independence and a default assumption of independence. + +We do not try to give a general notion of “default assumption of independence” here, since we do not need it for our result. Rather, we give a minimal property that we would hope an inference procedure might have, and show that this property is sufficient to preclude representation independence. Syntactically, the property we want is that if $\Phi$ and $\Psi$ are disjoint vocabularies, $KB \in \mathcal{L}^{pr}(\Phi)$, $\varphi \in \mathcal{L}(\Phi)$, and $\psi \in \mathcal{L}(\Psi)$, then $KB \Vdash_I \Pr(\varphi \wedge \psi) = \Pr(\varphi) \times \Pr(\psi)$. +---PAGE_BREAK--- + +**Definition 4.15:** An $\mathcal{X}$-inference procedure $\{I_X : X \in \mathcal{X}\}$ enforces minimal default independence if, whenever $X$ and $Y$ are in $\mathcal{X}$, $KB$ is a constraint on $\Delta_X$ in the domain of $|\sim_{I_X}$, $S \in \mathcal{F}_X$, and $T \in \mathcal{F}_Y$, then $KB |_{\sim_{I_X \times Y}} \Pr(S \times T) = \Pr(S) \times \Pr(T).^5$ + +This definition clearly generalizes the syntactic definition. + +Clearly, entailment does not satisfy minimal default independence. Maximum entropy, however, does. 
Indeed, a semantic property that implies minimal default independence is used by Shore and Johnson (1980) as one of the axioms in an axiomatic characterization of maximum-entropy. + +**Theorem 4.16:** If $\{I_X : X \in \mathcal{X}\}$ is an $\mathcal{X}$-inference procedure that enforces minimal default independence and satisfies DI1, then $I_X$ is not representation independent. + +*Proof:* See Appendix A.3. ■ + +This result is very interesting as far as irrelevance is concerned. We might hope that learning irrelevant information does not affect our conclusions. While we do not attempt to define irrelevance here, certainly we would expect that if $KB'$ is in a vocabulary disjoint from $KB$ and $\varphi$, then, for example, $KB |_{\sim_I} \Pr(\varphi) = \alpha$ iff $KB \wedge KB' |_{\sim_I} \Pr(\varphi) = \alpha$. If $KB'$ is objective, then the standard probabilistic approach would be to identify learning $KB'$ with conditioning on $KB'$. Suppose that we restrict to inference procedures that do indeed condition on objective information (as is the case for the class of inference procedures we consider in Section 6). Then $KB \wedge KB' |_{\sim_I} \Pr(\varphi) = \alpha$ exactly if $KB |_{\sim_I} \Pr(\varphi | KB') = \alpha$. Thus, Theorem 4.16 tells us that inference procedures that condition on new (objective) information cannot both be representation independent and ignore irrelevant information. + +Thus, although representation independence, unlike robustness, does not force us to use entirely trivial inference procedures, it does prevent us from using procedures that have certain highly desirable properties. + +## 5. Discussion + +These results suggest that any type of representation independence is hard to come by. They also raise the concern that perhaps our definitions were not quite right. We can provide what seems to be even more support for the latter point. + +**Example 5.1:** Let $Q$ be a unary predicate and $c_1, \dots, c_{100}, d$ be constant symbols. 
Suppose that we have two vocabularies $\Phi = \{Q, d\}$ and $\Psi = \{Q, c_1, \dots, c_{100}, d\}$. Consider the interpretation $i$ from $\Phi$ to $\Psi$ for which $i(d) = d$ and $i(Q(x)) = Q(x) \wedge Q(c_1) \wedge \dots \wedge Q(c_{100})$. Now, consider $KB = \exists x Q(x)$. In this case, $i(KB) = \exists x (Q(x) \wedge Q(c_1) \wedge \dots \wedge Q(c_{100}))$. Intuitively, since all the $c_i$'s may refer to the same domain element, the only conclusion we can make with certainty from $Q(c_1) \wedge \dots \wedge Q(c_{100})$ is that there exists at least one $Q$ in the domain, which gives us no additional information beyond $KB$. We can convert this example into a general argument that the embedding $f$ corresponding to $i$ is faithful. Intuitively, for + +5. Since we are working in the space $X \times Y$, $KB$ should be viewed as a constraint on $\Delta_{X\times Y}$ here, $\Pr(S)$ should be understood as $\Pr(S \times Y)$, while $\Pr(T)$ should be understood as $\Pr(X \times T)$. Recall that, by assumption, $X \times Y \in \mathcal{X}$. +---PAGE_BREAK--- + +any *KB*, we can only get the conclusion $Q(c_1) \wedge \dots \wedge Q(c_{100})$ from $f^*(\mathbf{KB})$ if $Q(x)$ appears +positively in *KB*; but, in this case, we already know that there is at least one *Q*, so we +gain no new information from the embedding. But it does not seem unreasonable that an +inference procedure should assign different degrees of belief to $Q(d)$ given $\mathbf{KB} = \exists x Q(x)$ on +the one hand and given $i(\mathbf{KB}) = \exists x (Q(x) \wedge Q(c_1) \wedge \dots \wedge Q(c_{100}))$ on the other,$^6$ particularly +if the domain is small. In fact, many reasoning systems explicitly adopt a *unique names assumption*, which would clearly force different conclusions in these two situations. $\blacksquare$ + +This example suggests that, at least in the first-order case, even faithful embeddings +do not always match our intuition for a “reasonable” representation shift. 
One might +therefore think that perhaps the problem is with our definition even in the propositional +case. Maybe there is a totally different definition of representation independence that avoids +these problems. While this is possible, we do not believe it to be the case. The techniques +that we used to prove Theorem 4.16 and 3.4 seem to apply to any reasonable notion of +representation independence.$^7$ To give the flavor of the type of argument used to prove these +theorems, consider Example 1.1, and assume that true $\Vdash_I$ Pr(**colorful**) = $\alpha$ for $\alpha \in (0,1).$$^8$ +Using an embedding $g$ such that $g(\mathbf{colorful}) = red$, we conclude that true $\Vdash_I$ Pr(**red**) = $\alpha$. +Similarly, we can conclude Pr(**blue**) = $\alpha$ and Pr(**green**) = $\alpha$. But in order for $\Vdash_I$ to be +invariant under our original embedding, we must have true $\Vdash_I$ Pr(**red** $\lor$ **blue** $\lor$ **green**) = $\alpha$, +which is completely inconsistent with our previous conclusions. But the embeddings we use +in this argument are very natural ones; we would not *want* a definition of representation +independence that disallowed them. + +These results can be viewed as support for the position that representation dependence +is justified; the choice of an appropriate representation encodes significant information. In +particular, it encodes the bias of the knowledge-base designer about the world. Researchers +in machine learning have long realized that bias is an inevitable component of effective +inductive reasoning (i.e., learning from evidence). So we should not be completely surprised +if it turns out that other types of leaping to conclusions (as in our context) also depend on +the bias. + +But we need to be a little careful here. For example, in some cases we can identify +the vocabulary (and hence, the representation) with the sensors that an agent has at its +disposal. 
It may not seem that unreasonable that an agent with a temperature sensor and +a motion sensor might carve up the world differently from an agent with a color sensor +and a distance sensor. But consider two agents with different sensors who have not yet +made any observations. Suppose that both of them can talk about the distance to a tree. +Is it reasonable that the two agents should reach different conclusions about the distance +just because they have different sensors (and thus use different vocabularies), although they +have not made any observations? It would then follow that the agents should change their +conclusions if they switched sensors, despite not having made any observations. This does +not seem so reasonable! + +Bias and representation independence can be viewed as two extremes in a spectrum. +If we accept that the knowledge base encodes the user's bias, there is no obligation to be + +6. Actually, $i(Q(d)) = Q(d) \wedge Q(c_1) \wedge \dots \wedge Q(c_{100})$, but the latter is equivalent to $Q(d)$ given $KB$. + +7. They certainly applied to all of the many definitions that we tried! + +8. In fact, it suffices to assume that true $\Vdash_I$ Pr(**colorful**) $\in [\alpha, \beta]$, as long as $\alpha > 0$ or $\beta < 1$. +---PAGE_BREAK--- + +invariant under any representation shifts at all. On the other hand, if we assume that the representation used carries no information, coherence requires that our inference procedure give the same answers for all “equivalent” representations. We believe that the right answer lies somewhere in between. There are typically a number of reasonable ways in which we can represent our information, and we might want our inference procedure to return the same conclusions no matter which of these we choose. It thus makes sense to require that our inference procedure be invariant under embeddings that take us from one reasonable representation to another. 
But it does not follow that it must be invariant under all embeddings, or even all embeddings that are syntactically similar to the ones we wish to allow. We may be willing to refine *colorful* to *red ∨ blue ∨ green* or to define *flying-bird* as *fly ∧ bird*, but not to transform *red* to *sparrow*. In the next section, we show how to construct inference procedures that are representation independent under a limited class of representation shifts. + +## 6. Selective invariance + +As discussed above, we want to construct an inference procedure *I* that is invariant only under certain embeddings. For the purposes of this section, we restrict attention to finite spaces, where all sets are measurable. That is, we focus on *X*-inference procedures where *X* consists only of measure spaces of the form (*X*, 2*X*), where *X* is finite. + +Our first step is to understand the conditions under which an *X*-inference procedure *I* is invariant under a specific *X*-*Y* embedding *f*. When do we conclude *θ* from KB ⊆ Δ*X*? Recall that an inference procedure *I**X* picks a subset D*X* = *I**X*(KB), and concludes *θ* iff θ holds for every measure in D*X*. Similarly, when applied to *f**(KB) ⊆ Δ*Y*, *I**Y* picks a subset D*Y* = *I**Y*(*f**(KB)). For *I* to be invariant under *f* with respect to KB, there has to be a tight connection between D*X* and D*Y*. + +To understand this connection, first consider a pair of measures μ on X and ν on Y. Recall from Proposition 4.7 that μ and ν correspond under f iff, for all formulas θ, we have μ |=$\theta$ iff ν |=$f^*(\theta)$. To understand how the correspondence extends to sets of probability measures, consider the following example: + +**Example 6.1:** Consider the embedding $f$ of Example 4.3, and let $\mathcal{D}_X = \{\mu, \mu'\}$ where $\mu$ is as above, and $\mu'(colorful) = 0.6$. How do we guarantee that we reach the corresponding conclusions from $\mathcal{D}_X$ and $\mathcal{D}_Y$? 
Assume, for example, that $\mathcal{D}_Y$ contains some measure $\nu$ that does not correspond to either $\mu$ or $\mu'$, e.g., the measure that assigns probability 1/4 to all four states. In this case, the conclusion $\Pr(colorful) \le 0.7$ holds in $\mathcal{D}_X$, because it holds for both these measures; but the corresponding conclusion $\Pr(red \lor blue \lor green) \le 0.7$ does not hold in $\mathcal{D}_Y$. Therefore, every probability measure in $\mathcal{D}_Y$ must correspond to some measure in $\mathcal{D}_X$. Conversely, every measure in $\mathcal{D}_X$ must correspond to a measure in $\mathcal{D}_Y$. For suppose that there is no measure $\nu \in \mathcal{D}_Y$ corresponding to $\mu$. Then we get the conclusion $\Pr(blue \lor red \lor green) \ne 0.7$ from $\mathcal{D}_Y$, but the corresponding conclusion $\Pr(colorful) \ne 0.7$ does not follow from $\mathcal{D}_X$. Note that these two conditions do not imply that $\mathcal{D}_Y$ must be precisely the set of measures corresponding to measures in $\mathcal{D}_X$. In particular, we might have $\mathcal{D}_Y$ containing only a single measure $\nu$ corresponding to $\mu$ (and at least one corresponding to $\mu'$), e.g., one with $\nu(red) = 0.5$, $\nu(blue) = 0$, $\nu(green) = 0.2$, and $\nu(colorless) = 0.3$. ■ +---PAGE_BREAK--- + +Based on this example, we use the following extension to our definition of correspon- +dence. + +**Definition 6.2:** We say that $\mathcal{D}_X$ and $\mathcal{D}_Y$ correspond under $f$ if for all $\nu \in \mathcal{D}_Y$, there exists a corresponding $\mu \in \mathcal{D}_X$ (so that $\mu(S) = \nu(f(S))$ for all $S \subseteq X$), and for all $\mu \in \mathcal{D}_X$, there exists a corresponding $\nu \in \mathcal{D}_Y$. ■ + +**Proposition 6.3:** Suppose that $f$ is a faithful X-Y embedding, $\mathcal{D}_X \subseteq \Delta_X$, and $\mathcal{D}_Y \subseteq \Delta_Y$. 
The following two conditions are equivalent:

(a) $\mathcal{D}_X$ and $\mathcal{D}_Y$ correspond under $f$;

(b) for all $\theta$, $\mathcal{D}_X \models \theta$ iff $\mathcal{D}_Y \models f^*(\theta)$.$^9$

**Proof:** See Appendix A.4. ■

To produce an inference procedure that is invariant under some X-Y embedding $f$,
we must ensure that for every KB, $I_X(KB)$ and $I_Y(f^*(KB))$ correspond. At first glance, it
seems rather difficult to guarantee correspondence for every knowledge base. It turns out
that the situation is not that bad. In the remainder of this section, we show how, starting
with a correspondence for the knowledge base true—that is, starting with a correspondence
between $I_X(\Delta_X)$ and $I_Y(\Delta_Y)$—we can bootstrap to a correspondence for all KB’s, using
standard probabilistic updating procedures.

First consider the problem of updating with objective information. The standard way
of doing this update is via conditioning. For a measure $\mu \in \Delta_X$ and an event $S \subseteq X$, define
$\mu|S$ to be the measure that assigns probability $\mu(w)/\mu(S)$ to every $w \in S$, and zero to all
other states. For a set of measures $\mathcal{D}_X \subseteq \Delta_X$, define $\mathcal{D}_X|S$ to be $\{\mu|S : \mu \in \mathcal{D}_X\}$. The
following result is easy to show.

**Proposition 6.4:** Let $S \subseteq X$ be an event and let $f$ be a faithful X-Y embedding. If $\mu$ and $\nu$ correspond under $f$, then $\mu|S$ and $\nu|f(S)$ also correspond under $f$.

**Proof:** Almost immediate from the definitions; left to the reader. (In any case, note that this result follows from Theorem 6.7 below.) ∎

Clearly, the result extends to sets of measures.

**Corollary 6.5:** If $f$ is a faithful X-Y embedding, and $\mathcal{D}_X$ and $\mathcal{D}_Y$ correspond under $f$, then $\mathcal{D}_X|S$ and $\mathcal{D}_Y|f(S)$ also correspond under $f$.

What if we want to update on a constraint that is not objective? 
The standard extension +of conditioning to this case is via *relative entropy* or *KL-divergence* (Kullback & Leibler, +1951). + +9. While (a) implies (b) for arbitrary spaces, the implication from (b) to (a) depends on the restriction to finite spaces made in this section. For suppose that $X$ is the natural numbers $N$, $f$ is the identity, $\mathcal{D}_X$ consists of all probability measures on $N$, and $\mathcal{D}_Y$ consists of all measures but that measure $\mu_0$ such that $\mu_0(n) = 1/2^{n+1}$. If the language consists of finite Boolean combinations of assertions of the form $\text{Pr}(S) \ge \alpha$, for $S \subseteq N$, then it is easy to see that $\mathcal{D}_X \models \theta$ iff $\mathcal{D}_Y \models \theta$ for all formulas $\theta$, but clearly $\mathcal{D}_X$ and $\mathcal{D}_Y$ do not correspond under the identity map. +---PAGE_BREAK--- + +**Definition 6.6:** If $\mu$ and $\mu'$ are measures on $X$, the relative entropy between $\mu'$ and $\mu$, denoted $KL_X(\mu' || \mu)$, is defined as $\sum_{x \in X} \mu'(x) \log(\mu'(x)/\mu(x))$. For a measure $\mu$ on $X$ and a constraint $\theta$, let $\mu|\theta$ denote the set of measures $\mu'$ satisfying $\theta$ for which $KL_X(\mu' || \mu)$ is minimal. ■ + +Intuitively, the KL-divergence measures the “distance” from $\mu'$ to $\mu$. A measure $\mu'$ satisfying $\theta$ for which $KL_X(\mu' || \mu)$ is minimal can be thought of as the “closest” measure to $\mu$ that satisfies $\theta$. If $\theta$ denotes an objective constraint, then the unique measure satisfying $\theta$ for which $KL_X(\mu' || \mu)$ is minimal is the conditional measure $\mu|\theta$ (Kullback & Leibler, 1951). (That is why we have deliberately used the same notation here as for conditioning.) Moreover, it is easy to show that $KL_X(\mu' || \mu) = 0$ iff $\mu' = \mu$. It follows that if $\mu \in \theta$, then $\mu|\theta = \mu$. 

Given a set of measures $D_X \subseteq \Delta_X$ and a constraint $\theta$ on $\Delta_X$, define $D_X|\theta$ to be $\cup_{\mu \in D_X} \mu|\theta$.

We can now apply a well-known result (see, e.g., (Seidenfeld, 1987)) to generalize Proposition 6.4 to the case of relative entropy.

**Theorem 6.7:** Let $\theta$ be an arbitrary constraint on $\Delta_X$. If $f$ is a faithful X-Y embedding and $\mu$ and $\nu$ correspond under $f$, then $\mu|\theta$ and $\nu|f^*(\theta)$ also correspond under $f$.

**Proof:** See Appendix A.4. ■

Again, this result clearly extends to sets of measures.

**Corollary 6.8:** If $f$ is a faithful X-Y embedding, and $D_X$ and $D_Y$ correspond under $f$, then $D_X|\theta$ and $D_Y|f^*(\theta)$ also correspond under $f$.

These results give us a way to “bootstrap” invariance. We construct an inference procedure that uses relative entropy starting from some set of prior probability measures. Intuitively, these encode the user’s prior beliefs about the domain. As information comes in, these measures are updated using cross-entropy. If we design the priors so that certain invariances hold, Corollary 6.8 guarantees that these invariances continue to hold throughout the process.

Formally, a prior function $\mathcal{P}$ on $\mathcal{X}$ maps $X \in \mathcal{X}$ to a set $\mathcal{P}(X)$ of probability measures in $\Delta_X$. Define an inference procedure $I^\mathcal{P}$ by taking $I_X^\mathcal{P}(KB) = \mathcal{P}(X)|KB$. Note that $I_X^\mathcal{P}(\text{true}) = \mathcal{P}(X)$, so that when we have no constraints at all, we use $\mathcal{P}(X)$ as the basis for our inference. Most of the standard inference procedures are of the form $I^\mathcal{P}$ for some prior function $\mathcal{P}$. It is fairly straightforward to verify, for example, that entailment is $I^\mathcal{P}$ for $\mathcal{P}(X) = \Delta_X$. (This is because, as observed earlier, $\mu|KB = \mu$ if $\mu \in KB$.) 
Standard Bayesian conditioning (defined for objective knowledge bases) is of this form, where we take $\mathcal{P}(X)$ to be a single measure for each space $X$. More interestingly, it is well known (Kullback & Leibler, 1951) that maximum entropy is $I^\mathcal{P}_u$ where $P_u(X)$ is the singleton set containing only the uniform prior on $X$. + +So what can we say about the robustness of $I^\mathcal{P}$ to representation shifts? Using Proposition 6.3 and Corollary 6.5, it is easy to show that if we want $I^\mathcal{P}$ to be invariant under some set $\mathcal{F}$ of embeddings, then we must ensure that the prior function has the right correspondence property. + +**Theorem 6.9:** If $f$ is a faithful X-Y embedding, then $I^\mathcal{P}$ is invariant under $f$ iff $\mathcal{P}(X)$ and $\mathcal{P}(Y)$ correspond under $f$. +---PAGE_BREAK--- + +**Proof:** See Appendix A.4. + +Theorem 6.9 sheds some light on the maximum entropy inference procedure. As we mentioned, $\vdash_{me}$ is precisely the inference procedure based on the prior function $P_u$. The corollary asserts that $\vdash_{me}$ is invariant under $f$ precisely when the uniform priors on $X$ and $Y$ correspond under $f$. This shows that maximum entropy's lack of representation independence is an immediate consequence of the identical problem for the uniform prior. Is there a class $\mathcal{F}$ of embeddings under which maximum entropy is invariant? Clearly, the answer is yes. It is easy to see that any embedding that takes the elements of $X$ to (disjoint) sets of equal cardinality has the correspondence property required by Theorem 6.9. It follows that maximum entropy is invariant under all such embeddings. In fact, the requirement that maximum entropy be invariant under a subset of these embeddings is one of the axioms in Shore and Johnson's (1980) axiomatic characterization of maximum-entropy. 
(We remark that Paris (1994, Theorem 7.10) proves that maximum entropy satisfies a variant of his atomicity principle; his invariance result is essentially a special case of Theorem 6.9.) + +If we do not like the behavior of maximum entropy under representation shifts, Theorem 6.9 provides a solution: we should simply start out with a different prior function. If we want to maintain invariance under all representation shifts, $P(X)$ must include all non-extreme priors (i.e., all the measures $\mu$ on $X$ such that $\mu(A) \notin \{0, 1\}$ for all $A$ such that $A \notin \{\emptyset, X\}$). This set of priors gives essential entailment as an inference procedure. If, however, we have prior knowledge as to which embeddings encode “reasonable” representation shifts, we can often make do with a smaller class of priors, resulting in an inference procedure that is more prone to leap to conclusions. Given a class of “reasonable” embeddings $\mathcal{F}$, we can often find a prior function $P$ that is “closed” under each $f \in \mathcal{F}$, i.e., for each measure $\mu \in P(X)$ and each $X-Y$ embedding $f \in F$ we make sure that there is a corresponding measure $\nu \in P(Y)$, and vice versa. Thus, we can guarantee that $P$ has the appropriate structure using a process of closing off under each $f$ in $\mathcal{F}$. + +Of course, we can also execute this process in reverse. Suppose that we want to support a certain reasoning pattern that requires leaping to conclusions. The classical example of such a reasoning pattern is, of course, a default assumption of independence. What is the “most” representation independence that we can get without losing this reasoning pattern? As we now show, Theorem 6.9 gives us the answer. + +We begin by providing one plausible formulation of the desired reasoning pattern. 
For a finite space $X$, we say that $X_1 \times \cdots \times X_n$ is the *product decomposition* of $X$ if $X = X_1 \times \cdots \times X_n$ and $n$ is the largest number for which $X$ can be written as a product in this way. (It is easy to see that if $X$ is finite, then this “maximal” product decomposition is unique.) A measure $\mu \in \Delta_X$ is a *product measure on* $X$ if $X_1 \times \cdots \times X_n$ is the product decomposition of $X$ and there exist measures $\mu_i \in \Delta_{X_i}$ for $i = 1, \dots, n$ such that $\mu = \mu_1 \times \cdots \times \mu_n$, that is, $\mu(U_1 \times \cdots \times U_n) = \prod_{i=1}^n \mu_i(U_i)$, if $U_i \subseteq X_i$, $i = 1, \dots, n$. Let $\mathcal{P}_{\Pi}(X)$ be the set of all product measures on $X$. If $\mathcal{P}_{\Pi}$ is the prior function and the relative entropy rule is used to update the prior given a knowledge base, then $\Vdash_{I^{\mathcal{P}_{\Pi}}}$ satisfies a form of minimal default independence. In fact, it is easy to show that it satisfies the following stronger property.
---PAGE_BREAK---

**Proposition 6.10:** Suppose that $X_1 \times \dots \times X_n$ is the product decomposition of $X$ and, for each $i = 1, \dots, n$, $KB_i$ is a constraint on $\Delta_{X_i}$, and $S_i$ is a subset of $X_i$. Then

$$ \bigwedge_{i=1}^{n} KB_i \Vdash_{I^{\mathcal{P}_{\Pi}}} \Pr(S_1 \times \dots \times S_n) = \prod_{i=1}^{n} \Pr(S_i). $$

**Proof:** See Appendix A.4. ■

Theorem 4.16 shows that $\Vdash_{I^{\mathcal{P}_{\Pi}}}$ cannot be invariant under all embeddings. Theorem 6.9 tells us that it is invariant under precisely those embeddings for which the prior function $\mathcal{P}_{\Pi}$ has the correspondence property of Theorem 6.9. These embeddings can be characterized syntactically in a natural way. Suppose that $\Phi_1, \dots, \Phi_n$ is a partition of a finite set $\Phi$ of primitive propositions. Note that a truth assignment to the primitive propositions in $\Phi$ can be viewed as a “crossproduct” of truth assignments to the primitive propositions in $\Phi_1, \dots, \Phi_n$. 
Under this identification, suppose that a set $X$ of truth assignments to $\Phi$ is decomposed as $X_1 \times \dots \times X_n$, where $X_i$ consists of truth assignments to $\Phi_i$. In that case, if $p \in \Phi_j$ and $q, r \in \Phi_k$ for some $j \neq k$, then true $\Vdash_{I^{\mathcal{P}_{\Pi}}}$ Pr($p \wedge q$) = Pr($p$) $\times$ Pr($q$), but since $q$ and $r$ are in the same subset, we do not have true $\Vdash_{I^{\mathcal{P}_{\Pi}}}$ Pr($r \wedge q$) = Pr($r$) $\times$ Pr($q$). Hence, $I^{\mathcal{P}_{\Pi}}$ is not invariant under an interpretation $i$ that maps $p$ to $r$ and maps $q$ to itself. Intuitively, the problem is that $i$ is “crossing subset boundaries”; it is mapping primitive propositions that are in different subsets to the same subset. If we restrict to interpretations that “preserve subset boundaries”, then we avoid this problem.

We can get a semantic characterization of this as follows. If the product decomposition of $X$ is $X_1 \times \dots \times X_n$ and the product decomposition of $Y$ is $Y_1 \times \dots \times Y_n$, then $f$ is an *X-Y product embedding* if $f$ is an *X-Y* embedding and there are $X_i$-$Y_i$ embeddings $f_i$, $i = 1, \dots, n$, and $f(\langle x_1, \dots, x_n \rangle) = f_1(x_1) \times \dots \times f_n(x_n)$. Product embeddings capture the intuition of preserving subset boundaries; elements in a given subset $X_i$ remain in the same subset ($Y_i$) after the embedding. However, the notion of product embedding is somewhat restrictive; it requires that elements in the $i$th subset of $X$ map to elements in the $i$th component of $Y$, for $i = 1, \dots, n$. We can still preserve default independence if the components of a product are permuted. An embedding $g$ is a *permutation embedding* if there exists a permutation $\pi$ of $\{1, \dots, n\}$ such that $g(\langle x_1, \dots, x_n \rangle) = \langle x_{\pi(1)}, \dots, x_{\pi(n)} \rangle$. 
+ +**Theorem 6.11:** *The inference procedure $I_{\mathcal{P}_{\Pi}}$ is invariant under faithful product embeddings and under permutation embeddings.* + +Theorem 6.9 thus provides us with the basic tools to easily define an inference procedure that enforces minimal default independence for constraints involving disjoint parts of the language, while at the same time guaranteeing invariance under a large and natural class of embeddings. Given our negative result in Theorem 4.16, this type of result is the best that we could possibly hope for. In general, Theorem 6.9 provides us with a principled framework for controlling the tradeoff between the strength of the conclusions that can be reached by an inference procedure and invariance under representation shifts. + +# 7. Related Work + +As we mentioned earlier, there are two types of probabilistic inference. We partition our discussion of related work along those lines. +---PAGE_BREAK--- + +## 7.1 Probabilistic Inference from a Knowledge Base + +Given the importance of representation in reasoning, and the fact that one of the main criticisms of maximum entropy has been its sensitivity to representation shifts, it is surprising how little work there has been on the problem of representation dependence. Indeed, to the best of our knowledge, the only work that has focused on representation independence in the logical sense that we have considered here prior to ours is that of Salmon and Paris. + +Salmon (1961) defined a *criterion of linguistic invariance*, which seems essentially equivalent to our notion of representation independence. He tried to use this criterion to defend one particular method of inductive inference but, as pointed out by Barker in the commentary at the end of (Salmon, 1961), his preferred method does not satisfy his criterion either. Salmon (1963) then attempted to define a modified inductive inference method that would satisfy his criterion but it is not clear that this attempt succeeded. 
In any case, our results show that his modified method certainly cannot be representation independent in our sense.

As we said earlier, Paris (1994) considers inference processes, which, given a constraint on $\Delta_X$, choose a unique measure satisfying the constraint. He then considers various properties that an inference process might have. Several of these are closely related to properties that we have considered here. (In describing these notions, we have made some inessential changes so as to be able to express them in our notation.)

* An $\mathcal{X}$-inference process $I$ is language invariant if for all $X, Y \in \mathcal{X}$ and all constraints $KB$ and $\varphi$ on $\Delta_X$, we have that $KB \Vdash_{I_X} \varphi$ iff $KB \Vdash_{I_{X\times Y}} \varphi$. Clearly language invariance is a special case of robustness. Paris shows that a *center of mass* inference process (that, given a set $A \subseteq \Delta_X$, chooses the measure that is the center of mass of $A$) is not language invariant; on the other hand, it is well known that maximum entropy is language invariant.

* An $\mathcal{X}$-inference process $I$ satisfies the *principle of irrelevant information* if for all spaces $X, Y \in \mathcal{X}$, constraints $KB$ and $\varphi$ on $\Delta_X$, and constraints $\psi$ on $\Delta_Y$, we have $KB \Vdash_{I_X} \varphi$ iff $KB \wedge \psi \Vdash_{I_{X\times Y}} \varphi$. Again, this is a special case of robustness, since a constraint $\psi$ on $\Delta_Y$ must be $X$-conservative. Paris shows that maximum entropy satisfies this principle. (He restricts the domain of the maximum entropy process to closed convex sets, so that there is always a unique probability measure that maximizes entropy.) 

* An $\mathcal{X}$-inference process $I$ satisfies the *renaming principle* if, whenever $X$ and $Y$ are finite spaces, $g: X \to Y$ is an isomorphism, and $f: 2^X \to 2^Y$ is the faithful embedding based on $g$ (in that $f(S) = \{g(s) : s \in S\}$), then for all constraints $KB$ and $\theta$ on $\Delta_X$, we have $KB \Vdash_{I_X} \theta$ iff $f^*(KB) \Vdash_{I_Y} f^*(\theta)$. Clearly, the renaming principle is a special case of representation independence. Paris shows that a number of inference processes (including maximum entropy) satisfy the renaming principle.

* An $\mathcal{X}$-inference process $I$ satisfies the *principle of independence* if, whenever $X, Y$, and $Z$ are in $\mathcal{X}$, $S \in \mathcal{F}_X$, $T \in \mathcal{F}_Y$, $U \in \mathcal{F}_Z$, and $KB$ is the constraint $\Pr(U) = a \wedge \Pr(S|U) = b \wedge \Pr(T|U) = c$, where $a > 0$, then $KB \Vdash \Pr(S \times T|U) = bc$. Ignoring the conditional probabilities, this is clearly a special case of minimal default independence. Paris and Vencovska (1990) show that maximum entropy is the unique
---PAGE_BREAK---

inference process satisfying a number of principles, including renaming, irrelevant information, and independence.

* An $\mathcal{X}$-inference process $I$ satisfies the *atomicity principle* if, for all $X, Y_1, \dots, Y_n$ in $\mathcal{X}$, whenever $f'$ is an embedding from $\{0, 1\}$ to $X$, and $f$ is the obvious extension of $f'$ to an embedding from $\{0, 1\} \times Y_1 \times \dots \times Y_n$ to $X \times Y_1 \times \dots \times Y_n$, then for all constraints $KB$ and $\theta$ on $\Delta_{\{0,1\} \times Y_1 \times \dots \times Y_n}$, we have $KB \Vdash_{I_{\{0,1\} \times Y_1 \times \dots \times Y_n}} \theta$ iff $f^*(KB) \Vdash_{I_{X \times Y_1 \times \dots \times Y_n}} f^*(\theta)$. Clearly atomicity is a special case of representation independence. Paris shows that there is no inference process that satisfies atomicity. 
The argument is similar in spirit to that used to prove Theorems 4.11 and 4.16, but much simpler, since inference processes return a unique probability measure, not a set of them. + +More recently, Jaeger (1996), building on our definitions, has examined representation independence for general nonmonotonic logics. He considers representation independence with respect to a collection of transformations, and proves results about the degree to which certain nonmonotonic formalisms, such as *rational closure* (Lehmann & Magidor, 1992), satisfy representation independence. + +Another line of research that is relevant to representation independence is the work on *abstraction* (Giunchiglia & Walsh, 1992; Nayak & Levy, 1995). Although the goal of this work is again to make connections between two different ways of representing the same situation, there are significant differences in focus. In the work on abstraction, the two ways of representing the situation are not expected to be equivalent. Rather, one representation typically abstracts away irrelevant details that are present in the other. On the other hand, their treatment of the issues is in terms of deductive entailment, not in terms of general inference procedures. It would be interesting to combine these two lines of work. + +## 7.2 Bayesian Probabilistic Inference + +Bayesian statistics takes a very different perspective on the issues we discuss in this paper. As we discussed, the Bayesian approach generally assumes that we construct a prior, and use standard probabilistic conditioning to update that prior as new information is obtained. In this approach, the representation of the knowledge obtained has no effect on the conclusions. Two pieces of information that are semantically equivalent (denote the same event) will have precisely the same effect when used to condition a distribution. 
+ +In this paradigm, our analysis is more directly related to the step that precedes the probabilistic conditioning—the selection of the prior. When we have very specific beliefs that we want to encode in a prior distribution (as we do, for example, when constructing a Bayesian network), we design our prior to reflect these beliefs in terms of the vocabulary used. For example, if we have a particular distribution in mind over the location of an object, we will encode it one way when representing the space in terms of Cartesian coordinates, and in another way when using polar coordinates. In effect, we can view the representation transformation as an embedding $f$, and the two priors as corresponding under $f$, in the sense of Definition 4.2. Thus, the design of the prior already takes the representation into account. + +On the other hand, when we are trying to construct an “uninformed” prior for some class of problems, the issue of representation independence becomes directly relevant. Indeed, +---PAGE_BREAK--- + +most of the standard problems with maximum entropy arise even in the simple case when we simply do Bayesian conditioning starting with a uniform prior over our space. + +A standard approach in Bayesian statistics is to use the invariance under certain transformations in order to define an appropriate uninformed prior. For example, we might want a prior over images that is invariant to rotation and translation. In certain cases, once we specify the transformation under which we want a measure to be invariant, the measure is uniquely determined (Jaynes, 1968; Kass & Wasserman, 1993). In this case, the argument goes, the uniquely determined measure is perforce the “right” one. This idea of picking a prior using its invariance properties is in the same spirit as the approach we take in Section 6. 
Indeed, as this approach simply uses standard probabilistic conditioning for objective information (such as observations), the Bayesian approach with an uninformed prior invariant to a set of embeddings is, in a sense, a special case. However, our approach does not force us to choose a unique prior. Rather, we allow the use of a set of prior distributions, allowing us to explore a wider spectrum of inference procedures.

This approach is also related to the work of Walley (1996), who observes that representation independence is an important desideratum in certain statistical applications involving multinomial data. Walley proposes the use of sets of Dirichlet densities to encode ignorance about a prior, and shows that this approach is representation independent in its domain of application.

# 8. Conclusions

This paper takes a first step towards understanding the issue of representation dependence in probabilistic reasoning, by defining notions of invariance and representation independence, showing that representation independence is incompatible with drawing many standard default conclusions, and defining limited notions of invariance that might allow a compromise between the desiderata of being able to draw interesting conclusions (not already entailed by the evidence) and representation independence. Our focus here has been on inference in probabilistic logic, but the notion of representation independence is just as important in many other contexts. Our definitions can clearly be extended to non-probabilistic logics. As we mentioned, Jaeger (1996) has obtained some results on representation independence in a more general setting, but there is clearly much more that can be done. More generally, it would be of interest to understand better the tension between representation independence and the strength of conclusions that can be drawn from an inference procedure. 
+ +# Acknowledgments + +Thanks to Ed Perkins for pointing us to (Keisler & Tarski, 1964) and, in particular, the result that a countably additive probability measure defined on a subalgebra of an algebra $\mathcal{F}$ could not necessarily be extended to a countably additive probability measure on $\mathcal{F}$. Thanks to the reviewers of the paper for their perceptive comments and for pointing out (Horn & Tarski, 1948). Much of Halpern's work on the paper was done while he was at the IBM Almaden Research Center. His recent work has been supported by NSF under +---PAGE_BREAK--- + +grant IRI-96-25901 and IIS-0090145 and ONR under grant N00014-01-1-0795. Some of +Koller's work was done at U.C. Berkeley. Her research was sponsored in part by the Air +Force Office of Scientific Research (AFSC), under Contract F49620-91-C-0080, and by a +University of California President's Postdoctoral Fellowship. Daphne Koller's later work +on the paper was supported through the generosity of the Powell foundation, and by ONR +grant N00014-96-1-0718. A preliminary version of this appears in *Proceedings of IJCAI '95*, +pp. 1853–1860. + +# Appendix A. Proofs + +## A.1 Proofs for Section 2 + +**Proposition 2.5:** Let $\vdash$ be a relation on probabilistic constraints on $X$ for which the properties Reflexivity, Left Logical Equivalence, Right Weakening, Infinitary And, and Consistency hold for all $KB$ in the domain of $\vdash$. (That is, if $KB$ is in the domain of $\vdash$, in that $KB \vdash \theta$ for some $\theta$, then $KB \vdash KB$, and so on.) Then $\vdash$ is $\vdash_I$ for some $X$-inference procedure $I$. + +**Proof:** Define $I$ as follows. If $A \subseteq \Delta_X$, $KB$ is in the domain of $\vdash$, and $A = [[KB]]_X$ for some statement $KB$, then $A$ is in the domain of $I$ and $I(A) = \cap\{[[\theta]]_X : KB \vdash \theta\}$. 
Note that by Left Logical Equivalence, this is well defined, since if $A = [[KB']]_X$, then $\cap\{[[\theta]]_X : KB \vdash \theta\} = \cap\{[[\theta]]_X : KB' \vdash \theta\}$. If $A \neq [[KB]]_X$ for all statements $KB$, then $A$ is not in the domain of $I$. It remains to check that $I$ is an $X$-inference procedure (i.e., that $I(A) \subseteq A$ and that $I(A) = \emptyset$ iff $A = \emptyset$ for all $A$ in the domain of $I$), and that $\vdash = \vdash_I$. To check that $I$ is an $X$-inference procedure, suppose that $A$ is in the domain of $I$. Thus, $A = [[KB]]_X$. By Reflexivity, it easily follows that $I([KB]) \subseteq [KB]$. Next suppose that $I([KB]) = \emptyset$. It follows that $\cap\{[\theta]_X : KB \vdash \theta\} = \emptyset$. Thus, $\{\theta : KB \vdash \theta\} \models false$. By the Infinitary And rule, we must have $KB \vdash false$. By the Consistency Rule, it follows that $[KB] = \emptyset$. Thus, $I$ is indeed an $X$-inference procedure. Finally, note that if $KB \vdash \psi$ then, by definition of $I$, $I([KB]) \subseteq [\psi]$, so $KB \vdash_I \psi$. For the opposite inclusion, note that if $KB \vdash_I \psi$, then $\{\theta : KB \vdash \theta\} \models \psi$. Thus, by the Infinitary And rule, it follows that $KB \vdash \psi$. ■ + +## A.2 Proofs for Section 3 + +To prove Theorem 3.4, we need the following lemma.
+ +**Lemma A.1:** Given two spaces $X_0$ and $X_1$, measures $\mu^0 \in \Delta_{(X_0, \mathcal{F}_{X_0})}$ and $\mu^1 \in \Delta_{(X_1, \mathcal{F}_{X_1})}$, and subsets $S_0 \in \mathcal{F}_{X_0}$ and $S_1 \in \mathcal{F}_{X_1}$ such that $\mu^0(S_0) = \mu^1(S_1)$, there exists a measure $\mu^2 \in \Delta_{(X_0 \times X_1, \mathcal{F}_{X_0} \times \mathcal{F}_{X_1})}$ such that $\mu^2_{X_i} = \mu^i$, for $i = 0, 1$, and $\mu^2(S_0 \Leftrightarrow S_1) = 1$.¹⁰ + +**Proof:** For $A \times B \in \mathcal{F}_{X_0} \times \mathcal{F}_{X_1}$, define + +$$\mu^2(A \times B) = (\mu^0(A \cap S_0)\mu^1(B \cap S_1)/\mu^1(S_1)) + (\mu^0(A \cap \overline{S_0})\mu^1(B \cap \overline{S_1})/\mu^1(\overline{S_1})),$$ + +where we take $\mu^0(A \cap S_0)\mu^1(B \cap S_1)/\mu^1(S_1) = 0$ if $\mu^1(S_1) = 0$ and take $\mu^0(A \cap \overline{S_0})\mu^1(B \cap \overline{S_1})/\mu^1(\overline{S_1}) = 0$ if $\mu^1(\overline{S_1}) = 0$. Extend to disjoint unions of such sets by additivity. Since + +10. If A and B are sets, we use the notation $A \Leftrightarrow B$ to denote the set $(A \cap B) \cup (\bar{A} \cap \bar{B})$. +---PAGE_BREAK--- + +all sets in $\mathcal{F}_{X_0 \times X_1}$ can be written as disjoint unions of sets of the form $A \times B \in \mathcal{F}_{X_0} \times \mathcal{F}_{X_1}$, +this suffices to define $\mu^2$. To see that $\mu^2$ is actually a measure, note that $\mu^2(X_0 \times X_1) =$ +$\mu^0(S_0) + \mu^0(\overline{S_0}) = 1$. Additivity is clearly enforced by the definition. Finally, to see that +$\mu^2$ has the desired properties, suppose that $\mu^1(S_1) \neq 0$ and $\mu^1(\overline{S_1}) \neq 0$. (The argument is +easier if this is not the case; we leave details to the reader.) Then + +$$ +\begin{align*} +\mu_{X_0}^2(A) &= \mu^2(A \times X_1) = \frac{\mu^0(A \cap S_0)\mu^1(S_1)}{\mu^1(S_1)} + \frac{\mu^0(A \cap \overline{S_0})\mu^1(\overline{S_1})}{\mu^1(\overline{S_1})} \\ +&= \mu^0(A \cap S_0) + \mu^0(A \cap \overline{S_0}) = \mu^0(A).
+\end{align*} +$$ + +Since $\mu^0(S_0) = \mu^1(S_1)$ by assumption (and so $\mu^0(\overline{S_0}) = \mu^1(\overline{S_1})$), + +$$ +\begin{align*} +\mu_{X_1}^2(B) &= \mu^2(X_0 \times B) = \frac{\mu^0(S_0)\mu^1(B \cap S_1)}{\mu^1(S_1)} + \frac{\mu^0(\overline{S_0})\mu^1(B \cap \overline{S_1})}{\mu^1(\overline{S_1})} \\ +&= \mu^1(B \cap S_1) + \mu^1(B \cap \overline{S_1}) = \mu^1(B). +\end{align*} +$$ + +This completes the proof. ■ + +**Theorem 3.4:** If {$I_X : X \in \mathcal{X}$} is a robust $\mathcal{X}$-inference procedure that satisfies DI1, DI2, and DI3, then $I_X$ is essentially entailment for all $X \in \mathcal{X}$. + +*Proof:* Suppose that {$I_X : X \in \mathcal{X}$} is robust and $I_X$ is not essentially entailment for $X \in \mathcal{X}$. +Then there must be a constraint $KB$ on $\Delta_X$ and a set $S \in \mathcal{F}_X$ such that $KB \vdash_I \alpha < \text{Pr}(S) < \beta$ and $KB \not\models \alpha \leq \text{Pr}(S) \leq \beta$. Thus, there must be some $\gamma \notin [\alpha, \beta]$ such that $KB \wedge \text{Pr}(S) = \gamma$ is consistent. We can assume without loss of generality that $\gamma < \alpha$ (otherwise we can replace $S$ by $\overline{S}$). + +We first construct a space $Y_0 \in \mathcal{X}$ that has subsets $U_1, \dots, U_n$ with the following prop- +erties: + +(a) There is no measure $\mu \in \Delta_{Y_0}$ such that $\mu(U_i) > \alpha$, for all $i = 1, \dots, n$. + +(b) For each *i*, there is some measure $\mu'_i \in \Delta_{Y_0}$ such that $\mu'_i(U_i) = 1$ and $\mu'_i(U_j) > \gamma$ for all $j \neq i$. + +We proceed as follows. Choose $n$ and $d$ such that $\gamma < (d-1)/(n-1) < d/n < \alpha$. By assumption, there exists a $Y_0 \in \mathcal{X}$ such that $|Y_0| = n!/(n-d)!$. Without loss of generality, we can assume that $Y_0$ consists of all tuples of the form $(a_1, \dots, a_d)$, where the $a_i$'s are all distinct, and between 1 and $n$.
Let $U_i$ consist of all the tuples in $Y_0$ that have $i$ somewhere in them; it is easy to see that there are $d(n-1)!/(n-d)!$ such tuples. Suppose that $\mu$ is a probability measure in $\Delta_{Y_0}$. It is easy to see that $\mu(U_1) + \cdots + \mu(U_n) = d$, since each tuple in $Y_0$ is in exactly $d$ of the $U_i$'s and so gets counted exactly $d$ times, and the sum of the probabilities of the tuples is 1. Thus, we cannot have $\mu(U_i) > d/n$ for all $i$ (and, *a fortiori*, we cannot have $\mu(U_i) > \alpha$ for all $i$). This takes care of the first requirement. Next, consider a probability distribution $\mu'_i$ that makes all the tuples making up $U_i$ equally probable, and gives all the other tuples probability 0. Then it is easy to see that $\mu'_i(U_i) = 1$. Moreover, since it is straightforward to check that there are exactly $d(d-1)(n-2)!/(n-d)!$ tuples in $U_i \cap U_j$ for $j \neq i$, we have $\mu'_i(U_j) = [d(d-1)(n-2)!/(n-d)!]/[d(n-1)!/(n-d)!] = (d-1)/(n-1)$. This takes care of the second requirement. + +By assumption, there is also a measurable space $Y \in \mathcal{X}$ such that $|Y| = 2$. Suppose +that $Y = \{y, y'\}$. Let $Z = X^n \times Y_0 \times Y^n$, where the $n$ is the same as the $n$ chosen in the +construction of $Y_0$. Again, by assumption, $Z \in \mathcal{X}$. For $i = 1, \dots, n,$ +---PAGE_BREAK--- + +* if $A \subseteq X$, let $A_i = X^{i-1} \times A \times X^{n-i} \times Y_0 \times Y^n \subseteq Z$. + +* let $KB_i = \{\mu \in \Delta_Z : \mu_{X_i} \in [[KB]]_X\}$; + +* let $Y_i$ be the subset of $Y^n$ where the $i$th copy of $Y$ is replaced by $\{y\}$; + +* let $V_i$ be the subset of $Z$ of the form $X^n \times U_i \times Y_i$ (where $U_1, \dots, U_n$ are the subsets of $Y_0$ constructed above). + +Let $\sigma$ be the following constraint on $\Delta_Z$: + +$$ +KB_1 \wedge \dots \wedge KB_n \wedge \Pr(S_1 \Leftrightarrow V_1) = 1 \wedge \dots \wedge \Pr(S_n \Leftrightarrow V_n) = 1.
+$$ + +Let $X_i$ denote the $i$th copy of $X$ in $Z$. That is, for ease of exposition, we view $Z$ as being of the form $X_1 \times \cdots \times X_n \times Y_0 \times Y^n$, although all the $X_i$'s are identical, since it is helpful to be able to refer to a specific $X_i$. We claim that $\sigma$ is $X_i$-conservative over $KB$, for $i=1,\ldots,n$. Thus, we must show that $\text{proj}_{X_i}([[KB_i \wedge \sigma]]_Z) = [[KB]]_X$. It is immediate that $\text{proj}_{X_i}([[KB_i \wedge \sigma]]_Z) \subseteq [[KB]]_X$. For the opposite inclusion, suppose that $\nu \in [[KB]]_X$. We must show that there exists some $\mu \in [[KB_i \wedge \sigma]]_Z$ such that $\mu_{X_i} = \nu$. We proceed as follows. + +Let $\mu'_0$ be a measure in $\Delta_{Y_0}$ such that $\mu'_0(U_i) = 1$ and $\mu'_0(U_j) > \gamma$, for $j \neq i$. By construction of the $U_j$'s, such a measure must exist. For $j \in \{1, \dots, n\}$, let $\mu'_j$ be the measure in $\Delta_Y$ such that $\mu'_j(y) = \nu(S)$ if $j = i$, and $\mu'_j(y) = \gamma/\mu'_0(U_j)$ if $j \neq i$ (and $\mu'_j(y') = 1 - \mu'_j(y)$). Let $\mu'$ be the measure on $Y_0 \times Y^n$ that is the “crossproduct” of $\mu'_0, \dots, \mu'_n$. That is, $\mu'(T_0 \times \dots \times T_n) = \mu'_0(T_0) \times \dots \times \mu'_n(T_n)$. By construction, $\mu'(V_j) = \gamma$ for $j \neq i$ and $\mu'(V_i) = \nu(S)$. + +By assumption, there is a measure $\nu_0 \in \Delta_X$ such that $\nu_0 \models KB \wedge \Pr(S) = \gamma$. We now proceed inductively to define a measure $\mu^k \in \Delta_{X^k \times Y_0 \times Y^n}$ such that (a) $\mu^k((S_1 \Leftrightarrow V_1) \cap \dots \cap (S_k \Leftrightarrow V_k)) = 1$, (b) $\mu^k_{Y_0 \times Y^n} = \mu'$ and $\mu^k_{X_j} = \nu$ for $j = 1, \dots, k$. We define $\mu^0 = \mu'$. For the inductive step, we simply apply Lemma A.1. Finally, we take $\mu$ to be $\mu^n$. Our construction guarantees that $\mu_{X_j} = \nu$, hence that $\mu \models KB_j$.
In addition, the construction guarantees that $\mu \models \Pr(S_1 \Leftrightarrow V_1) = 1 \wedge \dots \wedge \Pr(S_n \Leftrightarrow V_n) = 1$. Hence $\mu \models \sigma$, as desired. + +It follows from DI1, DI2, and DI3 that $\sigma$ is in the domain of $I_Z$. Since $KB_i \wedge \sigma$ is equivalent to $\sigma$, it follows that $KB_i \wedge \sigma$ is also in the domain of $I_Z$. Now, by robustness, for any constraint $\varphi$ on $\Delta_{X_i}$, we have $KB_i \wedge \sigma \vdash_I \varphi$ iff $KB_i \vdash_I \varphi$. Since $KB_i \vdash_I \Pr(S_i) > \alpha$ and $KB_i \wedge \sigma$ is equivalent to $\sigma$, it follows that $\sigma \vdash_I \Pr(S_i) > \alpha$ for $i=1,\dots,n$. By the And rule (Proposition 2.3), it follows that $\sigma \vdash_I \Pr(S_1) > \alpha \wedge \dots \wedge \Pr(S_n) > \alpha$. Since $\sigma \models \Pr((S_1 \Leftrightarrow V_1) \cap \dots \cap (S_n \Leftrightarrow V_n)) = 1$, it easily follows that $\sigma \vdash_I \Pr(U_1) > \alpha \wedge \dots \wedge \Pr(U_n) > \alpha$. But our construction guarantees that $\Pr(U_1) > \alpha \wedge\dots\wedge\Pr(U_n) > \alpha$ is inconsistent. Thus, $\sigma \vdash_I false$. By robustness, it follows that $KB_i \vdash_I false$. But this can happen only if $KB \models false$, which implies that $KB \models \alpha \leq \Pr(S) \leq \beta$, contradicting our original assumption. $\blacksquare$ + +**A.3 Proofs for Section 4** + +To prove Lemma 4.6, it is useful to first prove two additional results: + +Lemma A.2: If f is an X-Y embedding, then f(X) = Y and f(Ø) = Ø. +---PAGE_BREAK--- + +**Proof:** Suppose that $f$ is an $X-Y$ embedding. We first show that $f(\emptyset) = \emptyset$. From the definition of embedding, it follows that $f(\emptyset) = f(X \cap \emptyset) = f(X) \cap f(\emptyset)$. Thus, $f(\emptyset) \subseteq f(X)$. But the definition of embedding also implies that $f(\emptyset) = f(\overline{X}) = \overline{f(X)}$. Thus, we have $\overline{f(X)} \subseteq f(X)$.
This can happen only if $f(X) = Y$ and $f(\emptyset) = \overline{f(X)} = \emptyset$. $\blacksquare$ + +**Lemma A.3:** If $f$ is a faithful $X-Y$ embedding, then + +(a) for any $\mu \in \Delta_X$, there is a measure $\nu \in \Delta_Y$ such that $\nu$ corresponds to $\mu$; + +(b) for any $\nu \in \Delta_Y$, there is a measure $\mu \in \Delta_X$ such that $\mu$ corresponds to $\nu$. + +**Proof:** To prove (a), consider the algebra of subsets of $Y$ of the form $f(S)$, for $S \in \mathcal{F}_X$. Define a function $\nu'$ on the algebra via $\nu'(f(S)) = \mu(S)$. This mapping is well defined, for if $f(S) = f(T)$, then faithfulness guarantees that $S = T$. Moreover, $\nu'$ is a probability measure on the algebra. To see this, note by Lemma A.2 that $\nu'(Y) = \nu'(f(X)) = \mu(X) = 1$. Moreover, if $f(S) \cap f(T) = \emptyset$, then (by definition of embedding) $f(S \cap T) = \emptyset$ and so, since $f$ is faithful, $S \cap T = \emptyset$ (for otherwise $f(S \cap T) = f(\emptyset)$ by Lemma A.2, but $S \cap T \neq \emptyset$). Thus, + +$$\nu'(f(S) \cup f(T)) = \nu'(f(S \cup T)) = \mu(S \cup T) = \mu(S) + \mu(T) = \nu'(f(S)) + \nu'(f(T)).$$ + +As shown by Horn and Tarski (1948), it is possible to extend $\nu'$ to a probability measure $\nu$ on $\mathcal{F}_Y$.¹¹ By construction, we have that $\nu$ corresponds to $\mu$. + +To prove (b), we use a very similar process. Define a function $\mu$ on the algebra of sets $S \in \mathcal{F}_X$ via $\mu(S) = \nu(f(S))$. It is easy to see that $\mu$ is already a probability measure in $\Delta_X$, which by construction corresponds to $\nu$. $\blacksquare$ + +We can now prove Lemma 4.6. + +**Lemma 4.6:** An $X-Y$ embedding $f$ is faithful if and only if for all constraints KB and $\theta$, we have $KB \models \theta$ iff $f^*(KB) \models f^*(\theta)$. + +**Proof:** Suppose that $f$ is faithful.
To show that $KB \models \theta$ iff $f^*(KB) \models f^*(\theta)$, we must show that $[[KB]]_X \subseteq [[\theta]]_X$ iff $[[f^*(KB)]]_Y \subseteq [[f^*(\theta)]]_Y$. The “only if” direction is immediate from the definition of $f^*$. To prove the “if” direction, suppose not. Then there must exist some $\mu \in [[KB]]_X - [[\theta]]_X$ such that $f^*(\mu) \subseteq [[f^*(\theta)]]_Y$. Let $\nu$ be some probability measure that corresponds to $\mu$. Since $\nu \in f^*(\mu) \subseteq f^*(\theta)$, there must be some $\mu' \in [\theta]_X$ such that $\nu \in f^*(\mu')$. Since $\mu' \neq \mu$, there must be some $S \in \mathcal{F}_X$ such that $\mu'(S) \neq \mu(S)$. Since $\nu \in f^*(\mu) \cap f^*(\mu')$, we must have both $\nu(f(S)) = \mu(S)$ and $\nu(f(S)) = \mu'(S)$. But this is a contradiction. This completes the proof of the “if” direction. + +For the converse, suppose we have $KB \models \theta$ iff $f^*(KB) \models f^*(\theta)$ for all KB and $\theta$. Given $S, T \in \mathcal{F}_X$, we have the following chain of equivalences: + +11. It is critical for this result that we are working with finitely additive measures. There may not be a countably additive measure $\nu$ extending $\nu'$, even if $\nu'$ is countably additive. For example, take $\mathcal{F}_X$ to be the Borel sets on $[0, 1]$ and take $\mathcal{F}_Y$ to be all subsets of $[0, 1]$. Let $\nu'$ be Lebesgue measure. It is known that, under the continuum hypothesis, there is no countably additive measure extending $\nu'$ defined on all subsets of $[0, 1]$ (Ulam, 1930) (see (Keisler & Tarski, 1964) for further discussion). +---PAGE_BREAK--- + +$$S \subseteq T$$ + +iff $\Pr(S) = 1 \models \Pr(T) = 1$ + +iff $f^*(\Pr(S) = 1) \models f^*(\Pr(T) = 1)$ (by assumption) + +iff $\Pr(f(S)) = 1 \models \Pr(f(T)) = 1$ (by definition of $f^*$) + +iff $f(S) \subseteq f(T)$. + +Thus, $f$ is faithful. ■ + +**Proposition 4.7:** Let $f$ be a faithful $X-Y$ embedding.
Then the following statements are equivalent: + +(a) $\mu$ and $\nu$ correspond under $f$; + +(b) for all formulas $\theta$, $\mu \models \theta$ iff $\nu \models f^*(\theta)$. + +**Proof:** We first show that (a) implies (b). So suppose that $\mu$ and $\nu$ correspond under $f$. The only if direction of (b) is trivial: If $\mu \models \theta$ then $\nu \in f^*(\mu) \subseteq f^*(\theta)$, since $f$ is faithful. For the if direction, we proceed much as in the proof of Lemma 4.6. Assume that $\nu \models f^*(\theta)$ but that $\mu \not\models \theta$. Since $\nu \in f^*(\theta)$, by definition of $f^*$ there must be some $\mu' \in [\theta]_X$ such that $\nu \in f^*(\mu')$. Since $\mu' \models \theta$ whereas $\mu \not\models \theta$, we must have $\mu \neq \mu'$. Hence, there must be some $S \in \mathcal{F}_X$ such that $\mu(S) \neq \mu'(S)$. Since $\nu \in f^*(\mu) \cap f^*(\mu')$, it follows that $\nu(f(S)) = \mu(S)$ and that $\nu(f(S)) = \mu'(S)$, which gives the desired contradiction. + +We now show that (b) implies (a). Assume by contradiction that $\mu$ and $\nu$ do not correspond under $f$. Then there must be some event $S \in \mathcal{F}_X$ such that $\mu(S) \neq \nu(f(S))$. Let $p = \mu(S)$ and let $\theta$ be the constraint $\Pr(S) = p$. Then $\mu \models \theta$, whereas $\nu \not\models f^*(\theta)$, providing the desired contradiction. ■ + +**Theorem 4.10:** If an $\mathcal{X}$-inference procedure is robust and satisfies DI2, DI4, and DI5, then it is representation independent. + +**Proof:** Suppose that {$I_X : X \in \mathcal{X}$} is a robust $\mathcal{X}$-inference procedure. We want to show that it is representation independent. So suppose $KB, \varphi$ are constraints on $\Delta_X$ and $f$ is an $X-Y$ embedding, for some $X, Y \in \mathcal{X}$. We want to show that $KB \vdash_{I_X} \varphi$ iff $f^*(KB) \vdash_{I_Y} f^*(\varphi)$.
Let $\psi$ be the following constraint on $\Delta_{X\times Y}$: + +$$ (\varphi \Leftrightarrow f^*(\varphi)) \wedge (KB \Leftrightarrow f^*(KB)). $$ + +We claim that $\psi$ is $X$-conservative over $KB$ and $Y$-conservative over $f^*(KB)$. Thus, we must show that $\text{proj}_X([\text{KB} \wedge \psi]_{X\times Y}) = [\text{KB}]_X$ and $\text{proj}_Y([\text{f}^*(\text{KB}) \wedge \psi]_{X\times Y}) = [\text{f}^*(\text{KB})]_Y$. We show that $\text{proj}_X([\text{KB} \wedge \psi]_{X\times Y}) = [\text{KB}]_X$ here; the argument that $\text{proj}_Y([\text{f}^*(\text{KB}) \wedge \psi]_{X\times Y}) = [\text{f}^*(\text{KB})]_Y$ is almost identical. + +Clearly if $\mu \in [\text{KB} \wedge \psi]_{X\times Y}$ then $\mu_X \in [\text{KB}]_X$, so $\text{proj}_X([\text{KB} \wedge \psi]_{X\times Y}) \subseteq [\text{KB}]_X$. For the opposite inclusion, suppose that $\nu \in [\text{KB}]_X$. We want to find a measure $\nu' \in [\text{KB} \wedge \psi]_{X\times Y}$ such that $\nu'_X = \nu$. Let $\nu''$ be any measure in $f^*(\nu)$ and let $\nu' \in \Delta_{X\times Y}$ be the “crossproduct” of $\nu$ and $\nu''$; that is, $\nu'(A \times B) = \nu(A)\nu''(B)$. Clearly $\nu'_X = \nu$. To see that $\nu' \in [\text{KB} \wedge \psi]_{X\times Y}$, it clearly suffices to show that $\nu' \models \psi$. But since $\nu$ and $\nu''$ correspond under $f$, it is immediate from Proposition 4.7 that $\nu \models KB$ iff $\nu'' \models f^*(KB)$ and $\nu \models \varphi$ iff $\nu'' \models f^*(\varphi)$. Thus, $\nu' \models \psi$, as desired. +---PAGE_BREAK--- + +Now suppose that $KB \vdash_{I_X} \varphi$. By DI2 and DI5, $KB \wedge \psi$ is in the domain of $I_{X\times Y}$. By robustness, $KB \wedge \psi \vdash_{I_{X\times Y}} \varphi$. Thus, $I([KB \wedge \psi]_{X\times Y}) \subseteq [[\varphi]]_{X\times Y}$.
Since $I([KB \wedge \psi]_{X\times Y}) \subseteq [[KB \wedge \psi]]_{X\times Y} \subseteq [[\varphi \Leftrightarrow f^*(\varphi)]]_{X\times Y}$, it follows that $I([KB \wedge \psi]_{X\times Y}) \subseteq [[f^*(\varphi)]]_{X\times Y}$. Moreover, $KB \wedge \psi$ is equivalent to $f^*(KB) \wedge \psi$, so $I([f^*(KB) \wedge \psi]_{X\times Y}) \subseteq [[f^*(\varphi)]]_{X\times Y}$, i.e., $f^*(KB) \wedge \psi \vdash_{I_{X\times Y}} f^*(\varphi)$. By DI4, $f^*(KB)$ is in the domain of $I_Y$. Since $\psi$ is $Y$-conservative over $f^*(KB)$, the robustness of $\{I_X : X \in \mathcal{X}\}$ implies that $f^*(KB) \vdash_{I_Y} f^*(\varphi)$. The opposite implication (if $f^*(KB) \vdash_{I_Y} f^*(\varphi)$ then $KB \vdash_{I_X} \varphi$) goes the same way. Thus, $\{I_X : X \in \mathcal{X}\}$ is representation independent. ■ + +Next, we turn our attention to Theorems 4.11 and 4.16. Both of these results follow in a relatively straightforward way from one key proposition. Before we state it, we need some definitions. + +**Definition A.4:** We say that a constraint $KB$ on $\Delta_X$ depends only on $S_1, \dots, S_k \in \mathcal{F}_X$ (the sets $S_1, \dots, S_k$ are not necessarily disjoint) if, whenever $\mu, \mu' \in \Delta_X$ agree on $S_1, \dots, S_k$, then $\mu \models KB$ iff $\mu' \models KB$. ■ + +For example, if $KB$ has the form $\Pr(S_1) > 1/3 \wedge \Pr(S_2) \le 3/4$, then $KB$ depends only on $S_1$ and $S_2$. Similarly, if $KB$ has the form $\Pr(S_1 | S_2) > 3/4$, then $KB$ depends only on $S_1$ and $S_2$. + +**Definition A.5:** Given $S_1, \dots, S_k \in \mathcal{F}_X$, an atom over $S_1, \dots, S_k$ is a set of the form $T_1 \cap \dots \cap T_k$, where $T_i$ is either $S_i$ or $\bar{S}_i$. 
■ + +**Proposition A.6:** Suppose that $\{I_X : X \in \mathcal{X}\}$ is an $\mathcal{X}$-inference procedure and, for some $X \in \mathcal{X}$, there exist $S, S_1, \dots, S_k \in \mathcal{F}_X$ and a consistent constraint $KB$ on $\Delta_X$ that depends only on $S_1, \dots, S_k$, such that the following two conditions are satisfied: + +* both $T \cap S$ and $T \cap \bar{S}$ are nonempty for every nonempty atom $T$ over $S_1, \dots, S_k$, + +* $KB \vdash_{I_X} \alpha < \Pr(S) < \beta$, where either $\alpha > 0$ or $\beta < 1$. + +Then $\{I_X : X \in \mathcal{X}\}$ is not representation independent. + +**Proof:** Suppose, by way of contradiction, that $\{I_X : X \in \mathcal{X}\}$ is a representation-independent inference procedure but nevertheless, for some $X \in \mathcal{X}$, there exist sets $S, S_1, \dots, S_k \in \mathcal{F}_X$ and a knowledge base $KB$ that satisfies the conditions above, for some $\alpha, \beta$. Assume that $\alpha > 0$ (a similar argument can be used to deal with the case that $\beta < 1$). + +Let $T_1, \dots, T_M$ be the nonempty atoms over $S_1, \dots, S_k$. Choose $N$ such that $1/N < \alpha$. Our goal is to find a collection $f_1, \dots, f_N$ of embeddings of $X$ into some $Y \in \mathcal{X}$ such that each of these embeddings has the same effect on $KB$, but such that the sets $f_j(S)$ are disjoint. Since $KB \vdash_{I_X} \Pr(S) > \alpha$, and $f_j^*(KB) = f^*(KB)$ for $j = 1, \dots, N$, it will follow that $f^*(KB) \vdash_{I_Y} \Pr(f_j(S)) > \alpha$ for $j = 1, \dots, N$, a contradiction. We proceed as follows. + +By assumption, there exists a set $Z$ in $\mathcal{X}$ such that $|Z| = MN$. Let $Y = X \times Z$. Since $\mathcal{X}$ is closed under crossproducts, $Y \in \mathcal{X}$. Suppose that $Z = \{z_1, \dots, z_{MN}\}$, and let $Z_i = \{z_{N(i-1)+1}, \dots, z_{Ni}\}$, for $i=1,\dots,M$. Thus, the $Z_i$s partition $Z$ into $M$ disjoint sets, each of cardinality $N$.
Let $B_i = X \times Z_i$, and let $B_{ij} = X \times \{z_{N(i-1)+j}\}$, for $j=1,\dots,N$. It is easy to see that we can find faithful X-Y embeddings $f_1, \dots, f_N$ such that +---PAGE_BREAK--- + +1. $f_j(T_i) = B_i$, for $i=1, \dots, M$, $j=1, \dots, N$, + +2. $f_j(T_i \cap S) = B_{ij}$, for $i=1, \dots, M$, $j=1, \dots, N$. + +Notice that we need the assumption that both $T_i \cap S$ and $T_i \cap \bar{S}$ are nonempty for $T_1, \dots, T_M$ (that is, for each nonempty atom over $S_1, \dots, S_k$) to guarantee that we can find such faithful embeddings. For if $T_i \cap S = \emptyset$, then since $f_j$ is an embedding, $f_j(T_i \cap S) = \emptyset \neq B_{ij}$; and if $T_i \cap \bar{S} = \emptyset$, then $f_j(T_i \cap \bar{S}) = f_j(T_i) - f_j(T_i \cap S) = \emptyset$, which means that $B_i = B_{ij}$, again inconsistent with the construction. + +It is easy to check that, since $\mathbf{KB}$ depends only on $S_1, \dots, S_k$, $f_j^*(\mathbf{KB})$ depends only on $f_j(S_1), \dots, f_j(S_k)$, for $j=1, \dots, N$. We next show that $f_j(S_i)$ is independent of $j$; that is, $f_j(S_i) = f_{j'}(S_i)$ for $1 \le j, j' \le N$. Notice that for $h=1, \dots, k$, we have that $f_j(S_h) = \cup_{T_i \subseteq S_h} f_j(T_i) = \cup_{\{i:T_i \subseteq S_h\}} B_i$. Thus, $f_j(S_h)$ is independent of $j$, as desired. Since $f_j^*(\mathbf{KB})$ depends only on $f_j(S_1), \dots, f_j(S_k)$, it too must be independent of $j$. Let $\mathbf{KB}^*$ be $f_1^*(\mathbf{KB})$ (which, as we have just observed, is identical to $f_2^*(\mathbf{KB}), \dots, f_N^*(\mathbf{KB}))$. + +Since, by assumption, $\{I_X : X \in \mathcal{X}\}$ is representation independent, and $\mathbf{KB} \vdash_{I_X} \mathrm{Pr}(S) > \alpha$, we have that $\mathbf{KB}^* \vdash_{I_Y} \mathrm{Pr}(f_j(S)) > \alpha$, for $j=1, \dots, N$. Thus, $\mathbf{KB}^* \vdash_{I_Y} \mathrm{Pr}(f_1(S)) > \alpha \wedge \dots \wedge \mathrm{Pr}(f_N(S)) > \alpha$.
But note that, by construction, $f_j(S) = \cup_{\{i:T_i \cap S \neq \emptyset\}} B_{ij}$. Thus, the sets $f_j(S)$ are pairwise disjoint. Since $\alpha > 1/N$, we cannot have $N$ disjoint sets each with probability greater than $\alpha$. Thus, $\mathbf{KB}^* \vdash_{I_Y} \mathrm{false}$. But $\mathbf{KB}$ is consistent, so $\mathbf{KB}^* = f_j^*(\mathbf{KB})$ must be as well. Thus, $I_Y(\mathbf{KB}^*) \neq \emptyset$, by assumption. But this contradicts our conclusion that $\mathbf{KB}^* \vdash_{I_Y} \mathrm{false}$. Thus, $\{I_X : X \in \mathcal{X}\}$ cannot be representation independent. $\blacksquare$ + +We can use Proposition A.6 to help prove Theorem 4.11. + +**Theorem 4.11:** If $\{I_X : X \in \mathcal{X}\}$ is a representation-independent $\mathcal{X}$-inference procedure then, for all $X \in \mathcal{X}$, $I_X$ is essentially entailment for all objective knowledge bases in its domain. + +**Proof:** Suppose, by way of contradiction, that $\{I_X : X \in \mathcal{X}\}$ is representation independent but $I_X$ is not essentially entailment for some $X \in \mathcal{X}$ and objective knowledge base $\mathbf{KB}$. Then there must be some set $S \in \mathcal{F}_X$ such that $\mathbf{KB} \vdash_{I_X} \alpha < \mathrm{Pr}(S) < \beta$ and $\mathbf{KB} \not\models \alpha \leq \mathrm{Pr}(S) \leq \beta$. Without loss of generality, we can assume that $\mathbf{KB}$ has the form $\mathrm{Pr}(T) = 1$ for some $T \in \mathcal{F}_X$. Moreover, we can assume that if $\bar{T} \neq \emptyset$, then $\bar{T}$ has a nonempty, measurable strict subset. (For otherwise, choose $Y = \{y, y'\} \in \mathcal{X}$ and consider the space $X' = X \times Y$. By assumption, $X' \in \mathcal{X}$. Let $f$ be the X-X' embedding that maps $U \in \mathcal{F}_X$ to $U \times Y$. Since $I$ is representation independent, we have that $\mathrm{Pr}(T \times Y) = 1 \vdash_I \alpha < \mathrm{Pr}(S \times Y) < \beta$, and $\bar{T} \times \{y\} \subset \bar{T} \times Y$.)
+ +If $\bar{T}$ is nonempty, let $Z$ be any nonempty, measurable strict subset of $\bar{T}$ (which exists by assumption); otherwise let $Z$ be the empty set. Let $U$ be the set $(T \cap S) \cup (\bar{T} \cap Z)$. Notice that $S \cap T = U \cap T$. Moreover, since, for any set $V$, $\mathrm{Pr}(T) = 1 \Rightarrow \mathrm{Pr}(V) = \mathrm{Pr}(V \cap T)$ is valid, it follows from Reflexivity and Right Weakening that $\mathbf{KB} \vdash_{I_X} \mathrm{Pr}(V) = \mathrm{Pr}(V \cap T)$. Thus, $\mathbf{KB} \vdash_{I_X} \mathrm{Pr}(S) = \mathrm{Pr}(S \cap T) = \mathrm{Pr}(U \cap T) = \mathrm{Pr}(U)$. It follows that $\mathbf{KB} \vdash_{I_X} \alpha < \mathrm{Pr}(U) < \beta$. + +We now want to apply Proposition A.6. Note that $\mathbf{KB}$ depends only on $T$. Thus, we must show that $T \cap U$ and $T \cap \bar{U}$ are nonempty, and if $\bar{T}$ is nonempty, then $\bar{T} \cap U$ and $\bar{T} \cap \bar{U}$ are as well. As we observed above, $T \cap U = T \cap S$. Thus, if $T \cap U = \emptyset$, then $T \subseteq \bar{S}$, contradicting our assumption that $\mathbf{KB} \vdash_I \mathrm{Pr}(S) > 0$. It is easy to see that $T \cap \bar{U} = T \cap \bar{S}$. Again, we cannot have $T \cap \bar{U} = \emptyset$, for then $T \subseteq S$, contradicting our assumption that +---PAGE_BREAK--- + +$KB \vDash_I \Pr(S) < 1$. By construction, $\bar{T} \cap U = \bar{T} \cap Z = Z$. By assumption, if $\bar{T} \neq \emptyset$, then $Z \neq \emptyset$. Finally, $\bar{T} \cap \bar{U} = \bar{T} \cap \bar{Z}$; again, by construction, this is a nonempty set if $\bar{T} \neq \emptyset$. It now follows from Proposition A.6 that $\{I_X : X \in \mathcal{X}\}$ is not representation independent.
$\blacksquare$ + +**Corollary 4.12:** If $\{I_X : X \in \mathcal{X}\}$ is a representation-independent $\mathcal{X}$-inference procedure, then for all $X \in \mathcal{X}$, if $KB$ is an objective knowledge base putting constraints on $\Delta_X$, and $KB \vDash_{I_X} \alpha < \Pr(S) < \beta$ for some $\alpha \ge 0$ and $\beta \le 1$, then $\alpha = 0$ and $\beta = 1$. + +**Proof:** Assume the hypotheses of the corollary hold. Since $KB$ is objective, it is of the form $\Pr(T) = 1$ for some $T \in \mathcal{F}_X$. Then there are three possibilities. Either (1) $T \subseteq S$, (2) $T \subseteq \bar{S}$, or (3) both $T \cap S$ and $T \cap \bar{S}$ are nonempty. If (1) holds, we have $KB \models \Pr(S) = 1$, while if (2) holds, we have $KB \models \Pr(S) = 0$. Thus, both (1) and (2) are incompatible with $KB \vDash_{I_X} \alpha < \Pr(S) < \beta$. On the other hand, if (3) holds, it is easy to see that for all $\gamma$, $\Pr(S) = \gamma$ is consistent with $KB$ (since there is a probability measure that assigns probability $\gamma$ to $T \cap S$ and probability $1 - \gamma$ to $T \cap \bar{S}$). Since $KB \vDash_{I_X} \alpha < \Pr(S) < \beta$, by Theorem 4.11, we must have $KB \models \alpha \le \Pr(S) \le \beta$. It follows that the only choices of $\alpha$ and $\beta$ for which this can be true are $\alpha = 0$ and $\beta = 1$. $\blacksquare$ + +**Theorem 4.16:** If $\{I_X : X \in \mathcal{X}\}$ is an $\mathcal{X}$-inference procedure that enforces minimal default independence and satisfies DI1, then $\{I_X : X \in \mathcal{X}\}$ is not representation independent. + +**Proof:** Suppose that $\{I_X : X \in \mathcal{X}\}$ is an $\mathcal{X}$-inference procedure that enforces minimal default independence and satisfies DI1. Choose $X = \{x, x'\} \in \mathcal{X}$ and let $KB$ be $1/3 \le \Pr(x) \le 2/3$. By assumption, $X \times X \in \mathcal{X}$. We can view $KB$ as a constraint on $\Delta_{X\times X}$; in this case, it should be interpreted as $1/3 \le \Pr(\{x\} \times X) \le 2/3$.
By DI1, $KB$ is in the domain of $I_{X\times X}$. Note that $KB$ is equivalent to the constraint $1/3 \le \Pr(x') \le 2/3$. By minimal default independence, we have that $KB \vDash_{I_{X\times X}} \Pr((x,x)) > \Pr(\{x\} \times X)/3$ and that $KB \vDash_{I_{X\times X}} \Pr((x',x')) > \Pr(\{x'\} \times X)/3$. Applying straightforward probabilistic reasoning, we get that $KB \vDash_{I_{X\times X}} \Pr(\{(x,x), (x',x')\}) > 1/3$. We now apply Proposition A.6, taking $S$ to be $\{(x,x), (x',x')\}$ and $S'$ to be $\{(x,x), (x,x')\}$. Note that $KB$ depends only on $S'$. It is almost immediate from the definition of $S$ and $S'$ that all of $S \cap S', \bar{S} \cap S', S \cap \bar{S}',$ and $\bar{S} \cap \bar{S}'$ are nonempty. Thus, by Proposition A.6, $\{I_X : X \in \mathcal{X}\}$ is not representation independent. $\blacksquare$ + +**Lemma 4.13:** Let $\mathcal{X}$ consist of only countable sets. Then $\{I_X^0 : X \in \mathcal{X}\}$ is a representation-independent $\mathcal{X}$-inference procedure. + +**Proof:** As we said in the main part of the text, it easily follows from Proposition 2.5 that $I_X^0$ is an inference procedure for all $X \in \mathcal{X}$, since it is easily seen to have the five properties described in the proposition. To see that $I^0$ is representation independent, suppose that $f$ is a faithful X-Y embedding, for $X,Y \in \mathcal{X}$. Clearly $KB$ is objective if and only if $f^*(KB)$ is objective. If $KB$ is not objective, then it is easy to see that $KB \vDash_{I^0} \theta$ iff $f^*(KB) \vDash_{I^0} f^*(\theta)$, since $\vDash_{I^0}$ reduces to entailment in this case. So suppose that $KB$ is objective and has the form $\Pr(T) = 1$, for some $T \in \mathcal{F}_X$. Then $KB \vDash_{I^0} \theta$ iff $KB \wedge KB^+ \models \theta$. By Lemma 4.6, this holds iff $f^*(KB) \wedge f^*(KB^+) \models f^*(\theta)$.
On the other hand, $f^*(KB) \vDash_{I^0} f^*(\theta)$ iff $f^*(KB) \wedge (f^*(KB))^+ \models f^*(\theta)$. Thus, it suffices to show that $f^*(KB) \wedge f^*(KB^+) \models f^*(\theta)$ iff $f^*(KB) \wedge (f^*(KB))^+ \models f^*(\theta)$. It is easy to show that $(f^*(KB))^+$ implies $f^*(KB^+)$, so that if $f^*(KB) \wedge f^*(KB^+) \models f^*(\theta)$ then $f^*(KB) \wedge (f^*(KB))^+ \models f^*(\theta)$. It is not necessarily +---PAGE_BREAK--- + +the case that $f^*(KB^+)$ implies $(f^*(KB))^+$. For example, consider the embedding described in Example 4.3. In that case, if $KB$ is the objective knowledge base $\Pr(\text{colorful}) = 1$, $KB^+$ is empty, and hence so is $f^*(KB^+)$, while $(f^*(KB))^+$ includes constraints such as $0 < \Pr(\text{green}) < 1$. Nevertheless, suppose that $f^*(KB) \wedge (f^*(KB))^+ \models f^*(\theta)$ and, by way of contradiction, there is some $\nu$ such that $\nu \models f^*(KB) \wedge f^*(KB^+) \wedge \neg f^*(\theta)$. Choose $\mu$ such that $\nu \in f^*(\mu)$. Then $\mu$ and $\nu$ correspond, so $\mu \models KB \wedge KB^+ \wedge \neg\theta$. It is easy to show that there exists $\nu' \in f^*(\mu)$ such that $0 < \nu'(S) < 1$ for all nonempty strict subsets $S$ of $f(T)$. To see this, note that if $\mu(x) \neq 0$, then it suffices to ensure that $\nu'(f(x)) = \mu(x)$ and $\nu'(y) \neq 0$ for all $y$ in $f(x)$. Since $Y$ is countable, this is straightforward. Since $\mu$ and $\nu'$ correspond, we must have that $\nu' \models \neg f^*(\theta) \wedge f^*(KB)$. By construction, $\nu' \models (f^*(KB))^+$. This contradicts the assumption that $f^*(KB) \wedge (f^*(KB))^+ \models f^*(\theta)$. ■ + +**Lemma 4.14:** Suppose that $\mathcal{X}$ consists only of measure spaces of the form $(X, 2^X)$, where $X$ is finite. Then $\{I_X^1 : X \in \mathcal{X}\}$ is a representation-independent $\mathcal{X}$-inference procedure.
+ +**Proof:** Suppose that $X, Y \in \mathcal{X}$, $KB$ and $\varphi$ are constraints on $\Delta_X$, and $f$ is an $X-Y$ embedding. We must show that $KB \Vdash_{I_X^1} \varphi$ iff $f^*(KB) \Vdash_{I_Y^1} f^*(\varphi)$. For the purposes of this proof, we say that a subset $A$ of $\Delta_X$ is interesting if there exists some $S \in \mathcal{F}_X$ such that $A = \{\mu \in \Delta_X : \mu(S) \ge 1/4\}$. It is easy to see that if $KB$ is interesting then $f^*(KB)$ is interesting. The converse is also true, given our assumption that $\mathcal{X}$ consists of only finite spaces where all sets are measurable. For suppose that $f^*(KB)$ is interesting. Then there is a set $T \subseteq Y$ such that $f^*(KB) = \{\nu \in \Delta_Y : \nu(T) \ge 1/4\}$. Let $\mathcal{A} = \{S' \subseteq X : f(S') \supseteq T\}$. Since $X$ is finite, so is $\mathcal{A}$; it easily follows that $S = \cap \mathcal{A} \in \mathcal{A}$.¹² Clearly if $\mu(S) \ge 1/4$, then $f^*(\mu) \subseteq f^*(KB)$, so $\mu \in [[KB]]_X$. Thus, [[KB]]_X ⊇ $\{\mu \in \Delta_X : \mu(S) \ge 1/4\}$. On the other hand, if $\mu \in KB$, then $f^*(\mu) \subseteq f^*(KB)$. Thus, if $\nu \in f^*(\mu)$, since $S \in \mathcal{A}$, it must be the case that $\mu(S) = \nu(f(S)) \ge \nu(T) \ge 1/4$. Thus, [[KB]]_X ⊆ $\{\mu \in \Delta_X : \mu(S) \ge 1/4\}$. It follows that $KB$ is equivalent to $\Pr(S) \ge 1/4$, and so must be interesting. (We must also have $T = f(S)$, although this is not needed for the result.) + +If $KB$ is not interesting, then $KB \Vdash_{I_X^1} \varphi$ iff $KB \models \varphi$ iff $f^*(KB) \models f^*(\varphi)$ (since entailment is representation independent) iff $f^*(KB) \Vdash_{I_Y^1} \varphi$. On the other hand, if $KB$ is interesting, then $KB$ is equivalent to $\Pr(S) \ge 1/4$ for some $S \subseteq X$, and $f^*(KB)$ is equivalent to $\Pr(f(S)) \ge 1/4$. 
Moreover, $KB \Vdash_{I_X^1} \varphi$ iff $\Pr(S) \ge 1/3$ $\models \varphi$ iff $\Pr(f(S)) \ge 1/3$ $\models f^*(\varphi)$ iff $f^*(KB) \Vdash_{I_Y^1} \varphi$. Thus, we get representation independence, as desired. ■ + +## A.4 Proofs for Section 6 + +**Proposition 6.3:** Suppose that $f$ is a faithful $X-Y$ embedding, $\mathcal{D}_X \subseteq \Delta_X$, and $\mathcal{D}_Y \subseteq \Delta_Y$. The following two conditions are equivalent: + +(a) $\mathcal{D}_X$ and $\mathcal{D}_Y$ correspond under $f$; + +(b) for all $\theta$, $\mathcal{D}_X \models \theta$ iff $\mathcal{D}_Y \models f^*(\theta)$. + +12. This is not in general true if $X$ is infinite without the additional requirement that $f(\cup_i A_i) = \cup_i f(A_i)$ for arbitrary unions. +---PAGE_BREAK--- + +**Proof:** To prove that (a) implies (b), assume by way of contradiction that, for some $\theta$, $D_X \models \theta$ but $D_Y \not\models f^*(\theta)$. Then there is some $\nu \in D_Y$ such that $\nu \not\models f^*(\theta)$. Let $\mu \in D_X$ be a measure corresponding to $\mu$. Then, by Proposition 4.7, we have that $\mu \not=\theta$, the desired contradiction. The proof for the other direction of (a) is identical. + +To prove that (b) implies (a), first consider a measure $\mu \in D_X$. We must find a $\nu \in D_Y$ such that $\nu$ corresponds to $\mu$. Suppose that $X = \{x_1, \dots, x_n\}$ (recall that we are restricting to finite spaces in Section 6) and that $\mu(x_i) = a_i$, $i = 1, \dots, n$. Let $\theta$ be the constraint $\wedge_{i=1}^n \text{Pr}(\{x_i\}) = a_i$. By our assumptions about the language, this constraint is in the language. Clearly $[\theta]_X = \{\mu\}$. Since $\mu \in D_X$, we know that $D_X \not\models \neg\theta$. Hence, $D_Y \not\models f^*(\neg\theta)$, so that there exists $\nu \in D_Y$ such that $\nu \notin f^*(\neg\theta)$. Hence $\nu \in f^*(\theta) = f^*(\{\mu\})$. By definition of $f^*$, $\nu$ corresponds to $\mu$. 
+ +Now consider a measure $\nu \in D_Y$, and let $\mu$ be the measure in $\Delta_X$ that corresponds to $\nu$. Assume by way of contradiction that $\mu \notin D_X$. Taking $\theta$ as above, it follows that $D_X \models \neg\theta$ and, therefore, by assumption, $D_Y \models f^*(\neg\theta)$. Thus, $\nu \models f^*(\neg\theta)$. But $\mu \models \theta$ and, by assumption, $\mu$ and $\nu$ correspond. This contradicts Proposition 4.7. ■ + +**Theorem 6.7:** Let $\theta$ be an arbitrary constraint on $\Delta_X$. If $f$ is a faithful X-Y embedding and $\mu$ and $\nu$ correspond under $f$, then $\mu|\theta$ and $\nu|f^*(\theta)$ also correspond under $f$. + +**Proof:** Assume that $\mu$ and $\nu$ correspond under $f$. Recall that we are assuming in this section that $X$ is a finite space; let $X = \{x_1, \dots, x_n\}$. Let $Y_i = f(x_i)$. Given any distribution $\nu'' \in \Delta_Y$, define $\nu_i'' = \nu''|_{Y_i}$ and let $(f^*)^{-1}(\nu'')$ denote the unique $\mu'' \in \Delta_X$ such that $\nu'' \in f^*(\mu'')$. + +Now suppose that $\mu' \in \mu|\theta$. Define $\nu' \in \Delta_Y$ to be the measure such that + +$$\nu'(y) = \mu'(x_i) \cdot \nu_i(y),$$ + +where $i$ is the index such that $y \in Y_i$. Since $\nu_i = \nu|_{Y_i}$, it follows that $\nu_i(Y_i) = 1$. Thus, $\nu'(Y_i) = \mu(x_i)$, and $\nu'$ leaves the relative probabilities of elements within each $Y_i$ the same as in $\nu$. It is easy to verify that $\nu'$ and $\mu'$ correspond. Hence, by Proposition 4.7, $\nu' \models f^*(\theta)$. We claim that $\nu' \in \nu|f^*(\theta)$. To show that, we need show only that $KL_Y(\nu''\|\nu)$ is minimal among all $KL_Y(\nu'''\|\nu)$ such that $\nu'' \models f^*(\theta)$. It follows from standard properties of relative entropy (Cover & Thomas, 1991, Theorem 2.5.3) that for all $\nu'' \in \Delta_Y$, we have + +$$KL_Y(\nu'''\|\nu) = KL_X((f^*)^{-1}(\nu'')\|(f^*)^{-1}(\nu)) + \sum_{i=1}^{n} KL_Y(\nu_i'''\|\nu_i). 
\quad (1)$$ + +Note that $\nu_i = \nu'_i$, so $KL_Y(\nu_i'''\|\nu_i) = 0$, for $i = 1, \dots, n$. Thus, it follows from (1) that $KL_Y(\nu''\|\nu) = KL_X(\mu''\|\mu)$. + +Now, let $\nu'' \in \Delta_Y$ be such that $\nu'' \models f^*(\theta)$ and let $\mu'' = (f^*)^{-1}(\mu'')$. Since $\nu''$ and $\mu''$ correspond under $f$, it follows from Proposition 4.7 that $\mu'' \models \theta$. Using (1) once again, we have that + +$$\begin{align*} +KL_Y(\nu'''\|\nu) &= KL_X(\mu'''\|\mu) + \sum_{i=1}^{n} KL_Y(\nu_i'''\|\nu_i) \\ +&\geq KL_X(\mu'''\|\mu). +\end{align*}$$ +---PAGE_BREAK--- + +But since $\mu' \in \mu|\theta$, we know that $KL_X(\mu' || \mu) \le KL_X(\mu'' || \mu)$. Hence we conclude that + +$$KL_Y(\nu'' || \nu) \ge KL_Y(\nu' || \nu),$$ + +so that $\nu' \in \nu|f^*(\theta).$ + +**Theorem 6.9:** If $f$ is a faithful X-Y embedding, then $I^P$ is invariant under $f$ iff $\mathcal{P}(X)$ and $\mathcal{P}(Y)$ correspond under $f$. + +**Proof:** Suppose that $f$ is a faithful X-Y embedding. By definition, $I^P$ is invariant under $f$ iff, for all $KB, \theta$, we have + +$$KB \vdash_{I^P} \theta \text{ iff } f^*(KB) \vdash_{I^P} f^*(\theta). \qquad (2)$$ + +By definition of $I^P$, (2) holds iff + +$$\mathcal{P}(X)|KB \subseteq [\theta]_X \text{ iff } \mathcal{P}(Y)|f^*(KB) \subseteq [f^*(\theta)]_Y \text{ for all } KB, \theta. \qquad (3)$$ + +By Proposition 6.3, (3) holds iff $\mathcal{P}(X)|KB$ and $\mathcal{P}(Y)|f^*(KB)$ correspond for all $KB$. By Corollary 6.5, if $\mathcal{P}(X)$ and $\mathcal{P}(Y)$ correspond, then $\mathcal{P}(X)|KB$ and $\mathcal{P}(Y)|f^*(KB)$ correspond for all $KB$. On the other hand, if $\mathcal{P}(X)|KB$ and $\mathcal{P}(Y)|f^*(KB)$ correspond for all $KB$, then $\mathcal{P}(X)$ and $\mathcal{P}(Y)$ must correspond: simply take $KB = true$ and observe that $\mathcal{P}(X)|KB) = \mathcal{P}(X)$ and $\mathcal{P}(Y)|f^*(KB) = \mathcal{P}(Y)$. 
■ + +**Proposition 6.10:** Suppose that $X_1 \times \cdots \times X_n$ is the product decomposition on $X$ and, for each $i=1, \dots, n$, $KB_i$ is a constraint on $X_i$, and $S_i$ is a subset of $X_i$. Then + +$$\bigwedge_{i=1}^{n} KB_i \Vdash_{I_{\mathcal{P}_{\Pi}}} \mathrm{Pr}(S_1 \wedge \dots \wedge S_n) = \prod_{i=1}^{n} \mathrm{Pr}(S_i).$$ + +**Proof:** If $KB_i$ is a satisfiable constraint on $\Delta_{X_i}$, for $i=1, \dots, n$, then there exist product measures on $X$ satisfying the constraints $\bigwedge_{i=1}^n KB_i$. These product measures are precisely the measures in $\mathcal{P}_{\Pi}((\bigwedge_{i=1}^n KB_i))$. Since each of these measures satisfies $\mathrm{Pr}(S_1 \wedge \dots \wedge S_n) = \prod_{i=1}^n \mathrm{Pr}(S_i)$ by assumption, the conclusion holds in this case. If any constraint $KB_i$ is not satisfiable, then the result trivially holds. ■ + +**Theorem 6.11:** The inference procedure $I_{\mathcal{P}_{\Pi}}$ is invariant under faithful product embeddings and under permutation embeddings. + +**Proof:** Suppose that $f$ is a faithful X-Y product embedding, $X_1 \times \cdots \times X_n$ is the product decomposition of $X$, and $Y_1 \times \cdots \times Y_n$ is the product decomposition of $Y$. To show that $\mathcal{P}_{\Pi}$ is invariant under $f$, it suffices to show that $\mathcal{P}_{\Pi}(X)$ and $\mathcal{P}_{\Pi}(Y)$ correspond under $f$. Suppose that $\mu \in \mathcal{P}_{\Pi}(Y)$. Then $\mu = \mu_1 \times \cdots \times \mu_n$, where $\mu_i$ is a measure on $X_i$, $i = 1, \dots, n$. Moreover, since $f$ is a product embedding, there exist $f_1, \dots, f_n$ such that $f = f_1 \times \cdots \times f_n$. Let $\nu_i \in f_i^*(\mu_i)$, for $i = 1, \dots, n$. It is easy to check that $\nu = \nu_1 \times \cdots \times \nu_n \in f^*(\mu)$. + +Conversely, suppose that $\nu \in \mathcal{P}_{\Pi}(Y)$. Then $\nu = \nu_1 \times \cdots \times \nu_n$, where $\nu_i \in \Delta_{Y_i}$ for $i = 1, \dots, n$. 
Define $\mu_i \in \Delta_{X_i}$ by setting $\mu_i(S) = \nu_i(f_i(S))$. Since $f_i$ is a faithful $X_i-Y_i$
---PAGE_BREAK---

embedding, it is easy to check that $\mu_i \in \Delta_{X_i}$ and that $\nu_i \in f_i^*(\mu_i)$. Thus, $\nu \in f^*(\mu)$, where $\mu = \mu_1 \times \cdots \times \mu_n \in \mathcal{P}_{\Pi}(X)$. This completes the proof that $\mathcal{P}_{\Pi}$ is invariant under faithful X-Y product embeddings.

The argument that $\mathcal{P}_{\Pi}$ is invariant under faithful X-X permutation embeddings is similar (and easier).
D., & Tribus, M. (Eds.), *The Maximum Entropy Formalism*, pp. 15-118. MIT Press, Cambridge, Mass. + +* Kahneman, D., Slovic, P., & Tversky, A. (Eds.). (1982). *Judgment Under Uncertainty: Heuristics and Biases*. Cambridge University Press, Cambridge/New York. + +* Kass, R. E., & Wasserman, L. (1993). Formal rules for selecting prior distributions: A review and annotated bibliography. Tech. rep. Technical Report #583, Dept. of Statistics, Carnegie Mellon University. +---PAGE_BREAK--- + +Keisler, J., & Tarski, A. (1964). From accessible to inaccessible cardinals. *Fundamenta Mathematica*, **53**, 225–308. + +Kraus, S., Lehmann, D., & Magidor, M. (1990). Nonmonotonic reasoning, preferential models and cumulative logics. *Artificial Intelligence*, **44**, 167–207. + +Kullback, S., & Leibler, R. A. (1951). On information and sufficiency. *Annals of Mathematical Statistics*, **22**, 76–86. + +Lehmann, D., & Magidor, M. (1992). What does a conditional knowledge base entail?. *Artificial Intelligence*, **55**, 1–60. + +Nayak, P. P., & Levy, A. Y. (1995). A semantic theory of abstractions. In Proc. Fourteenth International Joint Conference on Artificial Intelligence (IJCAI '95), pp. 196–203. + +Paris, J. B. (1994). *The Uncertain Reasoner's Companion*. Cambridge University Press, Cambridge, U.K. + +Paris, J., & Vencovská, A. (1990). A note on the inevitability of maximum entropy. *International Journal of Approximate Reasoning*, **4**(3), 183–224. + +Pearl, J. (1988). *Probabilistic Reasoning in Intelligent Systems*. Morgan Kaufmann, San Francisco. + +Salmon, W. (1961). Vindication of induction. In Feigl, H., & Maxwell, G. (Eds.), *Current Issues in the Philosophy of Science*, pp. 245–264. Holt, Rinehart, and Winston, New York. + +Salmon, W. (1963). On vindicating induction. In Kyburg, H. E., & Nagel, E. (Eds.), *Induction: Some Current Issues*, pp. 27–54. Wesleyan University Press, Middletown, Conn. + +Seidenfeld, T. (1987). Entropy and uncertainty. In MacNeill, I. 
B., & Umphrey, G. J. (Eds.), *Foundations of Statistical Inferences*, pp. 259–287. Reidel, Dordrecht, Netherlands. + +Shore, J. E., & Johnson, R. W. (1980). Axiomatic derivation of the principle of maximum entropy and the principle of minimum cross-entropy. *IEEE Transactions on Information Theory*, *IT-26*(1), 26–37. + +Ulam, S. (1930). Zur masstheorie in der allgemeinen mengenlehre. *Fundamenta Mathematicae*, **16**, 140–150. + +Walley, P. (1996). Inferences from multinomial data: learning about a bag of marbles. *Journal of the Royal Statistical Society, Series B*, **58**(1), 3–34. Discussion of the paper by various commentators appears on pp. 34–57. \ No newline at end of file diff --git a/samples_new/texts_merged/199837.md b/samples_new/texts_merged/199837.md new file mode 100644 index 0000000000000000000000000000000000000000..23d631f98c2ac145e56f525fa734ba9745a98e15 --- /dev/null +++ b/samples_new/texts_merged/199837.md @@ -0,0 +1,284 @@ + +---PAGE_BREAK--- + +POLYNOMIAL SYSTEMS, H-BASES, AND +AN APPLICATION FROM KINEMATIC +TRANSFORMS + +Tomas Sauer and Dominik Wagenfuehr + +**Abstract.** We review some algebraic methods to solve systems of polynomial equations and illustrate these methods with a real-world problem that comes from computing kinematic transforms in robotics. + +*Keywords:* Gröbner basis, H-basis, polynomial system, kinematic transform + +*AMS classification:* 65H10, 13P10, 70B15 + +§1. Introduction + +Polynomial systems of equations and the structure of their solutions play a crucial role in many fields of theoretical and applied mathematics. The importance of polynomial equations in applications is often due to the need to determine locations of points from given euclidian distances which obviously leads to quadratic equations. 
+ +The mathematical formulation is as follows: Suppose we are given a finite set $F \subset \mathbb{K}[x] = \mathbb{K}[x_1, \dots, x_n]$ of polynomials in the $n$ variables $x_1, \dots, x_n$ with coefficients in the field $\mathbb{K}$, where usually $\mathbb{K} = \mathbb{Q}, \mathbb{R}, \mathbb{C}$, i.e., the rational, real or complex numbers. Given the equations $F$, the goal is to find the solutions $X \subset \overline{\mathbb{K}}^n$ of the system $F(X) = 0$ in the algebraic closure $\overline{\mathbb{K}}$ of $\mathbb{K}$, that is, + +$$ X = \{ x \in \overline{\mathbb{K}}^n : f(x) = 0, f \in F \}. \qquad (1) $$ + +Note that there are two major differences to the “standard approach” for solving nonlinear equations by means of Newton’s method: The number of equations, $\#F$, need not coincide with the number of variables, $n$, and we are not interested in a single solution, but in the set of all solutions of $F(X) = 0$. + +The equations $f(X) = 0, f \in F$, trivially remain valid if each of them is multiplied by an arbitrary polynomial $q_f \in \mathbb{K}[x]$ and if any such modified equations are added. Hence, + +$$ F(X) = 0 \Leftrightarrow \langle F \rangle(X) = 0, \quad \langle F \rangle = \left\{ \sum_{f \in F} q_f f : q_f \in \mathbb{K}[x] \right\}, \quad (2) $$ + +where $\langle F \rangle$ is the *ideal generated by* $F$; recall that an ideal $\mathcal{I}$ is a subset of $\mathbb{K}[x]$ which is closed under addition and multiplication by arbitrary polynomials, cf. [4]. A subset $G$ of an ideal $\mathcal{I}$ is called a *basis* for the ideal $\mathcal{I}$ if $G$ generates the ideal, i.e., $\mathcal{I} = \langle G \rangle$. With this terminology +---PAGE_BREAK--- + +at hand, we can rephrase (2) as that the solution $X$ depends only on the ideal $\mathcal{I}$, but not on the +individual basis $F$. 
This simple observation is the fundamental idea behind all the algebraic +methods to solve polynomial systems by interpreting the original equations as a basis of an +ideal and then computing another basis for the same ideal from which the solution of the +polynomial system is more easily accessible. In other words: Algebraic methods transform a +given system of equations into a simpler or more useful form. + +§2. Gröbner bases, H-bases and eigenvalues + +Gröbner bases as well as H-bases are special ideal bases which provide representations of minimal degree, where these two types of bases differ by being related to different notions of degree. For Gröbner bases, we need the concept of a term order "<" on $\mathbb{N}_0^n$, that is, a well-ordering on $\mathbb{N}_0^n$ which is compatible with addition, cf. [4]. With respect to this order, any polynomial + +$$f(x) = \sum_{\alpha \in \mathbb{N}_0^n} f_\alpha x^\alpha, \quad f_\alpha \in \mathbb{K}, \quad \#\{\alpha : f_\alpha \neq 0\} < \infty,$$ + +has a maximal nonzero coefficient $f_\alpha$ and $\alpha$ is called the *(multi)degree* of the polynomial +while $f_\alpha x^\alpha$ is usually named the *leading term* of $f$. For H-bases, on the other hand, the +degree is not a multiindex, but a number, namely the maximal length $|\alpha| = \alpha_1 + \cdots + \alpha_n$ of +the indices of nonzero coefficients – the usual *total degree*. Nevertheless, we will write the +degree of a polynomial $f$ as $\delta(f)$, regardless of whether $\delta(f) \in \mathbb{N}_0^n$ or $\delta(f) \in \mathbb{N}_0$; indeed, +there is a joint framework in terms of graded rings, see [5], and [10] for the application in +ideal bases and interpolation. 
A finite set $H \subset \mathbb{K}[x]$ is called *Gröbner basis* or *H-basis*, +depending on whether $\delta$ is based on on a term order or on the total degree, if any $f \in \langle H \rangle$ +can be written as + +$$f = \sum_{h \in H} f_h h, \quad f_h \in \mathbb{K}[x], \quad \delta(f) \ge \delta(f_h h), \quad h \in H. \tag{3}$$ + +The crucial point of Gröbner bases and H-bases is the degree constraint in (3) which helps to avoid a certain redundancy: Assume that one term in the sum on the right hand side were of higher degree than $f$, then there must be at least a second term of the same or higher degree compensating its leading term, and the representation would be redundant, all the terms of degree higher than that of $f$ unneeded. But the main practical advantage of Gröbner bases and the main reason for their development in [2] is the fact that they permit the *algorithmic computation* of a unique remainder $r$, + +$$f = \sum_{h \in H} f_h h + r. \quad (4)$$ + +This can be extended to the grading by total degree [6, 9] and even to arbitrary gradings in +such a way that the remainder $r$ depends only on $\langle H \rangle$ and the parameters of the grading, +see [11] for details. Thus, we have a method to compute a normal form $\nu_{\langle H \rangle}$ modulo $\langle H \rangle$ +and to efficiently perform arithmetic in the quotient ring $\mathcal{P} := \mathbb{K}[x]/\langle H \rangle$. Moreover, $\mathcal{P}$ is a +---PAGE_BREAK--- + +finite dimensional space if and only if the ideal $\mathcal{I} = \langle H \rangle$ has dimension zero which is in turn equivalent to a finite number of solutions $X$ for $H(X) = 0$. 
+ +So here is the first part of the algebraic simplification: Starting with a finite set $F$ of polynomial equations, one computes a Gröbner basis or H-basis $H$ for the ideal $\langle F \rangle$ from which it can be decided whether $F(X) = 0$ has no solution (this happens if and only if $1 \in H$), a finite number of solutions or infinitely many solutions. It is even possible, see [4], to determine the dimension of the algebraic variety formed by the solutions. But in this paper let us assume that $X$ were nonempty and finite. + +The classical method [13], see also [1, 4], to find $X$ is by means of elimination ideals: A purely lexicographical Gröbner basis for a zero dimensional ideal contains some univariate polynomials whose greatest common divisor vanishes at the projections of the common zeros to this coordinate. Solving and substituting the solutions eliminates the variable and continuing this process, one can systematically find all the common zeros. Unfortunately, this process has a terrible complexity and can be very sensitive to perturbations of the coefficients, cf. [7], which limits its use in practical applications. + +There is, however, a different approach proposed by Möller and Stetter [8, 12] which is based on multiplication tables on the quotient space $\mathcal{P}$. To that end, observe that multiplication of $f, g \in \mathcal{P}$ is defined as $\nu_{\mathcal{I}}(fg)$ and that for fixed $g \in \mathbb{K}[x]$ the operation + +$$f \mapsto M_g(f) := \nu_{\mathcal{I}}(fg)$$ + +is a linear operator on $\mathcal{P}$ that can be represented with respect to a basis of $\mathcal{P}$ by a matrix $M_g$ – the so called multiplication table. For $j = 1, \dots, n$ let now $M_j$ denote the multiplication table for the coordinate polynomials $g(x) = x_j$, then the $M_j$ generalize the classical Frobenius companion matrix, form a commuting family of matrices, have joint eigenvectors and the respective eigenvalues are the coordinates of the common zeros. 
Thus, the solutions of $F(X) = 0$ can be found by relying on well-developed methods from Numerical Linear Algebra and the flexibility of H-bases now offers an approach that changes continuously with the parameters and thus is much less sensitive to perturbations, see again [7] for an example. + +### §3. Practical Examples + +In this section we want to apply and illustrate the mathematical concepts of the preceding chapters. To that end, we take a look at two slightly different kinematics. First, we will consider a simple example in three dimensions to show how we obtain the equations needed as starting ideal basis for the computation of a Gröber basis or H-basis. Then we present a kinematic that still appears to be quite simple but leads monstrous Gröbner bases and H-bases and also point out how crucial it is to incorporate “implicit” physical restrictions into the system of equations. + +All our kinematics follow the same basic layout: The manipulator (in most cases used for melding or milling) is connected to three (or more) rods of variable length. In the inverse kinematic transform we know the position of the manipulator and want to compute the “machine parameters”, i.e., the lengths of the rods, while in the forward kinematic transform the location of the manipulator is to be determined from the lengths of the rods. In both cases the ideal basis which we first must construct is the same, namely the implicit system of equations. +---PAGE_BREAK--- + +Figure 1: Simple 3D kinematic. + +The only difference consists of the choice which of the parameters are considered variables to be solved. + +**3.1. A Simple 3D-Kinematic** + +The first example is really easy to solve and we only use it to demonstrate how to obtain +the equations from which we compute the Gröbner- or H-Basis. First we take a look at the +construction. In figure 1 the construction is fixed in three points A₁, A₂ and A₃, coplanar +with the origin {0}, and have the same distance *a* to {0}. 
Furthermore, the distance between +every two points is constant. Now it is easy to see how to obtain the equations we need. +Consider the projection S of T = (x, y, z) in the plane generated by A₁, A₂ and A₃. With +Pythagoras we have + +$$ +l_i = y^2 + \|A_i - S\|_2^2, \quad i = 1, 2, 3, +$$ + +which directly leads to the set of equations + +$$ +y^2 + x^2 + (a-z)^2 - l_1^2 = 0, +$$ + +$$ +y^2 + \left(-\frac{\sqrt{3}}{2}a - x\right)^2 + \left(\frac{-1}{2}a - z\right)^2 - l_2^2 = 0, +$$ + +$$ +y^2 + \left(\frac{\sqrt{3}}{2}a - x\right)^2 + \left(\frac{-1}{2}a - z\right)^2 - l_3^2 = 0. +$$ + +In Maple notation, the ideal is thus generated by $F := [x^2 + y^2 + (a-z)^2 - l_1^2, y^2 + (-\frac{\sqrt{3}}{2}a-x)^2 + (\frac{1}{2}a-z)^2 - l_2^2, y^2 + (\frac{\sqrt{3}}{2}a-x)^2 + (\frac{-1}{2}a-z)^2 - l_3^2]$. +---PAGE_BREAK--- + +Because we used the (square of the) lengths $l_1, l_2$ and $l_3$ explicitly in our ideal basis we can give the solution of the inverse kinematic transform directly as + +$$l_1 = \sqrt{y^2 + x^2 + (a-z)^2},$$ + +$$l_2 = \sqrt{y^2 + \left(-\frac{\sqrt{3}}{2}a - x\right)^2 + \left(\frac{-1}{2}a - z\right)^2},$$ + +$$l_3 = \sqrt{y^2 + \left(\frac{\sqrt{3}}{2}a - x\right)^2 + \left(\frac{-1}{2}a - z\right)^2}.$$ + +For the forward transform we switch the roles of variables and constants which are now declared as $x, y, z$ and $a, b, l_1, l_2, l_3$, respectively. 
Without further problems we compute an H-basis of $F$ as $H = [9a^2y^2 - 3l_1^2a^2 + l_4^2 - l_3^2l_2^2 + l_2^4 + 9a^4 - l_2^2l_1^2 - 3a^2l_2^2 - 3a^2l_3^2 + l_1^4 - l_1^2l_3^2, 6az - l_2^2 + 2l_1^2 - l_3^2, 12ax + 2\sqrt{3}l_3^2 - 2\sqrt{3}l_2^2]$ and by means of multiplication tables of $\mathcal{P}$ and the corresponding eigenvectors we find that + +$$x = \frac{\sqrt{3}(l_3^2 - l_1^2)}{6a},$$ + +$$y = \frac{\sqrt{-l_2^4 + 3l_1^2a^2 - l_3^4 + l_3^2l_2^2 + 3a^2l_3^2 - 9a^4 + l_2^2l_1^2 + 3a^2l_2^2 - l_1^4 + l_1^2l_3^2}}{-3a},$$ + +$$z = \frac{-2l_1^2 + l_2^2 + l_3^2}{6a}.$$ + +Note that the equations for $x$ and $z$ are significantly simpler than the one for $y$. + +Since $y$ appears quadratically in the H-basis, it follows that together with $(x, y, z)$ also $(x, -y, z)$ is a solution of the system. However, this second solution is impossible in physical reality because the rods are flexible but fixed and cannot cross themselves. Unfortunately, it appears impossible to eliminate this unwanted "solution" a priori by adding more equations to the system; in fact, the only way to distinguish between the two solutions is by means of inequalities. + +**Remark 1.** It is worthwhile to mention that not for all values of $l_1, l_2$ and $l_3$ the solution belongs to the real domain as in some cases the solution gains an additional imaginary part because the three rods have no common point. Though physically impossible this is absolutely correct mathematically. Finding additional constraints that eliminate complex solutions would consist of determining the associated *real* ideal. + +## 3.2. The realistic problem + +Now we want to take a close look at a slightly extended version of the latter three dimensional kinematic used in practical applications. In figure 2 the upper part of the construction equals the one in figure 1 while the lower part differs with the manipulator being attached centrally under a platform which is held and moved by the rods. 
To make things simpler, we assume that the vertices $B_1, B_2$ and $B_3$ of the platform form an equilateral triangle with distance $b$ between the points and barycenter $T = (x, y, z)$. To stabilize the construction, the platform +---PAGE_BREAK--- + +Figure 2: Complex 3D kinematic. + +is also linked to the origin $\{0\}$ by an additionally guiding rod which is attached perpendicular +in $T$. + +We will not discuss the ideal basis construction in full detail but should mention a few +facts. First, it is not possible to compute the value of $T$ directly, but it is easily found as +midpoint of the triangle formed by $B_1, B_2, B_3$ once these locations are determined. The +lengths $l_1, l_2$ and $l_3$ are just as easy to obtain as before from the equations + +$$ +\|S - A_i\|_2^2 + \|S - B_i\|_2^2 = \|B_i - A_i\|_2^2, \quad i = 1, 2, 3, +$$ + +in which *S* is the projection of *T*, leading to + +$$ +\begin{align*} +x_1^2 + (z_1 - a)^2 + y_1^2 &= l_1^2, \\ +\left(x_2 + \frac{\sqrt{3}a}{2}\right)^2 + \left(z_2 + \frac{a}{2}\right)^2 + y_2^2 &= l_2^2, \\ +\left(x_3 - \frac{\sqrt{3}a}{2}\right)^2 + \left(z_3 + \frac{a}{2}\right)^2 + y_3^2 &= l_3^2. +\end{align*} +$$ + +As mentioned previously the triangle is equilateral giving us the additional three equations + +$$ +(x_i - x_j)^2 + (y_i - y_j)^2 + (z_i - z_j)^2 = b^2, \quad 1 \le i < j \le 3. +$$ + +The orthogonality of the system can finally be described by the inner products $(T - B_i, T) =$ +---PAGE_BREAK--- + +0, i = 1, ..., 3, which leads to + +$$ +\begin{align*} +(x - x_1) x + (y - y_1) y + (z - z_1) z &= 0, \\ +(x - x_2) x + (y - y_2) y + (z - z_2) z &= 0, \\ +(x - x_3) x + (y - y_3) y + (z - z_3) z &= 0. +\end{align*} +$$ + +Finally we need the fact that the midpoint T of the triangle can be written as sum of the outer points $T = \frac{B_1+B_2+B_3}{3}$ yielding three more equations + +$$ +(x_1 + x_2 + x_3) = 3x, \quad (y_1 + y_2 + y_3) = 3y, \quad (z_1 + z_2 + z_3) = 3z. 
Together, these twelve equations form our initial ideal basis
+$$ + +If we rotate the lower triangle counterclockwise around the origin, so that $B_2$ is below $A_3$, $B_1$ below $A_2$ and $B_3$ below $A_1$ (see figure 4), we find that the point $T' = (0, \sqrt{7}, 0)^T$ resulting from + +$$ +B'_1 = \left(-\frac{3}{2}, \sqrt{7}, -\frac{\sqrt{3}}{2}\right)^T, \quad B'_2 = \left(\frac{3}{2}, \sqrt{7}, -\frac{\sqrt{3}}{2}\right)^T, \quad B'_3 = \left(0, \sqrt{7}, \sqrt{3}\right)^T. +$$ + +is another solution of our polynomial system. + +Consequently, we obtain, by simple rotation, a one-parameter family of solutions and that is precisely the reason why our ideal is not zero-dimensional, so that we have add more equations to the ideal basis in order to prevent rotations. In such situations, it is a good idea to give a closer look to reality and indeed it turns out that such torsions of the robot are impossible since the guiding rod is connected to the upper part by a *universal joint* that can only move forwards/backwards and left/right but does not permit rotational movement. +---PAGE_BREAK--- + +Figure 3: Simple Substitution. + +Figure 4: Simple Rotated Substitution. +---PAGE_BREAK--- + +Again, we will not discuss the modeling of the joint in detail, but here is the basic idea behind our approach: If we know the center $T = (x, y, z)$ of the triangle, the position of the outer points $B_1, B_2, B_3$ is fixed. So take a look at the point $S := (0, -\sqrt{x^2+y^2+z^2}, 0)$ which is just the position of $T$ if the kinematic is not moved to any side ("rest position"). We can calculate the angle $\alpha$ between $S$ and $T$, more precisely the term $c_{\alpha} = \cos\alpha$. Let the points $B'_1, B'_2, B'_3$ be the vertices of the lower triangle in this rest position. With the help of rotation matrices and the angle $\alpha$ we can then compute the solution for the points $B_1, B_2, B_3$ explicitly. 
Doing so adds eleven further equations to our former ideal basis which makes us end up with $F := [x_1^2 + (z_1-a)^2 + y_1^2 - l_1^2, (x_2 + \frac{\sqrt{3}a}{2})^2 + (z_2 + \frac{a}{2})^2 + y_2^2 - l_2^2, (x_3 - \frac{\sqrt{3}a}{2})^2 + (z_3 + \frac{a}{2})^2 + y_3^2 - l_3^2, (x_1 - x_2)^2 + (y_1 - y_2)^2 + (z_1 - z_2)^2 - b^2, (x_1 - x_3)^2 + (y_1 - y_3)^2 + (z_1 - z_3)^2 - b^2, (x_2 - x_3)^2 + (y_2 - y_3)^2 + (z_2 - z_3)^2 - b^2, (x - x_1)x + (y - y_1)y + (z - z_1)z, (x - x_2)x + (y - y_2)y + (z - z_2)z, (x - x_3)x + (y - y_3)y + (z - z_3)z, (x_1 + x_2 + x_3) - 3x, (y_1 + y_2 + y_3) - 3y, (z_1 + z_2 + z_3) - 3z], \sqrt{3}dl(x-x_1) - bxz, \sqrt{3}dl(y-y_1) - byz, \sqrt{3}l(z-z_1) + bd, \sqrt{3}lby + 2\sqrt{3}dl(x-x_2) + bxz, -\sqrt{3}lbx + 2\sqrt{3}dl(y-y_2) + byz, 2\sqrt{3}l(z-z_2) - bd, -\sqrt{3}lby + 2\sqrt{3}dl(x-x_3) + bxz, \sqrt{3}lbx + 2\sqrt{3}dl(y-y_3) + byz, 2\sqrt{3}l(z-z_3) - bd, x^2 + y^2 - d^2, x^2 + y^2 + z^2 - l^2]$, where $d = \sqrt{x^2+y^2}$ and $l = \sqrt{x^2+y^2+z^2}$. + +To solve the inverse kinematic problem, we choose the variables as $x_1, y_1, z_1, x_2, y_2, z_2, x_3, y_3, z_3, l, d, l_1, l_2, l_3$ and the constants as $x, y, z, a, b$. 
The H-Basis can be easily computed as $H = [(y^2+x^2)x_1-2xz_z-xy^2+2z^2x-x^3, z_1+2z_3-3z, (2x^2+2y^2)y_2+2zy_z+xbd-2y^3-2yx^2-2yz^2, z_2-z_3, (2x^2+2y^2)x_3+2xz_z+ybd-2z^2x-2xy^2-2x^3, (2x^2+2y^2)y_3+2zy_z-xbd-2y^3-2yx^2-2yz^2, (2x^2+2y^2)x_2+2xz_z-ybd-2z^2x-2x^3-2xy^2, y^2+x^2)y_1-2zy_z-yx^2-y^3+2yz^2, (z^2+y^2+x^2)d^2-2y^2x^2-x^4-x^2z^2-y^4-z^2y^2, (6z^2+6y^2+6x^2)z_3d+(b\sqrt{3}x^2+b\sqrt{3}y^2)l+(-6z^3-6zy^2-6zx^2)d, (12z^2+12y^2+12x^2)z_3^2+(-24zy^2-24zx^2-24z^3)z_3+12z^4+12x^2z^2-b^2x^2-y^2b^2+12z^2y^2, 3bld+(6x^2\sqrt{3}+6\sqrt{3}z^2+6y\sqrt{3})z_3-6\sqrt{3}z^3-6\sqrt{3}zx^2-6y^2z\sqrt{3}, 6z_3l-6zl+\sqrt{3}bd, l^2-x^2-y^2-z^2, 3l_l^1-12az_3-3x^2-b^2-3z^2+18za-3y^2-3a^2, 6y^2+6x^2)l_l^{\frac{1}{5}}+(6xz\sqrt{3}a-6ax^4-6ay^4)z_l-3ayb\sqrt{3}d-6x^4a^4-6xz_za\sqrt{3}-6y^4-6x^4a\sqrt{3}-6xa\sqrt{3}y^4-6x^4a^4-18y^4x^4-18y^4x^4-6a\sqrt{3}y^4-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{
4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-18y^{4/5}-6a^{4/5}-6y^{4/5}-6x^{4/5}-(3z^2+3y^2+3x^2))^{-1}/(3z+3)^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x 
z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z))^{-7/(9)}$, $l_0 = (-(-9z_x z_y - 9z_y z_x - 9z_x z_z - 9z_y z_z) / (9$ +---PAGE_BREAK--- + +$$6x^6 + 6xa\sqrt{3}y^4 - 6x^2a^2z^2 - 12x^2a^2y^2 + 12x^3a\sqrt{3}y^2 - 24y^2x^2z^2 - 2b^2x^2z^2 - 4b^2x^2y^2 - 6a^2y^2z^2 - 2y^2b^2z^2 - 6x^4a^2 - 12y^4z^2 - 18y^4x^2 - 12x^4z^2 - 18x^4y^2 - 2b^2x^4 - 6a^2y^4 - 2y^4b^2 - 12zax^2y^2 + 6x^5a\sqrt{3} - 3ay^3b\sqrt{3}d - 3ayb\sqrt{3}dz^2 - 3ayb\sqrt{3}dx^2 + 6x^3a\sqrt{3}z^2 + 3laxzbd + lax^2\sqrt{3}bd + lay^2\sqrt{3}bd / (6z^2y^2 + 6y^4 + 12y^2x^2 + 6x^2z^2 + 6x^4))^{1/2}, \text{ where}$$ + +$$d = \sqrt{x^2 + y^2} \text{ and } l = \sqrt{x^2 + y^2 + z^2}.$$ + +For the forward transform, the variables are $x_1, y_1, z_1, x_2, y_2, z_2, x_3, y_3, z_3, x, y, zl, d$ and the constants $l_1, l_2, l_3$. Because both Computer Algebra systems we used, Singular and Maple, cannot even compute a Gröbner basis for the ideal as it is given in this form, we had to relocate the points $A_1, A_2$ and $A_3$ to the next integer grid value. 
Furthermore, we will substitute $\{a = 2, b = 4, l_i = 3 \mid i = 1, 2, 3\}$ because the symbolic solution is still too complex, thus changing the ideal to $F = [x_1^2+y_1^2+(2-z_1)^2-9, (-2-x_2)^2+y_2^2+(-1-z_2)^2-9, (2-x_3)^2+y_3^2+(-1-z_3)^2-9, (x-x_1)x+(y-y_1)y+(z-z_1)z, (x-x_2)x+(y-y_2)y+(z-z_2)z, (x-x_3)x+(y-y_3)y+(z-z_3)z, (x_1-x_2)^2+(y_1-y_2)^2+(z_1-z_2)^2-13, (x_3-x_2)^2+(y_3-y_2)^2+(z_3-z_2)^2-16, (x_1-x_3)^2+(y_1-y_3)^2+(z_1-z_3)^2-13, x_1+x_2+x_3-3x, y_1+y_2+y_3-3y, z_1+z_2+z_3-3z, 2dl(x-x_1)-4xz, 2dl(y-y_1)-4yz, 2l(z-z_1)+4d, 8ly+4dl(x-x_2)+4xz, -8lx+4dl(y-y_2)+4yz, 4l(z-z_2)-4d, -8ly+4dl(x-x_3)+4xz, 8lx+4dl(y-y_3)+4yz, 4l(z-z_3)-4d, x^2+y^2-d^2, x^2+y^2+z^2-l^2]$. + +A (tdeg-ordered) Gröbner basis contains no less than 83 elements and therefore cannot be called very small. But at least we can figure out that there are 40 solutions to the equations and with the algorithm from [3, p. 134ff] we can compute the number of real solutions and discover that there are only four of them, thus, up to symmetry, the desired solution and probably one with crossed rods as before. + +In summary one can say that presently the realistic problem is inaccessible, but its terrible complexity originates from “contamination” by the 36 complex solutions which correspond to physically impossible configurations. This is one more major drawback of algebraic methods which can find the solutions only in the algebraic closure of the original field. + +## References + +[1] ADAMS, W. W., AND LOUSTAUNAU, P. *An Introduction to Gröbner Bases*, vol. 3 of *Graduate Studies in Mathematics*. AMS, 1994. + +[2] BUCHBERGER, B. *Ein Algorithmus zum Auffinden der Basiselemente des Restklassenringes nach einem nulldimensionalen Polynomideal*. PhD thesis, Innsbruck, 1965. + +[3] COHEN, A. M., CUYPERS, H., AND STERK, M., Eds. *Some Tapas of Computer Algebra*, vol. 4 of *Algorithms and Computations in Mathematics*. Springer, 1999. + +[4] COX, D., LITTLE, J., AND O'SHEA, D. 
*Ideals, Varieties and Algorithms*, 2. ed. Undergraduate Texts in Mathematics. Springer-Verlag, 1996. + +[5] EISENBUD, D. *Commutative Algebra with a View Toward Algebraic Geometry*, vol. 150 of *Graduate Texts in Mathematics*. Springer, 1994. + +[6] MÖLLER, H. M., AND SAUER, T. H-bases for polynomial interpolation and system solving. *Advances Comput. Math.* **12** (2000), 335–362. +---PAGE_BREAK--- + +[7] MÖLLER, H. M., AND SAUER, T. H-bases II: Applications to numerical problems. In *Curve and Surface fitting: Saint-Malo 1999* (2000), A. Cohen, C. Rabut, and L. L. Schumaker, Eds., Vanderbilt University Press, pp. 333–342. + +[8] MÖLLER, H. M., AND STETTER, H. J. Multivariate polynomial equations with multiple zeros solved by matrix eigenproblems. *Numer. Math.* **70** (1995), 311–329. + +[9] SAUER, T. Gröbner bases, H-bases and interpolation. *Trans. Amer. Math. Soc.* **353** (2001), 2293–2308. + +[10] SAUER, T. Ideal bases for graded polynomial rings and applications to interpolation. In *Multivariate Approximation and Interpolation with Applications* (2002), M. Gasca, Ed., vol. 20 of *Monograph. Academia de Ciencias de Zaragoza*, Academia de Ciencias Zaragoza, pp. 97–110. + +[11] SAUER, T. Polynomial interpolation in several variables: Lattices, differences, and ideals. In *Multivariate Approximation and Interpolation*, M. Buhmann, W. Hausmann, K. Jetter, W. Schaback, and J. Stöckler, Eds. Elsevier, 2006, pp. 189–228. + +[12] STETTER, H. J. Matrix eigenproblems at the heart of polynomial system solving. *SIGSAM Bull.* **30**, 4 (1995), 22–25. + +[13] TRINKS, W. Über B. Buchbergers Verfahren, Systeme algebraischer Gleichungen zu lösen. *J. Number Theory* **10** (1978), 475–488. + +Tomas Sauer + +Lehrstuhl für Numerische Mathematik +Universität Giessen + +Heinrich-Buff-Ring 44 +D-35392 Gießen, Germany + +Dominik Wagenführ + +Siemens AG +A&D MC RD 7 + +Frauenauracher Str. 
80 +D-91056 Erlangen, Germany + +Tomas.Sauer@math.uni-giessen.de Dominik.Wagenfuehr@automation.siemens.co \ No newline at end of file diff --git a/samples_new/texts_merged/2092097.md b/samples_new/texts_merged/2092097.md new file mode 100644 index 0000000000000000000000000000000000000000..52f30a7186d2108abf077a972de40374a694498c --- /dev/null +++ b/samples_new/texts_merged/2092097.md @@ -0,0 +1,346 @@ + +---PAGE_BREAK--- + +# A GROUP OF AUTOMORPHISMS OF THE HOMOTOPY GROUPS + +HIROSHI UEHARA + +It is well known that the fundamental group $\pi_1(X)$ of an arcwise connected topological space $X$ operates on the $n$-th homotopy group $\pi_n(X)$ of $X$ as a group of automorphisms. In this paper I intend to construct geometrically a group $\mathcal{H}(X)$ of automorphisms of $\pi_n(X)$, for every integer $n \ge 1$, which includes a normal subgroup isomorphic to $\pi_1(X)$, so that the factor group of $\mathcal{H}(X)$ by $\pi_1(X)$ is completely determined by some invariant $\mathcal{L}(X)$ of the space $X$. The complete analysis of the operation of the group on $\pi_n(X)$ is given in §3, §4, and §5. + +Throughout the whole paper, $X$ denotes an arcwise connected topological space which has such suitable homotopy extension properties as a polyhedron does, and all mappings are continuous transformations. + +## §1. Definition of the group $\mathcal{H}(X)$. + +Let $x_0$ be an arbitrary point of the space $X$, and $\Omega$ a collection $\mathcal{X}^*(x_0, x_0)$ of all the mappings that transform $X$ into $X$ and $x_0$ into $x_0$. For two maps $a, b \in \Omega$, $a$ is said to be homotopic to $b$ (in notation : $a \sim b$) if there exists a homotopy $h_t \in \Omega$ (for $1 \le t \le 0$) such that $h_0 = a$ and $h_1 = b$. A mapping $a \in \Omega$ is called to have a (two sided) homotopy inverse, if there is a map $\varphi \in \Omega$ such that $\alpha\varphi \sim 1$ and $\varphi\alpha \sim 1$, where 1 denotes the identity transformation of $X$ onto itself. 
Let $\Omega^*$ be the collection of all the mappings belonging to $\Omega$, each of which has a homotopy inverse. + +Now let $X \times I$ be the topological product of $X$ and the line segment $I$ between 0 and 1, and let us consider the totality $U$ of the mappings $\vartheta : X \times I \rightarrow X$ which satisfy the following conditions : + +$$ (1.1) \qquad \begin{aligned} \text{i)} & \quad \theta |_{X \times 0} \in \Omega^* \\ \text{ii)} & \quad \theta(x_0, 1) = x_0 \end{aligned} \} $$ + +For two maps $\theta, \theta' \in U$, $\theta$ is homotopic to $\theta'$ (notation : $\theta \sim \theta'$) if there exists a homotopy $h_t : X \times I \to X$ (for $1 \le t \le 0$) such that + +Received Oct. 25, 1950. + +I should like to express my sincere gratitude for the courtesies extended to me by Professor S. T. Hu. This paper is inspired by his paper, "On the Whitehead group of automorphisms of the relative homotopy groups." +---PAGE_BREAK--- + +$$ (1.2) \qquad \begin{alignedat}{2} \text{i)} \qquad & h_0 &&= \theta, \\ \text{ii)} \qquad & h_t(x_0, 0) &&= h_t(x_0, 1) = x_0. \end{alignedat} $$ + +It is easily verified that this relation is an equivalence relation, and therefore $U$ is divided into equivalence classes in this sense. + +We shall denote by $[\theta]$ the class containing $\theta$. For $\theta \in U$ we construct a mapping $\sigma_0 \in U$ as follows: a mapping $\bar{\sigma}_\theta$ which is defined continuously on the set $((X \times 0) \uplus (x_0 \times I))$ such that $\bar{\sigma}_\theta(x, 0) = x$ and $\bar{\sigma}_\theta(x_0, t) = \theta(x_0, t)$, can be extended to a mapping $\sigma_0 \in U$, provided that $\{x_0\}$ has a homotopy extension property in $X$ relative to $X$. 
The extended mapping is, of course, not unique but the homotopy class containing $\sigma_0$ is uniquely determined if the set $((x_0 \times I) \uplus (X \times 0) \uplus (X \times 1))$ has a homotopy extension property in $X \times I$ relative to $X$; another arbitrarily extended map $\sigma'_0$ is homotopic to $\sigma_0$. Now two maps $\theta_1, \theta_2 \in U$ are 'multiplied' together by the rule, + +$$ (1.3) \qquad \theta_1 \times \theta_2(x, t) = \begin{cases} \rho(x, 2t), & \frac{1}{2} \le t \le 0, \\ \sigma_{\theta_2}(\rho(x, 1), 2t-1), & 1 \le t \le \frac{1}{2}, \end{cases} $$ + +where $\rho(x, t) = \theta_2(\theta_1(x, t), 0)$. Then we have + +**LEMMA 1.1** $\theta_1 \times \theta_2$ is again a member of the collection $U$. + +*Proof.* Let $a_1(x) = \theta_1(x, 0)$, $a_2(x) = \theta_2(x, 0)$, then both $a_1$ and $a_2$ belong to $\Omega^*$, so that $a_1$ and $a_2$ have homotopy inverses $\varphi_1, \varphi_2$ respectively. From the considerations that $\varphi_1\varphi_2$ is a homotopy inverse of $\omega a_1$ and that $\theta_1 \times \theta_2(x, 0) = \rho(x, 0) = \theta_2(\theta_1(x, 0), 0) = \theta_2(a_1(x), 0) = a_2(a_1(x))$, we have $\theta_1 \times \theta_2 | X \times 0 = \Omega^*$ and therefore the condition (1.1) i) is satisfied. Also we have $\theta_1 \times \theta_2(x_0, 1) = \sigma_{\theta_2}(\rho(x_0, 1), 1) = \sigma_{\theta_2}(x_0, 1) = \theta_2(x_0, 1) = x_0$. This proves the Lemma. + +**LEMMA 1.2** The class $[\theta_1 \times \theta_2]$ depends only on the classes $[\theta_1]$ and $[\theta_2]$. + +*Proof.* Let $\theta'_1 \in [\theta_1]$ and $\theta'_2 \in [\theta_2]$, then there exist two homotopies $h_s$, $k_s$: $X \times I \rightarrow X$ ($1 \le s \le 0$) such that $h_0 = \theta'_1$, $h_1 = \theta'_2$, $k_0 = \theta'_2$, and $k_1 = \theta'_2$. Putting $\rho_s(x, t) = k_s(h_s(x, t), 0)$, we have + +$$ (1.4) \qquad \left. 
\begin{aligned} \text{i)} & \rho_s(x, t) = \theta_s(\theta'_1(x, t), 0), \quad \rho_s(t) = \theta'_2(\theta'_1(t), 0), \\ \text{ii)} & \rho_s(x_0, 0) = k_s(h_s(x_0, 0), 0) = k_s(x_0, 0) = x_0, \\ \text{iii)} & \rho_s(x_0, 1) = k_s(h_s(x_0, 1), 0) = k_s(x_0, 0) = x_0. \end{aligned} \right\} $$ + +Since $k_s(x_0, 0) = k_s(x_0, 1) = x_0$, we can construct, in virtue of the homotopy extension properties previously mentioned, $\sigma_{k_s} \in U$ ($1 \le s \le 0$), which is also continuous with respect to $\epsilon$, just as in case of $\sigma_\epsilon$. Then clearly we have $\sigma_{k_s}(x, 0) = x$ and $\sigma_{k_s}(x_0, t) = k_s(x_0, t)$ by the construction of the function $\sigma_{k_s}$. + +$$ H_s(x, t) = \begin{cases} \rho_s(x, 2t), & \frac{1}{2} \le t \le 0, \\ \sigma_{k_s}(\rho_s(x, 1), 2t-1), & 1 \le t \le \frac{1}{2}, \end{cases} $$ +---PAGE_BREAK--- + +is obviously continuous and satisfies the conditions (1.2) of the homotopy; as +to the condition ii), we have $H_3(x_0, 0) = \rho_5(x_0, 0) = x_0$ from (1.4) ii) and +$H_3(x_0, 1) = \sigma_{k_8}(\rho_5(x_0, 1), 1) = \sigma_{k_5}(x_0, 1) = k_5(x_0, 1) = x_0$ from (1.4) iii). + +Since (1.2) i) is evidently satisfied from (1.4) i), the lemma has been proved. +Thus the multiplication in $U$ induces a multiplication in the set of the homotopy +classes ; $[\theta_1] \times [\theta_3] \equiv [\theta_1 \times \theta_2]$. + +**THEOREM 1.** By the multiplication defined above, all the homotopy classes of $U$ constitute a group $\mathfrak{A}(X)$ with $x_0$ as the base point. + +*Proof.* Let us prove that the multiplicatoin is associative. Let $\theta_1, \theta_2, \theta_3 \in U$, +then $([\theta_1] \times [\theta_2]) \times [\theta_3]$ and $[\theta_1] \times ([\theta_2] \times [\theta_3])$ are represented by mappings +$(\theta_1 \times \theta_2) \times \theta_3$ and $\theta_1 \times (\theta_2 \times \theta_3)$ respectively. 
By definition + +$$ +\begin{align*} +(\theta_1 \times \theta_2) \times \theta_3 (x, t) &= \begin{cases} +\theta_3 (\theta_2 (\theta_1 (x, 4t), 0), 0), & \frac{1}{2} \ge t \ge 0, x \in X, \\ +\theta_3 (\sigma_{\theta_2} (\theta_2 (\theta_1 (x, 1), 0), 4t-1), 0), & \frac{1}{2} \ge t \ge \frac{1}{4}, x \in X, \\ +\sigma_{\theta_3} (\theta_3 (\sigma_{\theta_2} (\theta_2 (\theta_1 (x, 1), 0), 1), 0), 2t-1), & 1 \ge t \ge \frac{3}{4}, x \in X, +\end{cases} +\\[1em] +\theta_4 \times (\theta_2 \times \theta_3) (x, t) &= \begin{cases} +(\theta_3 (\theta_2 (\theta_1 (x, 2t), 0), 0), & \frac{1}{2} \ge t \ge 0, x \in X, \\ +\sigma_{\theta_2 \times \theta_3} (\theta_3 (\theta_2 (\theta_1 (x, 1), 0), 0), 2t-1), & 1 \ge t \ge \frac{1}{4}, x \in X. +\end{cases} +\end{align*} +$$ + +As it is rather difficult to show directly the existence of homotopy between +($\theta_1 \times \theta_2$) $\times$ $\theta_3$ and $\theta_1 \times (\theta_2' \times \theta_3)$, we prove it by making use of the homotopy +extension property referred to above. 
From the relation above we have ($\theta_1 \times \theta_2$) +$\times \theta_5 (x, 0) = \theta_3 (\theta_5 (\theta_1 (x, 0), 0), 0) = \theta_1 \times (\theta_2 \times \theta_3) (x, 0)$, and from the property +of $\sigma_\theta$ we have + +$$ +(1.6) \quad (\theta_1 \times \theta_2) \times \theta_3(x_0, t) = \begin{cases} \theta_3(\theta_2(\theta_1(x_0, 4t), 0), 0), & \frac{1}{2} \ge t \ge 1 \\ \theta_3(\theta_2(x_0, 4t-1), 0), & \frac{1}{2} \le t \le 1 \\ \theta_3(x_0, 2t-1), & 1 \le t \le \frac{1}{2} \end{cases} +$$ + +Since $\sigma_{\theta_0 \times \theta_3}(\theta_3(\theta_2(\theta_1(x_0, 1), 0), 0), 2t-1) =: \sigma_{\theta_0 \times \theta_3}(x_0, 2t-1) =$ + +$$ +\begin{align*} +&\theta_{\delta}(0, 0), &&\frac{1}{2} \ge t \geq \frac{1}{2}, \\ +&\sigma_{\delta_{\delta}}(0, 0), &&\frac{1}{2} \ge t \geq 1, \\ +&\sigma_{\delta_{\delta}}(x_{\delta}, 4t - 3) &&= \sigma_{\delta_{\delta}}(x_{\delta}, 4t - 3) = \theta_{\delta}(x_{\delta}, 4t - 3), &&1 \ge t \ge \frac{3}{4}, +\end{align*} +$$ + +we have + +$$ +(1.7) \quad \theta_t \times (\theta_s \times \theta_d)(x_o, t) = \begin{cases} \theta_s(\theta_t(\theta_s(x_o, 2t), 0), 0), & \frac{1}{2} \ge t \ge 0, \\ \theta_s(\theta_t(\theta_s(x_o, 4t-2), 0)), & \frac{1}{4} \le t \le \frac{3}{2}, \\ \theta_s(x_o, 4t-3), & 1 \le t \le 1. 
\end{cases} +$$ + +From (1.6) and (1.7) there exists a homotopy $h(x, s, t)$ defined on $\{x_n\} \times I^s \times I^t$ +---PAGE_BREAK--- + +such that + +$$h(x_0, 0, t) = (\theta_1 \times \theta_2) \times \theta_3(x_0, t), \quad 1 \ge t \ge 0,$$ + +$$h(x_0, 1, t) = \theta_1 \times (\theta_2 \times \theta_3)(x_0, t), \quad 1 \ge t \ge 0,$$ + +$$h(x_0, s, 0) = h(x_0, s, 1) = x_0, \quad 1 \ge s \ge 0.$$ + +and + +Moreover putting + +$$h(x, 0, t) = (\theta_1 \times \theta_2) \times \theta_3 (x, t), \quad x \in X, \ 1 \ge t \ge 0,$$ + +$$h(x, 1, t) = \theta_1 \times (\theta_2 \times \theta_3)(x, t), \quad x \in X, \ 1 \ge t \ge 0,$$ + +$$h(x, s, 0) = \theta_3(\theta_2(\theta_1(x, 0), 0), 0), \quad x \in X, \ 1 \ge s \ge 0,$$ + +and + +$h$ is defined continuously on the set $\{(X \times \frac{s}{I} \times 0) \cup [(x_0 \times \frac{s}{I}) \cup (X \times 0) \cup (X \times 1)] \\ \times \frac{t}{I}\}$. Thus, if $\{(x_0 \times I) \cup (X \times 0) \cup (X \times 1)\}$ has a homotopy extension property in $X \times I$ relative to $X$, $h$ can be extended to a mapping $X \times \frac{s}{I} \times \frac{t}{I} \to X$, which gives a homotopy between $(\theta_1 \times \theta_2) \times \theta_3$ and $\theta_1 \times (\theta_2 \times \theta_3)$. + +Next we must prove the existence of the unity in $\mathfrak{A}(X)$. Let $\theta_0(x, t) = x$, then clearly $\theta_0 \in U$. For any $\theta \in U$ we have from the definition of multiplication + +$$ (\theta \times \theta_0)(x, t) = \begin{cases} \rho(x, 2t), & x \in X, \quad \frac{1}{2} \le t \le 0, \\ \sigma_{\theta_0}(\rho(x, 1), 2t-1), & x \in X, \quad 1 \le t \le \frac{1}{2}, \end{cases} $$ + +where $\rho(x, 2t) = \theta_0(\theta(x, 2t), 0) = \theta(x, 2t)$, and $\sigma_{\theta_0}(x, t) = x$ may be assumed. 
Since $\sigma_{\theta_0}(\rho(x, 1), 2t-1) = \rho(x, 1) = \theta_0(\theta(x, 1), 0) = \theta(x, 1)$ for $1 \le t \le \frac{1}{2}$, we have + +$$ (\theta \times \theta_0)(x, t) = \begin{cases} \theta(x, 2t), & x \in X, \quad \frac{1}{2} \le t \le 0, \\ \theta(x, 1), & x \in X, \quad 1 \le t \le \frac{1}{2}. \end{cases} $$ + +Let us define a homotopy $h_s(x, t)$ for $1 \le s \le 0$ as follows; + +$$ h_s(x, t) = \begin{cases} \theta\left(x, \frac{2t}{1+s}\right), & x \in X, \quad \frac{s+1}{2} \le t \le 0, \\ \theta(x, 1), & x \in X, \quad 1 \le t \le \frac{s+1}{2}, \end{cases} $$ + +then $h_s$ satisfies the conditions of the homotopy (1.2), so that $h_0 = \theta \times \theta_0$ and $h_1 = 0$. Thus $\theta_0$ represents the right side unity of the group $\mathfrak{A}(X)$. + +Lastly we proceed to show the existence of the inverse element of any element $[\theta] \in \mathfrak{A}(X)$. By the assumption on an element $\theta$ in $U$, we have $\theta |_{X \times 0} = 0$, so that $\theta |_{X \times 0}$ has a homotopy inverse $\varphi |_{\Omega^*}$. Now we define a mapping $\theta^{-1} : U$ as follows: if we put + +$$ +\begin{align*} +\theta^{-1}(x, 0) &= \varphi(x), && x \in X, \\ +\theta^{-1}(x_0, t) &= \varphi(\theta(x_0, 1-t)), && 1 \ge t \ge 0. +\end{align*} +$$ + +then $\theta^{-1}$ can be extended to a map: $X \times I \to X$ because of the homotopy +---PAGE_BREAK--- + +extension property of {$x_0$}. This extended map $\theta^{-1}$ is shown to represent the inverse of $[\theta]$. Indeed, we have + +$$ \theta \times \theta^{-1}(x, t) = \begin{cases} \rho(x, 2t), & \frac{1}{2} \le t \le 0, x \in X, \\ \sigma_{\theta^{-1}}(\rho(x, 1), 2t-1), & 1 \le t \le \frac{1}{2}, x \in X, \end{cases} $$ + +where $\rho(x, t) = \theta^{-1}(\theta(x, t), 0) = \varphi(\theta(x, t))$, $\sigma_{\theta^{-1}}(x, 0) = x$, and $\sigma_{\theta^{-1}}(x_0, t) = \theta^{-1}(x_0, t) = \varphi(\theta(x_0, 1-t))$. 
As $\varphi$ is a homotopy inverse of $\theta |_{X \times 0}$, and on the other hand $\sigma_{\theta^{-1}}|_{X_0 \times I}$ represents the inverse element of $[\rho |_{X_0 \times I}]$, we have a continuous function $h$ defined on $((X \times \overline{I} \times 0) \cup (X \times 0) \cup (X \times 1)) \cap (X_0 \times \overline{I}) \times \overline{I})$ such that + +$$ h(x, s, 0) = k(x, s), \quad x \in X, s \in \overline{I}, $$ + +$$ h(x_0, s, t) = l(s, t), \quad s \in \overline{I}, t \in \overline{I}, $$ + +$$ h(x, 0, t) = \theta \times \theta^{-1}(x, t), \quad x \in X, t \in \overline{I}, $$ + +$$ h(x, 1, t) = x, \quad x \in X, t \in \overline{I}, $$ + +where $k$ is a homotopy obtained by the relation $\varphi\theta \sim 1$, and $l$ is also a homotopy whose existence is assured by $\rho(x_0, 1-t) = \sigma_{\theta^{-1}}(x_0, t)$. Again, by the aid of a homotopy extension property of $((x_0 \times I) \cup (X \times 0) \cup (X \times 1)))$, $h$ can be extended to a map $X \times I \times I \to X$, which gives a desired homotopy. This completes the proof. + +In order to clarify the conditions preassigned to the space $X$ we put down here all the homotopy extension properties assumed in the arguments of the above Theorem; + +i) $\{x_0\}$ has a homotopy extension property in $X$ relative to $X$, + +(1.8) ii) $\{(x_0 \times I) \cup (X \times 0) \cup (X \times 1)\}$ has a homotopy extension property in $X \times I$ relative to $X$. + +These assumptions are, of course, satisfied by a polyhedron. + +## § 2. A group of automorphisms $\Sigma(X)$ and the structure of $\mathfrak{A}(X)$. + +Now we define a group $\Sigma(X)$, which operates on $\pi_n(X)$, as we shall see later, as a group of automorphisms, and study a homomorphism of $\mathfrak{A}(X)$ onto $\Sigma(X)$, the kernel of which is isomorphic to the fundamental group $\pi_1(X)$ of $X$. 
+ +Let us define a homotopy concept in $\Omega^*$ in the following sense: we shall write $a \sim b$ for $a, b \in \Omega^*$ if there exists a homotopy $h_t \in \Omega$ ($1 \le t \le 0$) such that $h_0 = a$ and $h_1 = b$. Then $\Omega^*$ is divided into homotopy classes. Let us denote by $\Sigma(X)$ the set of all the homotopy classes. For two maps $a, b \in \Omega^*$ we define $(a \times b)(x) = b(a(x))$ for any $x \in X$. Then $a \times b \in \Omega^*$ because $a \times b \in \Omega$ follows immediately from the definition and, if $\varphi$ and $\psi$ are homotopy inverses of $a$ +---PAGE_BREAK--- + +and $b$ respectively, $\psi \times \varphi \in \Omega^*$ is a homotopy inverse of $a \times b$. Furthermore, if $a \sim a'$ and $b \sim b'$, $a \times b \sim a' \times b'$. Thus the multiplication in $\Omega^*$ induces a multiplication in $\Sigma(X)$. + +**THEOREM 2.** $\Sigma(X)$ constitutes a group. + +*Proof.* It is evident from the definition of multiplication that the associative law holds. As to the existence of unity, let $E$ be a class containing the identity transformation of $X$, then $E \cdot A = A$ and $A \cdot E = A$ for any $A \in \Sigma(X)$. Lastly for any $A = [a]$ we choose $A^{-1} = [\varphi]$ containing a homotopy inverse $\varphi$ of $a$. Then $AA^{-1} = E$ and $A^{-1}A = E$ is clear from the definition of homotopy inverse. + +**THEOREM 3.** $\Sigma(X)$ operates on the *n*-th homotopy group $\pi_n(X, x_0)$, for every integer $n \ge 1$, as a group of automorphisms. + +*Proof.* Let $f$ be a representative of an element $\alpha$ of $\pi_n(X)$ and let $a$ be a representative of $A \in \Sigma(X)$. Let us take the mapping $\text{af}: S^n \to X$ as a representative of $A\alpha$. The correspondence $A: \alpha \to A\alpha$ is a transformation of $\pi_n(X)$ into itself because, if $f'$ is another representative of $\alpha$, we have $\text{af} \sim \text{af}'$, and if $a'$ is another representative of $A$, we have also $\text{af} \sim a'f$. 
Then it is easily proved that this correspondence is an automorphism of $\pi_n(X)$. + +*Example of $\Sigma(X)$:* + +Let $X$ be an $n$-sphere $S^n$, then from the concept of Brouwer's degree we have $\Sigma(S^n) = \{E = [1], A = [-1]\}$ where $E$ is a class containing the identity transformation and $A$ is a class containing a mapping of degree $-1$. Since clearly $A^2 = A \cdot A = E$, the group is a cyclic group of order 2. + +Now we intend to define a homomorphism $\varphi$ of $\mathfrak{A}(X)$ onto $\Sigma(X)$. Let $\theta \in U$ be a representative of an element of $\mathfrak{A}(X)$, then $a_\theta = \theta | X \times 0$ represents an element of $\Sigma(X)$. From the homotopy concepts given in §1 and §2, it is obvious that if $\theta \sim \theta'$, we have $a_\theta \sim a_{\theta'}$. By the correspondence $\varphi: [\theta] \to [a_\theta]$ we have the following theorem. + +**THEOREM 4.** $\varphi$ is a homomorphism of $\mathfrak{A}(X)$ onto $\Sigma(X)$, the kernel of which is isomorphic to the fundamental group $\pi_1(X)$. + +*Proof.* For two elements $[\theta_1], [\theta_2] \in \mathfrak{A}(X)$, we have $\varphi([\theta_1]) = [a_{\theta_1}]$ and $\varphi([\theta_2]) = [a_{\theta_2}]$. By definition $\varphi([\theta_1] \times [\theta_2]) = \varphi([\theta_1 \times \theta_2])$ may be represented by a mapping $\theta_1 \times \theta_2 | X \times 0 = \rho(x, 0) = \theta_2(\theta_1(x, 0), 0)$, so that $\theta_1 \times \theta_2 | X \times 0 = a_{\theta_1} \times a_{\theta_2}$. Thus $\varphi([\theta_1] \times [\theta_2]) = \varphi([\theta_1]) \times \varphi([\theta_2])$ is proved. Clearly $\varphi$ is an onto-homomorphism from the definition of the group. + +Lastly, in order to complete the proof it is sufficient to prove that the kernel of $\varphi$ is isomorphic to $\pi_1(X)$. If $\varphi([\theta]) = [a_\theta]$ is unity, we may take without loss of generality a representative $\theta$ of $[\theta]$ as follows : +---PAGE_BREAK--- + +$$ (2.1) \qquad \left. 
\begin{array}{l} \text{i)} \quad \theta: X \times I \to X, \\ \text{ii)} \quad \vartheta(x, 0) = x, \\ \text{iii)} \quad \vartheta(x_{\theta}, 1) = x_0, \end{array} \right\} $$ + +for (1.8) is assumed. To any element $[\theta]$ belonging to the kernel of $\varphi$ let there correspond an element $[\xi_0]$ of the fundamental group $\pi_1(X)$ by the rule, + +$$ (2.2) \qquad \xi_0(t) = \theta(x_0, t). $$ + +This correspondence $\lambda$ has a definite meaning because, if $\theta \sim \theta'$, $\xi_0$ and $\xi_0'$ represent the same element of $\pi_1(X)$. Let us prove that $\lambda$ is an isomorphism. Let $[\theta_1], [\theta_2]$ be two elements belonging to the kernel of $\varphi$, then $[\theta_1] \times [\theta_2]$ is represented by a map $\theta_1 \times \theta_2$, + +$$ \theta_1 \times \theta_2(x, t) = \begin{cases} \theta_3(\theta_1(x, 2t), 0), & 1 \le t \le 0, x \in X, \\ \sigma_{\theta_2}(\theta_3(\theta_1(x, 1), 0), 2t-1), & 1 \le t \le \frac{1}{2}, x \in X. \end{cases} $$ + +Since from (2.1) we have $\theta_2(x, 0) = x$, $\theta_2(\theta_1(x, 2t), 0) = \theta_1(x, 2t)$ and $\sigma_{\theta_2}(\theta_2(\theta_1(x, 1), 0), 2t-1) = \sigma_{\theta_2}(\theta_1(x, 1), 2t-1)$ so that by (2.2) + +$$ \hat{\xi}_{\theta_1 \times \theta_2}(t) = \begin{cases} \theta_1(x_{\theta_1}, 2t), & \frac{1}{2} \le t \le 0, \\ \sigma_{\theta_2}(\theta_1(x_{\theta_2}, 1), 2t-1), & 1 \le t \le \frac{1}{2}. \end{cases} $$ + +Since $\theta_1(x_0, 1) = x_0$ and $\sigma_{\theta_2}(x_0, t) = \theta_1(x_0, t)$, we have $\sigma_{\theta_2}(\theta_1(x_0, 1), 2t-1) = \theta_2(x_0, 2t-1)$. Now $\xi_{\theta_1 \times \theta_2}(t)$ may be described as follows: + +$$ \hat{\xi}_{\theta_1 \times \theta_2}(t) = \begin{cases} \theta_1(x_0, 2t), & \frac{1}{2} \le t \le 0, \\ \theta_2(x_0, 2t-1), & 1 \le t \le \frac{1}{2}. 
\end{cases} $$ + +On the other hand, we have, by the definition of the fundamental group, + +$$ \lambda([\theta_1] \times [\theta_2]) = [\hat{\xi}_{\theta_1 \times \theta_2}] = [\hat{\xi}_{\theta_1}] \circ [\hat{\xi}_{\theta_2}] = \lambda[\theta_1] \circ \lambda[\theta_2], $$ + +so that the homomorphism is established. + +Clearly $\lambda$ is an onto-homomorphism, because of the homotopy extension property (1.3) i). It remains only to prove that from $\xi_{\theta_1} \sim \xi_{\theta_2}$ follows $\theta_1 \sim \theta_2$. It may be assumed that $\theta_1(x, 0) = x$ and $\theta_2(x, 0) = 0$. Since $\xi_{\theta_1} = \xi_{\theta_2}$, a homotopy $h_s(t)$ ($1 \le s \le 0$) exists such that $h_0(t) = \theta_1(x_0, t)$, $h_1(t) = \theta_2(x_0, t)$ and $h_s(0) = h_s(1) = x_0$. A continuous function $h$ may be defined on the set $\{(X \times I)^s (0)^\tau [(X \times 0)^\tau (X \times 1)^\tau (x_0 \times I)] \times I\}$ as follows: + +$$ h(x, s, 0) = x, \quad x \in X, s \in I^s, \\ h(x, 0, t) = \theta_1(x, t), \quad x \in X, t \in I^t, \\ h(x, 1, t) = \theta_2(x, t), \quad x \in X, t \in I^t, \\ h(x_0, s, t) = h_s(t), \quad s \in I^s, t \in I^t. $$ + +If (1.3) ii) is assumed, it is proved by the aid of the extended map $h: X \times I^s \times I^t$ +---PAGE_BREAK--- + +→ X that $\theta_1$ is homotopic to $\theta_2$. This completes the proof. + +### § 3. Operation of $\mathfrak{A}(X)$ on the homotopy groups. + +Let $f$ be a representative of an element $\alpha \in \pi_n(X)$ and $\theta$ be a representative of an element $\vartheta \in \mathfrak{A}(X)$. Let us define $\vartheta\alpha = [h] \in \pi_n(X)$ by the rule, + +$$ (3.1) \qquad h(x) \equiv \theta(f(x), 1). $$ + +This definition has a definite meaning in the sense that $[h]$ depends only on $\alpha$ and $\vartheta$. Then we have, + +**THEOREM 5.** $\vartheta\alpha = (A\alpha)^{\xi}$ where $A = \varphi(\vartheta) \in \Sigma(X)$ and $\xi$ is an element of $\pi_1(X)$ represented by $\theta(x_0, t)$ ($1 \ge t \ge 0$). 
 + +*Proof.* From the definition of homomorphism $\varphi$, $A$ is represented by $a_0(x) = \theta(x, 0)$, and therefore $\theta(f(x), 0) = a_0f(x)$. It is an immediate consequence of the operation of $A$ that $a_0f$ represents an element $A\alpha$ of $\pi_n(X)$. Moreover if $f(p) = x_0$ for a fixed point $p \in S^n$, $\theta(f(p), t) = \theta(x_0, t)$ represents an element $\xi$ of $\pi_1(X)$, so that according to the operation of $\pi_1$ on $\pi_n$ due to Eilenberg $h(x) = \theta(f(x), 1)$ represents an element $(A\alpha)^{\xi} \in \pi_n$. This completes the proof. + +As a direct consequence of Theorem 5 we have, + +**THEOREM 6.** $\mathfrak{A}(X)$ is a group of automorphisms of $\pi_n(X)$ for every integer $n \ge 1$. + +*Proof.* Because of the combination of automorphisms $A$ and $\xi$, the operation of $\vartheta \in \mathfrak{A}(X)$ on $\pi_n$ is also an automorphism of $\pi_n(X)$. + +### § 4. Algebraic construction of $\mathfrak{A}(X)$. + +Now that the operation of $\mathfrak{A}(X)$ on $\pi_n$ has been clarified by Theorem 5, we can construct the group $\mathfrak{A}(X)$ from a purely algebraic standpoint. Let $\chi(X) = \{(A, \xi) ; A \in \Sigma(X), \xi \in \pi_1(X)\}$; the totality of all the ordered pairs consisting of an arbitrarily chosen element of $\Sigma(X)$ and of an arbitrarily chosen element of $\pi_1(X)$. Defining $(A, \xi)(\alpha) = (A\alpha)^{\xi}$ for any $\alpha \in \pi_n(X)$, $(A, \xi)$ operates on $\pi_n(X)$, for every integer $n \ge 1$, as an automorphism. If we define a multiplication in the set $\chi(X)$ of automorphisms just defined by the rule, + +$$ (B, \eta)(A, \xi)(\alpha) = (B, \eta)((A, \xi)(\alpha)), $$ + +then we have $(B, \eta)(A, \xi) \in \chi(X)$. In order to prove this, we need the following lemma. 
 + +**LEMMA 4.1** $A(\alpha^{\xi}) = (A\alpha)^{A\xi} = (A, A\xi)(\alpha)$ for any $\alpha \in \pi_n$, where $A\xi$ can be interpreted in the sense that $\Sigma(X) \ni A$ operates on the homotopy group of any dimension, especially on the fundamental group too. + +*Proof.* Let $\alpha$ be represented by a mapping $f: S^n \to X, S^n \ni p_0 \to x_0$ and let +---PAGE_BREAK--- + +$\xi = [e(t)]$ ($1 \ge t \ge 0$). We have a mapping $F : \{S^n \times (0) \cup (p_0) \times I\} \to X$ such that $F(x, 0) \equiv f(x)$ for any $x \in S^n$, and $F(p_0, t) \equiv e(t)$. From the homotopy extension property of a polyhedron we have an extended map $\bar{F}: S^n \times I \to X$ of F. Since $\bar{F}(x, 0) = f(x)$ and $\bar{F}(p_0, t) = e(t)$, $\bar{F}(x, 1)$ represents an element $\alpha^{\xi} \in \pi_n(X)$. Let $a$ be a representative of $A$. Putting $a(\bar{F}(x, t)) \equiv G(x, t): S^n \times I \to X$ we have $[G(x, 0)] = A\alpha$ from $G(x, 0) = a(f(x))$ and $[G(x, 1)] = A(\alpha^{\xi})$ from $G(x, 1) = a(\bar{F}(x, 1))$. Also, from $G(p_0, t) = a(e(t))$ follows $[G(p_0, t)] = A\xi$. Thus we have $A(\alpha^{\xi}) = (A\alpha)^{A\xi}$. Making use of the lemma, we have + +$$
+\begin{align*}
+(B, \eta)(A, \xi)(\alpha) &\equiv (B, \eta)((A, \xi)(\alpha)) = (B, \eta)((A\alpha)^{\xi}) \\
+&= (B((A\alpha)^{\xi}))^{\eta} \\
+&= ((B(A\alpha))^{B\xi})^{\eta} \\
+&= (B(A\alpha))^{B\xi\cdot\eta} \equiv (A \cdot B, B\xi \cdot \eta)(\alpha).
+\end{align*}
+$$
+
+Thus
+$(B, \eta)(A, \xi) = (A \cdot B, B\xi \cdot \eta) \in \chi(X).$
+
+**THEOREM 7.** By this multiplication $\chi(X)$ forms a group. 
 + +*Proof.* As to the associative law we have + +$$
+\begin{align*}
+(C, \zeta)((B, \eta)(A, \xi)) &= (C, \zeta)(A \cdot B, B\xi \cdot \eta) \\
+&= (AB \cdot C, C(B\xi \cdot \eta) \cdot \zeta) \\
+&= (ABC, BC\xi \cdot C\eta \cdot \zeta)
+\end{align*}
+$$
+
+$$
+\begin{align*}
+((C, \zeta)(B, \eta))(A, \xi) &= (BC, C\eta \cdot \zeta)(A, \xi) \\
+&= (A \cdot BC, BC\xi \cdot (C\eta \cdot \zeta)) \\
+&= (ABC, BC\xi \cdot C\eta \cdot \zeta)
+\end{align*}
+$$
+
+Thus
+$$
+(C, \zeta)((B, \eta)(A, \xi)) = ((C, \zeta)(B, \eta))(A, \xi)
+$$
+
+The existence of the unity is proved as follows :
+
+($E$, $e$)(A, $\xi$) = ($AE$, $E\xi \cdot e$) = (A, $\xi$) where E, e are the unities of $\Sigma(X)$ and $\pi_1(X)$ respectively.
+
+The existence of an inverse element is proved thus :
+
+$$
+(A^{-1}, A^{-1}\xi^{-1})(A, \xi) = (AA^{-1}, A^{-1}\xi \cdot A^{-1}\xi^{-1}) = (E, A^{-1}(\xi\xi^{-1})) = (E, e).
+$$
+
+This completes the proof.
+
+Now the following MAIN THEOREM concerning the relation of two groups $\mathfrak{A}(X)$ and $\chi(X)$ imparts the complete analysis to the structure of $\mathfrak{A}(X)$ and also to the operation of $\mathfrak{A}(X)$ on $\pi_n(X)$ for every integer $n \ge 1$.
+
+**MAIN THEOREM 8.** $\mathfrak{A}(X)$ is isomorphic to the group $\chi(X)$. Moreover, an isomorphism can be established between these groups, preserving the operation on the homotopy groups.
+
+*Proof.* The method of proof being analogous as for Theorems 4, 5, we shall
+---PAGE_BREAK---
+
+restrict ourselves to showing the correspondence between two groups. Let $\theta$ be a representative of $\vartheta \in \mathfrak{A}(X)$ and let $a_0 = \theta | X \times 0, \xi_0 = \theta | x_0 \times I$. Then to $\vartheta$ let there correspond $([a_0], [\xi_0]) \in \chi(X)$. It can be shown that this correspondence is an isomorphism and that the operations of $\vartheta$ and of the corresponding element $([a_0], [\xi_0])$ on $\pi_n$ are the same.
+
+§ 5. Some remarks on the group $\mathfrak{A}(X)$. 
 + +By the aid of the main theorem it is advantageous to use $\chi(X)$ in place of $\mathfrak{A}(X)$ in calculating the invariant $\mathfrak{A}(X)$ of the space $X$. As is easily seen, two distinct elements of $\chi(X)$ do not always operate differently on $\pi_n$ so that as the group of the operation on $\pi_n$, $\chi(X)$ may be reduced to a smaller group. This reduction gives rise to a classification of the space $X$ analogous to the simplicity of a space due to Eilenberg. + +Let $\chi^*(X)$ be the totality of all elements in $\chi(X)$ whose operations on any element of $\pi_n(X)$ are trivial; i.e. $\chi^*(X) = \{(A, \xi) ; (A, \xi)(\alpha) = \alpha$ for any element $\alpha \in \pi_n(X)\}$. Then $\chi^*(X)$ is clearly a normal subgroup of $\chi(X)$. Similarly, put $\chi^{**}(X) = \{(A, e) ; (A, e)(\alpha) = \alpha$ for any $\alpha \in \pi_n(X)\}$ and $\chi^{***}(X) = \{(E, \xi) ; (E, \xi)(\alpha) = \alpha$ for any $\alpha \in \pi_n(X)\}$, then these two groups are also normal in $\Sigma(X)$ and $\pi_1(X)$ respectively as well as in $\chi(X)$. It is well known that the space is $n$-simple in the sense of Eilenberg if $\chi^{***}(X) \cong \pi_1(X)$. It may be an interesting problem to consider the spaces satisfying the conditions such as $\chi^*(X) = \chi(X)$ or $\chi^{**}(X) \cong \Sigma(X)$. + +BIBLIOGRAPHY + +[1] Eilenberg, S., On the relation between the fundamental group of a space and higher homotopy groups, Fundamenta Math. 22 (1939). + +[2] Hu, S. T., On the Whitehead Group of automorphisms of the relative homotopy groups, Portugaliae Math. 7 (1948). 
\ No newline at end of file diff --git a/samples_new/texts_merged/213815.md b/samples_new/texts_merged/213815.md new file mode 100644 index 0000000000000000000000000000000000000000..e1424766a2e4e6d709f1bb4b1ecef5aa4ee97e00 --- /dev/null +++ b/samples_new/texts_merged/213815.md @@ -0,0 +1,271 @@ + +---PAGE_BREAK--- + +Design and Performance of a 24 GHz Band FM-CW +Radar System and Its Application + +Kazuhiro Yamaguchi\*, Mitsumasa Saito\†, Kohei Miyasaka\* and Hideaki Matsue\* + +\* Tokyo University of Science, Suwa + +‡ CQ-S net Inc., Japan + +Email: yamaguchi@rs.tus.ac.jp, matsue@rs.suwa.tus.ac.jp, saitoh@kpe.biglobe.ne.jp + +*Abstract*—This paper describes a design and performance of a FM-CW (Frequency Modulated Continuous Wave) radar system using 24 GHz band. The principle for measuring the distance and the small displacement of target object is described, and the differential detection method for detecting the only target is proposed under the environments which multiple objects are located. In computer simulation, the basic performance of FM-CW radar system is analyzed about the distance resolution and error value according to the various sampling time and sweep bandwidth. Furthermore, the FM-CW radar system with the proposed differential detection method can clearly detect only the target object under the multiple object environment, and the small displacement within 3.11 mm can be measured. In experiment, the performance about measuring the distance and displacement is described by using the designed 24 GHz FM-CW radar system. As the results, it is confirmed that 24 GHz FM-CW radar system with the proposed differential detection method is effective for measuring target under the environments which multiple objects are located. + +Fig. 1. Sawtooth frequency modulation. + +I. INTRODUCTION + +Radar systems with 24 GHz band is based on ARIB standard T73 [1] as sensors for detecting or measuring mobile objects for specified low power radio station. 
And the 24 GHz band radar system can be applied in various field such as security, medical imaging and so on under indoor and outdoor environments. There are various radar systems have been proposed [2], [3], [4], [5]. The pulsed radar system measures the period between the signal is transmitted and received. The pulsed radar can detect the distance in far field, however, the target in near field can not be detected correctly. The Doppler radar system measures the frequency difference between the reflected and transmitted signals. The Doppler radar can detect the moving velocity of the target, however, the distance of the target can not be detected. The FM-CW (Frequency-Modulated Continuous-Wave) radar system [6], [7] is the most widely used for detecting the distance of the target object in near field and the small displacement of the target. + +In this paper, we used and developed the 24 GHz FM-CW radar system for measuring the distance and displacement of an object when the object is static or moves very slowly. The basic performance of the 24 GHz FM-CW radar system for measuring a target object is analyzed by using the computer simulation. Moreover, we proposed the differential detection method for signal processing in the FM-CW radar system in order to detect only the target object under the environments which multiple objects are located. Furthermore, an example of application with the 24 GHz FM-CW radar system is shown in experiment. + +This paper consists of the following sections. Section II describes the principle of a FM-CW radar system. Section III describes and analyses the basic performance and the proposed differential detection method in computer simulation. Section IV shows the experimental results with 24 GHz FM-CW radar system. Finally, Section V concludes this paper. + +II. 
PRINCIPLE FOR FMCW RADAR + +FM-CW (Frequency-Modulated Continuous-Wave) radar +is a radar transmitting a continuous carrier modulated by a +periodic function such as a sawtooth wave to provide range +data shown in Fig. 1. Fig. 2 shows the block diagram of a +FM-CW radar system [8]. + +In the FM-CW radar system, frequency modulated signal +at the VCO is transmitted from the transmitter Tx, then signals +reflected from the targets are received at the receiver Rx. +Transmitted and received signals are multiplied by a mixer, and +beat signals are generated as multiplying the two signals. The +beat signal pass through a low pass filter, then an output signal +is obtained. In this process, the frequency of the input signal +is varied with time at the VCO. The modulation waveform +with a linear sawtooth pattern [9] as shown in Fig. 1. This +figure illustrates frequency-time relation in the FM-CW radar, +and the red line denotes the transmitted signal and the blue +line denotes the received signal. Here, f₀ denotes the center +frequency, fₛ denotes the frequency bandwidth for sweep, and +tₛ denotes the period for sweep. + +We define that the transmitting signal $V_T(f, x)$ at the +transmitter Tx in Fig. 2 is represented as + +$$ +V_{\mathrm{T}}(f,x)=A e^{j \frac{2 \pi f}{c} x}, +\quad(1) +$$ +---PAGE_BREAK--- + +Fig. 2. Block diagram of a FM-CW radar system. + +where *f* denotes a frequency at a time, *x* denotes a distance between a target and the transmitter, *A* denotes an amplitude value and *c* denotes the speed of light. + +The reflected signal $V_R(f, x)$ at the receiver Rx in Fig. 2 is represented as + +$$ V_R(f, x) = \sum_{k=1}^{K} A \alpha_k \gamma_k e^{j \varphi_k} e^{j \frac{2\pi f}{c} (2d_k - x)} , \quad (2) $$ + +where $\gamma_k$ and $\varphi_k$ are the reflectivity coefficients for amplitude and phase on kth target, respectively. 
$\alpha_k$, denotes amplitude coefficient for transmission loss from kth target, and $d_k$ is the distance between the transmitter and the kth target. + +Here, at the receiver whose position is $x = 0$, Eq. (2) is rewritten as + +$$ V_R(f, 0) = \sum_{k=1}^{K} A \alpha_k \gamma_k e^{j \varphi_k} e^{j \frac{2\pi f}{c} (2d_k)} . \quad (3) $$ + +The beat signal are generated as multiplying the transmitted signal in Eq. (1) and the received signal in Eq. (3) at the position $x = 0$. After LPF, the output signal $V_{\text{out}}(f, 0)$ is generated by + +$$ V_{\text{out}}(f, 0) = \sum_{k=1}^{K} A^2 \alpha_k \gamma_k e^{j \varphi_k} e^{j \frac{4\pi f d_k}{c}} . \quad (4) $$ + +By using signal processing, a distance and a displacement for the target are given from the generated output signal in Eq. (4). By using the Fourier transform, the distance spectrum of the output signal $P(x)$ is calculated as follows. + +$$ +\begin{align} +P(x) &= \int_{f_0 - \frac{f_w}{2}}^{f_0 + \frac{f_w}{2}} V_{\text{out}} e^{-j \frac{4\pi f}{c} x} df \nonumber \\ +&= \int_{f_0 - \frac{f_w}{2}}^{f_0 + \frac{f_w}{2}} \sum_{k=1}^{K} A^2 \alpha_k \gamma_k e^{j \varphi_k} e^{j \frac{4\pi f d_k}{c}} e^{-j \frac{4\pi f x}{c}} df \nonumber \\ +&= A^2 \sum_{k=1}^{K} \alpha_k \gamma_k e^{j \varphi_k} \int_{f_0 - \frac{f_w}{2}}^{f_0 + \frac{f_w}{2}} e^{j \frac{4\pi f (d_k - x)}{c}} df \nonumber \\ +&= A^2 \sum_{k=1}^{K} \alpha_k \gamma_k e^{j \varphi_k} e^{j \frac{4\pi f_0 (d_k - x)}{c}} f_w \frac{\sin\left\{\frac{2\pi f_w (d_k - x)}{c}\right\}}{\frac{2\pi f_w (d_k - x)}{c}} . \tag{5} +\end{align} +$$ + +The amplitude value of the distance spectrum $|P(x)|$ in Eq. 
(5) is given as + +$$
+\begin{aligned}
+|P(x)| &= A^2 \left| \sum_{k=1}^{K} \alpha_k \gamma_k e^{j \varphi_k} e^{j \frac{4\pi f_0 (d_k - x)}{c}} f_w \frac{\sin\left\{\frac{2\pi f_w (d_k - x)}{c}\right\}}{\frac{2\pi f_w (d_k - x)}{c}} \right| \\
+&\leq A^2 f_w \sum_{k=1}^{K} \alpha_k \gamma_k \left| \frac{\sin\left\{\frac{2\pi f_w (d_k - x)}{c}\right\}}{\frac{2\pi f_w (d_k - x)}{c}} \right|, \quad (6)
+\end{aligned}
+$$
+
+and we have equality if and only if the phase components $\varphi_k + \frac{4\pi f_0 (d_k - x)}{c}$ for all $k$ are equal.
+
+Here, we assumed that the number of target is 1. The distance spectrum in Eq. (5) is rewritten as
+
+$$ P(x) = A^2 \alpha_1 \gamma_1 e^{j \varphi_1} e^{j \frac{4\pi f_0 (d_1 - x)}{c}} f_w \frac{\sin\left\{\frac{2\pi f_w (d_1 - x)}{c}\right\}}{\frac{2\pi f_w (d_1 - x)}{c}}, \quad (7) $$
+
+and the amplitude value of distance spectrum is given as
+
+$$ |P(x)| = A^2 \alpha_1 \gamma_1 f_w \left| \frac{\sin\left\{\frac{2\pi f_w (d_1-x)}{c}\right\}}{\frac{2\pi f_w (d_1-x)}{c}} \right|. \quad (8) $$
+
+This equation indicates that the distance for the target is generated by the amplitude value of distance spectrum.
+
+The phase value of distance spectrum $\angle P(x)$ is represented as
+
+$$ \angle P(x) = \varphi_1 + \frac{4\pi f_0 (d_1 - x)}{c} = \theta_1(x) . \quad (9) $$
+
+Here, $\theta_1(x)$ satisfies $-\pi \leq \theta_1(x) \leq \pi$, then the displacement for the target is
+
+$$ \frac{c(-\pi - \varphi_1)}{4\pi f_0} \leq d_1 \leq \frac{c(\pi - \varphi_1)}{4\pi f_0} . \quad (10) $$
+
+If the phase value satisfies $\varphi_1 = 0$, Eq. (10) is rewritten as $-3.11 [\text{mm}] \leq d_1 \leq +3.11 [\text{mm}]$ with $f_0 = 24.15 [\text{GHz}]$. That is, the small displacement of the target within $\pm 3.11 [\text{mm}]$ is generated by the phase value of distance spectrum.
+---PAGE_BREAK---
+
+TABLE I. PARAMETERS IN COMPUTER SIMULATIONS
+
+
ParametersValue
Center frequency24.15 GHz
Bandwidth50, 100, 200, 400 MHz
Sweep time1024 µs
Sampling time of sweep0.1, 1, 10 µs
Number of FFT points4096
Window functionhamming
+ +Fig. 3. Resolution for distance spectrum according to sweep bandwidth. + +On the other hands, the maximum distance for measuring +$d_{\max}$ is + +$$ +\begin{aligned} +\Delta f &= \frac{f_w}{t_w/t_s} [\text{Hz}] \, , \\ +d_{\max} &= \frac{c}{4\Delta f} [\text{m}] \, , +\end{aligned} +\quad (11) $$ + +where $t_w$ denotes the sweep time, $t_s$ denotes the interval time for sampling. For example, in the case with $t_w = 1024$ [µs] and $t_s = 1$ [µs], the maximum distance is $d_{\max} = 384$ [m]. + +III. COMPUTER SIMULATION + +A. Basic Performance + +At first, we describe the basic performance about the FM-CW radar with 24 GHz band. Parameters for computer simulation are listed in Table I. Center frequency is 24.15 GHz, bandwidth are 50, 100, 200, and 400 MHz. Note that the 400 MHz bandwidth is only used for the computer simulation because of standards in the Radio Law in Japan. Sweep time is 1024 µs, sampling times of sweep are 0.1, 1, 10 µs, number of FFT points is 4096, and the hamming windows is adapted as the window function in signal processing. + +We assumed that a static target is located at 10 m from the transmitter and receiver, and the distance spectrums are outputted with various parameters. Fig. 3 shows the amplitude value for distance spectrum versus measured distance with various sweep bandwidth. The result shows that the sweep bandwidth influences the distance resolutions and widely bandwidth can improve the resolution. In the case with $t_s = 1$ µs, the distance resolutions with $f_w = 50, 100, 200, 400$ MHz are ±5, ±1.5, ±1, ±0.5 m, respectively. Fig. 4 shows the amplitude value for distance spectrum versus measured distance with various sampling time. The result shows that + +Fig. 4. Error value for distance spectrum according to sampling interval. + +Fig. 5. Distance spectrum for measuring moving target. + +the sampling interval influences the error about the measured distance and shortly sampling interval can reduce the error value for distance. 
In the case with $f_w = 200$ MHz, the error values about the measured distance with $t_s = 10$ µs is about 0.5 m. + +Fig. 5 shows the result for measuring a slowly moving target with $f_w = 200$ MHz and $t_s = 1$ µs. The target moved from 10 m to 20 m at intervals of 0.5 m. Fig. 5(a) shows +---PAGE_BREAK--- + +Fig. 6. Measured displacement. + +the amplitude value versus measured distance versus target distance with 3-dimensional viewing, and Fig. 5(b) shows measured distance versus target distance with 2-dimensional viewing. The color in (b) is corresponding to the strength of the amplitude value in (a). From these figures, it is confirmed that the distance can be measured correctly according to the positions of the moving target. + +Fig. 6 shows the result for measuring a target with small displacement, and the measured displacement versus target displacement is outputted. The object is located at 10 m from the receiver, and the object moved from -5 mm to 5 mm at intervals of 0.1 mm. The small displacement can be measured by the phase value of distance spectrum, and the measured displacement is corresponding to the target displacement. Note that the measured displacement denotes the relative displacement and it is not corresponding to the absolute distance between the receiver and the target object. The small displacement within ±3.11 mm is correctly measured with the parameters of the FM-CW radar system in this paper, however, the displacement more than ±3.11 mm has uncertainty. + +## B. Proposed target detection + +As mentioned in the above section, the FM-CW radar system can measure the distance and the small displacement for 1 target object. However, it is a special case that only the reflected signal on a target can be received at the receiver. In general, the receiver may receive the reflected signals from many objects. 
Therefore, when there is some objects for measuring the target distance, signal processing for detecting the distance spectrum from the only target is required. + +The proposed method removes the signals from the other objects by using the differential detection of distance spectrum. Fig. 7 shows the distance spectrum when the target object moves from 10 m to 20 m and the other objects are located at 15 m and 20 m. The transmitted signal is reflected on the target and the other objects, the receiver receives several reflected signals. Therefore, the distance spectrum of the other objects are also generated by the FM-CW radar system in Fig. 7(a), and the distance spectrum of the target can not be detected clearly. In particular, when the reflection coefficient of the target is lower than that of the other objects, the distance spectrum of the other object has higher amplitude value than that of the target. + +Fig. 7. Distance spectrum for measuring moving target distance with / without the differential detection under the environments which multiple objects are located. + +In the proposed differential detection, at first, the distance spectrum of the other objects $P_0$ is generated beforehand in Fig. 7(a). Then, the distance spectrum of the target and the other object $P$ is subtracted by $P_0$. By using the differential detection, distance spectrum removed the distance spectrum of the other targets is generated as $P-P_0$. Therefore, the distance spectrum of the desired target is only detected. Fig. 7(b) shows the distance spectrum by using the proposed differential detection method, and the distance spectrum of the target is correctly measured. As compared with the measured distance spectrums in Fig. 7(a) and (b), it is clearly confirmed that the proposed method can detect target distance by using the difference detection. 
The proposed differential detection can effectively detect the moving or static target distance from multiple reflections of the background static objects. + +# IV. EXPERIMENTS + +In order to evaluate the effectiveness of the proposed method for detecting the target distance and displacement, we develop a FM-CW radar system and carried out the experiments with the radar system in actual environment. Table II lists the parameters, and the developed FM-CW radar system get a certificate of conformity with technical regulations in +---PAGE_BREAK--- + +TABLE II. PARAMETERS IN EXPERIMENTS + +
ParametersValue
Center frequency f024.15 GHz
Sweep bandwidth fw200 MHz
Sweep time tw1024 μs
Sampling time of sweep ts1 μs
Transmitter power output0.007 W
Antenna gain11 dBi
Range of distance0 - 100 m
Range of relative displacement±3.11 mm
+ +Fig. 8. Distance spectrum for measuring moving target distance with / without the differential detection. + +Article 38-6 Paragraph 1 of the Radio Law in Japan, and developed FM-CW radar system is accommodate to ARIB standard T73 in Japan [1]. + +## A. Distance Spectrum + +Fig. 8 shows the distance spectrum of a moving target. A person walked away from the FM-CW radar and then came close between 2 [m] to 10 [m]. In Fig. 8(a), several distance spectrums of the person and the background objects are outputted. The distance spectrum of the moving person is not clearly detected in Fig. 8(a). In order to detect the distance spectrum of the moving person with the differential + +detection method, the distance spectrum without the person is measured beforehand. By generating the distance spectrum of the background objects beforehand, the distance spectrum of the moving person is correctly detected in Fig. 8(b) with the proposed differential detection. Therefore, the FM-CW radar system can measure movement of the target person effectively. + +Fig. 9 shows the result of measuring the small displacement for human breathing. The human's chest movement is measured within the range of relative small displacement. In Fig. 9, it is detected that the period of breathing is about 4 [s] and the breathing movement is about within ±2 [mm]. + +## B. Example for application + +Finally, we show an example of application with 24 GHz FM-CW radar system. Fig. 10 shows a setup of the FM-CW radar system for detecting human breathing in actual environments. The FM-CW radar satisfies the safety guideline, and the details of the safety guideline is described in Appendix. + +Fig. 11 shows the example for detecting human breathing. + +Fig. 9. Displacement for measuring the movement of human breathing. + +Fig. 10. Setup of FM-CW Radar for detecting human breathing. + +Fig. 11. Example of application. +---PAGE_BREAK--- + +The distance spectrum in this example is measured as following flow. 
+ +1) Measuring distance spectrum without any person. + +2) A person comes to the bed. The radar received signals from human's body. + +3) The person lies asleep on the bed. The radar detects the person's breathing movement. + +By generating the distance spectrum of the background objects without the person, the distance spectrum of the person is only detected. When the person comes within the range of radar, the radar system can detect reflected signals from the person, and the distance spectrums of the human's body are detected. After the person lies on the bed, the radar system can detect the small displacement for the person's breathing movement. By using the differential detection method, the distance and small displacement of the moving object is clearly detected. + +## V. CONCLUSION + +In this paper, design and performance of a FM-CW radar system with 24 GHz band is described. In computer simulations, basic performances of FM-CW radar system is analyzed about the distance resolution and error value according to the sweep time and the sampling interval, respectively. Moreover, the differential detection method for detecting only the target object is proposed for measuring the distance and the displacement of the target under the environments which multiple objects are located. In experiments, the distance spectrum of the target object is clearly detected by using the differential detection method under the environments which multiple objects are located. Furthermore, an example of application for detecting human's breathing movement is shown. As the result, the 24 GHz FM-CW radar with the proposed differential detection method effectively detect the distance and the small displacement under the environments which multiple objects are located. + +## ACKNOWLEDGMENT + +A part of this work was supported by “Ashita wo Ninau Kanagawa Venture Project” of Kanagawa in Japan. + +The authors appreciate Prof. 
Toshio Nojima at Hokkaido University in Japan getting the valuable advices for analyzing the safety properties of the developed FM-CW radar system according to the safety guideline. + +## REFERENCES + +[1] ARIB STD-T73 Rev. 1.1, *Sensors for Detecting or Measureing Mobile Objects for Specified Low Power Radio Station*, Association of Radio Industries and Businesses Std. + +[2] S. MIYAKE and Y. MAKINO, "Application of millimeter-wave heating to materials processing(special issue; recent trends on microwave and millimeter wave application technology)," *IEICE transactions on electronics*, vol. 86, no. 12, pp. 2365-2370, dec 2003. + +[3] M. Skolnik, *Introduction to Radar Systems*. McGraw Hill, 2003. + +[4] S. Fujimori, T. Uebo, and T. Iritani, "Short-range high-resolution radar utilizing standing wave for measuring of distance and velocity of a moving target," *ELECTRONICS AND COMMUNICATIONS IN JAPAN PART I-COMMUNICATIONS*, vol. 89, no. 5, pp. 52-60, 2006. + +[5] T. Uebo, Y. Okubo, and T. Iritani, "Standing wave radar capable of measuring distances down to zero meters," *IEICE TRANSACTIONS ON COMMUNICATIONS*, vol. 88, no. 6, pp. 2609-2615, jun 2005. + +[6] T. SAITO, T. NINOMIYA, O. ISAJI, T. WATANABE, H. SUZUKI, and N. OKUBO, "Automotive fm-cw radar with heterodyne receiver," *IEICE transactions on communications*, vol. 79, no. 12, pp. 1806-1812, dec 1996. + +[7] W. Butler, P. Poitevin, and J. Bjomholt, "Benefits of wide area intrusion detection systems using fmcw radar," in *Security Technology, 2007 41st Annual IEEE International Carnahan Conference on*, Oct 2007, pp. 176-182. + +[8] M. Skolnik, *Radar Handbook, Third Edition*. McGraw-Hill Education, 2008. + +[9] W. Sediono and A. Lestari, "2d image reconstruction of radar indera," in *Mechatronics (ICOM), 2011 4th International Conference On*, May 2011, pp. 1-4. 
+ +[10] C95.1-2005, *IEEE Standard for Safety Levels with Respect to Human Exposure to Radio Frequency Electromagnetic Fields*, 3 kHz to 300 GHz, IEEE Std. + +[11] Ministry of Internal Affairs and Communications. [Online]. Available: http://www.tele.soumu.go.jp/resource/j/material/dwn/guide38.pdf + +# APPENDIX + +In general, electromagnetic wave must be satisfied the guidelines on human exposure to electromagnetic fields, where it have been instituted in various organizations. IEEE C95.1 in USA [10] and ICNIRP in Europe are the guidelines, and MIC also have instituted the guideline in Japan [11]. + +Developed 24 GHz FM-CW radar in this paper have the properties as follow. The power of the transmitter is 7 [mW], the transmitting antenna gain is 11 [dBi], the effective radiated power is 88 [mW], the radiation angle of the transmitting wave is about 50 [degree], and the distance between the transmitter and the human is 2.5 [m]. According to the radar equation, the electric field strength $E$ and the power density $P$ on the human body is calculated as + +$$ +\begin{aligned} +E &= \sqrt{\frac{30 \times 0.088}{2.5}} = 0.65 \text{ [V/m]} , \\ +P &= \frac{E^2}{z_0} = \frac{0.65^2}{120\pi} = 1.12 \times 10^{-4} \text{ [mW/cm}^2\text{]} . +\end{aligned} + $$ + +According to the guideline [11], these parameters must be satisfied as + +$$ +\begin{aligned} +&E \leq 61.4 \text{ [V/m]} , \\ +&P \leq 1 \text{ [mW/cm}^2\text{]} . +\end{aligned} + $$ + +Therefore, the developed 24 GHz FM-CW radar system in this paper sufficiently satisfies the conditions in the guideline. 
\ No newline at end of file diff --git a/samples_new/texts_merged/230879.md b/samples_new/texts_merged/230879.md new file mode 100644 index 0000000000000000000000000000000000000000..1245fa10e95c398c74053d62d9e26d147a24f084 --- /dev/null +++ b/samples_new/texts_merged/230879.md @@ -0,0 +1,885 @@ + +---PAGE_BREAK--- + +# Imaging Below the Diffraction Limit: A Statistical Analysis + +Morteza Shahram and Peyman Milanfar, Senior Member, IEEE + +**Abstract**—The present paper is concerned with the statistical analysis of the resolution limit in a so-called “diffraction-limited” imaging system. The canonical case study is that of incoherent imaging of two closely-spaced sources of possibly unequal brightness. The objective is to study how far beyond the classical Rayleigh limit of resolution one can reach at a given signal to noise ratio. The analysis uses tools from statistical detection and estimation theory. Specifically, we will derive explicit relationships between the minimum detectable distance between two closely-spaced point sources imaged incoherently at a given SNR. For completeness, asymptotic performance analysis for the estimation of the unknown parameters is carried out using the Cramér-Rao bound. To gain maximum intuition, the analysis is carried out in one dimension, but can be well extended to the two-dimensional case and to more practical models. + +**Index Terms**—Cramér-Rao bound, diffraction, estimation, hypothesis test, imaging, Rayleigh limit, resolution, super-resolution. + +## I. INTRODUCTION + +IN incoherent optical imaging systems the image of an ideal point source is captured as a spatially extended pattern known as the point-spread function (PSF), as shown for the one-dimensional case in Fig. 1. In two dimensions, this function is the well-known Airy diffraction pattern [1]. 
When two closely-located point sources are measured through this kind of optical imaging system, the measured signal is the incoherent sum of the respective shifted point spread functions. According to the classical Rayleigh criterion, two incoherent point sources are “barely resolved” when the central peak of the diffraction pattern generated by one point source falls exactly on the first zero of the pattern generated by the second one. A more detailed and complete explanation of incoherent imaging and related topics can be found in [1] and [2]. + +The Rayleigh criterion for resolution in an imaging system is generally considered as an accurate estimate of limits in practice. But under certain conditions related to signal-to-noise ratio (SNR), resolution beyond the Rayleigh limit is indeed possible. This can be called the super-resolution limit [3]. Indeed, at sufficiently high sampling rates, and in the absence of noise, arbitrarily small details can be resolved. + +To gain maximum intuition and perspective from the foregoing analysis, all discussion herein will be carried out in the + +Fig. 1. Image of point source captured by diffraction-limited imaging. + +one-dimensional case, which can later be extended to the two-dimensional case. To begin, let us assume that the original signal of interest is the sum of two impulse functions separated by a small distance $d$:¹ + +$$ \sqrt{\alpha}\,\delta\left(x - \frac{d}{2}\right) + \sqrt{\beta}\,\delta\left(x + \frac{d}{2}\right). \quad (1) $$ + +As mentioned before, the image will be the incoherent sum of two point spread functions, resulting from an imaging aperture (or slit in one-dimensional case, as seen in Fig. 2) + +$$ s(x; \alpha, \beta, d) = \alpha h\left(x - \frac{d}{2}\right) + \beta h\left(x + \frac{d}{2}\right) \quad (2) $$ + +where for our specific case of incoherent imaging $h(x) = \operatorname{sinc}^2(x) = [\sin(\pi x)/(\pi x)]^2$, but other PSF's can also be considered. 
Finally, the measured signal includes discretized samples corrupted with additive (readout) noise. Given samples at $x_k$ ($k = 1, \dots, N$) of the measured signal, we can rewrite the measurement model as + +$$ g(x_k) = s(x_k; \alpha, \beta, d) + w(x_k) \\ = \alpha h\left(x_k - \frac{d}{2}\right) + \beta h\left(x_k + \frac{d}{2}\right) + w(x_k) \quad (3) $$ + +where $w(x_k)$ is assumed to be a zero-mean Gaussian white noise process with variance $\sigma^2$. + +With the present definition, the Rayleigh limit corresponds to $d=1$ as can be seen in Figs. 1 and 2. This means that for values $d < 1$, the two point sources are (in the classical Rayleigh sense) + +Manuscript received March 3, 2003; revised November 3, 2003. This work was supported in part by NSF CAREER Grant CCR-9984246. The associate editor coordinating the review of this manuscript and approving it for publication was Dr. Thierry Blu. + +The authors are with the Department of Electrical Engineering, University of California, Santa Cruz, CA 95064 USA (e-mail: shahram@ee.ucsc.edu; milanfar@ee.ucsc.edu). + +Digital Object Identifier 10.1109/TIP.2004.826096 + +¹From now on we refer to $\alpha$ and $\beta$ as intensities and also we assume that $\alpha, \beta > 0$. Also, note that this model (for now) assumes point sources symmetrically placed about the (known) origin. This model will be generalized later in the paper. 
In particular, in [5] and [6], he derived lower bounds on the mean-squared error of unbiased estimators for the source positions, the distance between the sources, and the radiance values, using the Cramér-Rao inequality. In [5], he considered two separate situations. In the first, the problem of whether any signal was present or not was treated, whereas in the second, the question of whether one or two sources were present was treated. (This second scenario is, of course, what interests us in the present paper.) Helstrom described a geometrical optics field model of the problem involving a general radiance distribution and point spread function, for objects with arbitrary shape. To study the case of the circular aperture and point sources, he applied a complex and remarkable set of approximations and simplifications of the initial model. Also, he assumed that the distance between the point sources is known to the detector. + +In [3] and [7], an approximate statistical theory was given to compute the required number of detected photons (similar to the notion of signal to noise ratio) for a certain desired resolution, and the value of achievable resolution by image restoration techniques was also investigated by numerical and iterative deconvolution. In these papers the definition of resolution was made as the separation of the two point sources that can be resolved through a deconvolution procedure. In [7], the analysis of the achievable resolution in deconvolved astronomical images was studied based on a criterion similar to Rayleigh's. + +In [9] and [12] two-point resolution of imaging systems was studied using a model fitting theory where the probability of resolution was computed based on the structural change of the stationary points of the likelihood function. Also in [11] the Cramér-Rao lower bound formulation was used to study the limits to attainable precision of estimated distance between the two point sources. 
Assuming a Gaussian PSF, they determined a lower bound for the estimation error variance. Also, in [10], the reader can find a very comprehensive review of past and present approaches to the concept of resolution. In this paper, + +we also compute the Cramér-Rao (CR) lower bound in exact, closed form for two different cases. This analysis is in fact extendable to any point spread function. + +Finally, an interesting, more recent paper [13] views the resolution problem from the information theory perspective. This line of thinking, again with simplifying approximations, is used to compute limits of resolution enhancement using Shannon's theorem of maximum transferable information via a noisy channel. The paper [13] considers the case of equally bright nearby point sources and derives an expression relating resolution (here defined as the inverse of the discernable distance between two equally bright point sources), logarithmically to the SNR. + +The results of our paper extend, illuminate, and unify the earlier works in this field using more modern tools in statistical signal processing. Namely, we use locally optimal tests, which lead to more explicit, readily interpreted, and applicable results. In addition, we study various cases including unknown and/or unequal intensities, which have not been considered in their full complexity before.² The present results clarify, arguably for the first time, the specific effects of the relevant parameters on the definition of resolution, and its limits, as needed in practice. + +In this paper we formulate the problem of two-point resolution in terms of statistical estimation/detection. Our approach is to precisely define a quantitative measure of resolution in statistical terms by addressing the following question: what is the minimum separation between two point sources (maximum attainable resolution limit) that is detectable at a given signal-to-noise ratio (SNR). 
In contrast to earlier definitions of resolution, there is little ambiguity in our proposed definition, and all parameters (PSF, noise variance, sampling rate, etc.) will be explicitly present in the formulation. Our earlier work on this problem was presented in [14], which essentially covers the material in Section IV-A of this paper. + +The organization of the paper is as follows. Section II will explain and formulate our definition, and the corresponding statistical framework and models, in detail. In Section III, in order to use linear detection/estimation structures, we will discuss a signal approximation approach. In Section IV, we will present our statistical analysis for different cases of increasing generality. The asymptotic performance of the maximum likelihood estimate of the unknown parameters in terms of the Cramér-Rao lower bound will be discussed in Section V. Finally, some comments and conclusion will be presented in Section VI. + +## II. STATISTICAL ANALYSIS FRAMEWORK + +The question of whether one or two peaks are present in the measured signal can be formulated in statistical terms. Specifically, for the proposed model the equivalent question is whether the parameter *d* is equal to zero or not. If *d* = 0 then we only have one peak and if *d* > 1 then there are two resolved peaks according to the Rayleigh criterion. So the problem of interest revolves around values of *d* in the range of 0 ≤ *d* < 1. Therefore, we can define two hypotheses, which will form the basis of our statistical framework. Namely, let $\hat{H}_0$ denote the null hy- + +²Reference [9] considered the case of unequal intensities in a different framework. 
+---PAGE_BREAK--- + +pothesis that $d = 0$ (one peak present) and let $\Pi_1$ denote the +alternate hypothesis that $d > 0$ (two peaks present) + +$$ +\begin{equation} +\begin{cases} +H_0: d = 0 & \text{One peak is present} \\ +H_1: d > 0 & \text{Two peaks are present} +\end{cases} +\tag{4} +\end{equation} +$$ + +Given discrete samples of the measured signal, we can rewrite +the problem as + +$$ +\left\{ +\begin{array}{ll} +H_0: & \mathbf{g} = \mathbf{s}_0 + \mathbf{w} \\ +H_1: & \mathbf{g} = \mathbf{s} + \mathbf{w} +\end{array} +\right. +\qquad (5) +$$ + +where + +$$ +\begin{align*} +\mathbf{g} &= [g(x_1), \dots, g(x_N)]^T, \\ +\mathbf{w} &= [w(x_1), \dots, w(x_N)]^T, \\ +\mathbf{s} &= [s(x_1; \alpha, \beta, d), \dots, s(x_N; \alpha, \beta, d)]^T, \\ +\mathbf{s}_0 &= [s_0(x_1), \dots, s_0(x_N)]^T, +\end{align*} +$$ + +and + +$$ +s(x_k; \alpha, \beta, d) = \alpha h \left( x_k - \frac{d}{2} \right) + \beta h \left( x_k + \frac{d}{2} \right) \quad (6) +$$ + +$$ +s_0(x_k) = s(x_k; \alpha, \beta, d)|_{d=0} = (\alpha + \beta)h(x_k). \quad (7) +$$ + +This is a problem of detecting a deterministic signal with unknown parameters $(\alpha, \beta$, and $d$, in general). From (5), since the probability density function (PDF) under $H_1$ is not known exactly, it is not possible to design optimal detectors (in the Neyman-Pearson sense) by simply forming the likelihood ratio. The general structure of composite hypothesis testing is involved when unknown parameters appear in the PDF's [16, p. 248]. There are two major approaches for composite hypothesis testing. The first is to use explicit prior knowledge as to the likely values of parameters of interest and apply a Bayesian method to this detection problem. However, there is generally no such a priori information available. 
Alternately, the second approach, the Generalized Likelihood Ratio Test (GLRT) first computes maximum likelihood (ML) estimates of the unknown parameters, and then will use these estimated value to form the standard Neyman-Pearson (NP) detector. Our focus will be on GLRT-type methods because of less restrictive assumptions and easier computation and implementation; but most importantly, because uniformly most powerful (UMP) and locally most powerful (LMP) tests can be developed for the parameter range $0 \le d < 1$. + +To be a bit more specific, consider the case where it is known +that $\alpha = \beta = 1$, with the parameter $d$ unknown. The GLRT +approach offers to decide $\Pi_1$ if + +$$ +L(\mathbf{g}) = \frac{\max_{d} p(\mathbf{g}, d, H_1)}{p(\mathbf{g}, H_0)} = \frac{p(\mathbf{g}, \hat{d}, H_1)}{p(\mathbf{g}, H_0)} > \gamma \quad (8) +$$ + +where $\hat{d}$ denotes the ML estimate of $d$, and $p(\mathbf{g}, d; H_1)$ and $p(\mathbf{g}; H_0)$ are PDF's under $\Pi_1$ and $\Pi_0$, respectively. Assuming additive white Gaussian noise (AWGN) with variance $\sigma^2$ and $\hat{\mathbf{s}} = [s(x_1; 1, 1, \hat{d}), \dots, s(x_N; 1, 1, \hat{d})]^T$ we will have: + +$$ +\begin{align*} +L(\mathbf{g}) &= \frac{\frac{1}{(2\pi\sigma^2)^{N/2}} \exp\left(-\frac{1}{2\sigma^2} ||\mathbf{g} - \hat{\mathbf{s}}||^2\right)}{\frac{1}{(2\pi\sigma^2)^{N/2}} \exp\left(-\frac{1}{2\sigma^2} ||\mathbf{g} - \mathbf{s}_0||^2\right)} \\ +&= \exp\left(-\frac{1}{2\sigma^2}\left(-||\hat{\mathbf{s}}||^2 + ||\mathbf{s}_0||^2 + 2\mathbf{g}^T(\hat{\mathbf{s}} - \mathbf{s}_0)\right)\right). +\end{align*} +$$ + +Therefore, $\Pi_1$ will be chosen if + +$$ +- \| \hat{\mathbf{s}} \|^{2} + 2 \mathbf{g}^{T} (\hat{\mathbf{s}} - \mathbf{s}_{0}) > \gamma'. 
\quad (9)
$$ + +Equivalently, + +$$
\sum_{k=1}^{N} \left\{ -\left[\alpha h\left(x_k - \frac{\hat{d}}{2}\right) + \beta h\left(x_k + \frac{\hat{d}}{2}\right)\right]^2 \right. \\ \left. + 2\left[\alpha h\left(x_k - \frac{\hat{d}}{2}\right) + \beta h\left(x_k + \frac{\hat{d}}{2}\right) - (\alpha + \beta)h(x_k)\right] g(x_k) \right\} > \gamma' \qquad (10)
$$ + +where the ML estimate of $d$ in the above involves solving the
following minimization problem + +$$
\min_{d} \sum_{k=1}^{N} \left[ \alpha h \left( x_k - \frac{d}{2} \right) + \beta h \left( x_k + \frac{d}{2} \right) - g(x_k) \right]^2 \Rightarrow \hat{d} \quad (11)
$$ + +It should be clear from the above that this detection/estimation problem is highly nonlinear. However, since the range of interest is the values of $0 \le d < 1$, representing resolution beyond the Rayleigh limit, it is quite appropriate for the purposes of our analysis to consider approximating the model of the signal around $d = 0$, and to apply locally optimal detectors. This is the approach we take. + +III. (QUADRATIC) MODEL APPROXIMATION + +Much of the complexity we encountered in the earlier formulation of the problem can be remedied by appealing to an approximation of the signal model. This approximate model is derived by expanding the signal about the small parameter values around $d = 0$. As alluded to earlier, this approximation is quite adequate in the sense that all the parameter values of interest for resolution beyond the Rayleigh diffraction limit are contained in the range $[0, 1]$ anyway. 
+ +We consider the Taylor series expansion of $s(x_k; \alpha, \beta, d)$ around $d = 0$, with all other variables fixed.³ More specifically, + +$$
s(x_k; \alpha, \beta, d) \approx (\alpha + \beta)h(x_k) + \frac{\beta - \alpha}{2}dh_1(x_k) \\ + \frac{\alpha + \beta}{8}d^2h_2(x_k) \quad (12)
$$ + +where $h_1(\cdot)$ and $h_2(\cdot)$ denote the first and second order derivatives of $h(\cdot)$ and where for $h(x) = \operatorname{sinc}^2(x)$ + +$$
\begin{align}
h_1(x_k) &= \left. \frac{\partial h(x)}{\partial x} \right|_{x=x_k} \\
&= \frac{2\sin(\pi x_k)(\pi x_k \cos(\pi x_k) - \sin(\pi x_k))}{\pi^2 x_k^3} \tag{13}
\end{align}
$$ + +$$
h_2(x_k) = \left. \frac{\partial^2 h(x)}{\partial x^2} \right|_{x=x_k} \\
= \frac{(2\pi^2 x_k^2 - 3) \cos(2\pi x_k) - 4\pi x_k \sin(2\pi x_k) + 3}{\pi^2 x_k^4}. \quad (14)
$$ + +³It is important here to note that this is an approximation about the *parameter* of interest *d*, and not the variable *x*; as such it therefore is a global approximation of the function.
---PAGE_BREAK--- + +In the above approximation, we elect to keep terms up to order 2 of the Taylor expansion. This gives a rather more accurate representation of the signal, and more importantly, if we only kept the first order term, then in the case $\alpha = \beta$, the first order term would simply vanish and *no* term in $d$ would appear in the approximation. The reader can find a more detailed discussion on the accuracy of this approximation in Appendix A. The proposed approximation simplifies the hypothesis testing problem to essentially a linear detection problem (as we will see in the next section). The approximation is helpful in that we can carry out our analysis more simply. In addition, it leads to a general form of locally optimum detectors [16, p. 217] as will be discussed later. 
+ +Continuing with vector notation we have: + +$$ s \approx (\alpha + \beta)\mathbf{h} + \frac{\beta - \alpha}{2} d\mathbf{h}_1 + \frac{\alpha + \beta}{8} d^2\mathbf{h}_2 \quad (15) $$ + +where + +$$ +\begin{aligned} +\mathbf{h} &= [h(x_1), \dots, h(x_N)]^T \\ +\mathbf{h}_1 &= [h_1(x_1), \dots, h_1(x_N)]^T \\ +\mathbf{h}_2 &= [h_2(x_1), \dots, h_2(x_N)]^T. +\end{aligned} + $$ + +Writing in the form of hypotheses described earlier in (5) + +$$ +\left\{ +\begin{array}{l} +H_0: \tilde{\mathbf{g}} = (\alpha + \beta)\mathbf{h} + \mathbf{w} \\ +H_1: \tilde{\mathbf{g}} = (\alpha + \beta)\mathbf{h} + \frac{\beta-\alpha}{2} d\mathbf{h}_1 + \frac{\alpha-\beta}{8} d^2\mathbf{h}_2 + \mathbf{w} +\end{array} +\right. +\quad (16) +$$ + +where we distinguish $\tilde{\mathbf{g}}$ from $\mathbf{g}$ due to the approximated model. According to this model, we define the measured signal-to-noise ratio (per sample) as follows: + +$$ \text{SNR} = \frac{1}{N\sigma^2} \left\| (\alpha + \beta)\mathbf{h} + \frac{\beta - \alpha}{2} d\mathbf{h}_1 + \frac{\alpha + \beta}{8} d^2\mathbf{h}_2 \right\|^2 . \quad (17) $$ + +For any symmetric PSF ($h(x)$) and in the case of above-Nyquist sampling, the following relations can be verified + +$$ +\begin{aligned} +\mathbf{h}^T \mathbf{h}_1 &= 0 \\ +\mathbf{h}_2^T \mathbf{h}_1 &= 0 \\ +\mathbf{h}^T \mathbf{h}_2 &= -\mathbf{h}_1^T \mathbf{h}_1. +\end{aligned} + $$ + +Therefore, we can rewrite (17) in the following form: + +$$ +\begin{aligned} +\text{SNR} ={}& \frac{1}{N\sigma^2} \left[ (\alpha + \beta)^2 E_0 + \left(\frac{\beta - \alpha}{2}\right)^2 d^2 E_1 \right. \\ +& \qquad \left. 
+ \left(\frac{\alpha + \beta}{8}\right)^2 d^4 E_2 - \left(\frac{\alpha + \beta}{2}\right)^2 d^2 E_1 \right] \\ +={}& \frac{1}{N\sigma^2} \left[ (\alpha + \beta)^2 E_0 - \alpha\beta d^2 E_1 + \left(\frac{\alpha + \beta}{8}\right)^2 d^4 E_2 \right] +\end{aligned} +\quad (18) $$ + +where we define + +$$ E_0 = \mathbf{h}^T \mathbf{h} = f_s \int_{-\infty}^{+\infty} h^2(x) dx \quad (19) $$ + +$$ E_1 = h_1^T h_1 = f_s \int_{-\infty}^{-\infty} \left[ \frac{\partial h(x)}{\partial x} \right]^2 dx \quad (20) $$ + +$$ E_2 = h_2^T h_2 = f_s \int_{-\infty}^{-\infty} \left[ \frac{\partial^2 h(x)}{\partial x^2} \right]^2 dx \quad (21) $$ + +as energy terms.⁴ + +⁴In above-Nyquist sampling, SNR is independent of $N$ (and $f_s$) since energy terms are all proportional to $f_s$. See Appendix B for details and explicit computations of these energy terms for the case of $h(x) = \text{sinc}^2(x)$. + +IV. DETECTION THEORY FOR THE APPROXIMATED MODEL + +In this section, we develop detection strategies for the hypothesis testing problem of interest based upon the approximated model. It is illuminating to study the various cases of interest in order. Our earlier assumptions were equal, known intensities, symmetrically located point sources about a given center, and the energy constraint $\alpha + \beta = 2$. In the interest of clarity and ease of exposition, we start with the case when all these assumptions hold. Then we will extend the discussion in order of increasing levels of generality by relaxing an assumption in each step. Namely, we will treat the problem for the following cases: + +• the case of equal, known intensities $\alpha = \beta = 1$, with symmetrically located point sources; + +• the case of unknown intensities but $\alpha + \beta = 2$, with symmetrically located point sources; + +• the case of unknown intensities but $\alpha + \beta = 2$, asymmetrically located point sources; + +• the case of unknown intensities, asymmetrically located point sources. 
+ +By considering (16), we notice that when $\alpha + \beta = 2$ is known to the detector (the first three cases), $(\alpha+\beta)\mathbf{h}$ is a common known term in both hypotheses and it is independent from $d$. Therefore, we may simplify further + +$$
\left\{
\begin{array}{l}
H_0: y = w \\
H_1: y = \frac{\beta-\alpha}{2} d\mathbf{h}_1 + \frac{\alpha+\beta}{8} d^2\mathbf{h}_2 + w
\end{array}
\right.
\quad (22)
$$ + +where $y = \tilde{\mathbf{g}} - (\alpha + \beta)\mathbf{h}$. As we began to describe earlier, when $\alpha = \beta$, the hypothesis test will be reduced to the case of detecting a known signal with unknown positive amplitude ($D = d^2$). For this case, there exist well-known optimal detection strategies. + +A. The Case of Equal Intensities, Symmetrically Located Point Sources + +When $\alpha = \beta = 1$, (22) is reduced to + +$$
\left\{
\begin{array}{l}
H_0: y = w \\
H_1: y = \frac{d^2}{4}\mathbf{h}_2 + w
\end{array}
\right.
\quad (23)
$$ + +It is readily shown that given this model, the ML estimate for the parameter $d^2$ is given by + +$$ \hat{d}^2 = 4 (\mathbf{h}_2^T \mathbf{h}_2)^{-1} \mathbf{h}_2^T y. \quad (24) $$ + +Next, the test statistic resulting from the (generalized) Neyman-Pearson likelihood ratio is given by + +$$ T(y) = \frac{1}{\sigma^2} (\mathbf{h}_2^T \mathbf{h}_2)^{-1} (\mathbf{h}_2^T y)^2 . \quad (25) $$ + +We note that the expression for the test-statistic is essentially an energy detector with the condition that the value of $d^2$ is in fact estimated from the data itself. The detector structure, due to our knowledge of the sign of the unknown distance parameter, is effectively producing a one-sided test, and hence is in fact a Uniformly Most Powerful (UMP) detector in the sense that it produces the highest detection probability for all values of the unknown parameter, and for a given false-alarm rate [16, p. 194]. 
Therefore, the above test-statistic can be simply replaced by + +$$ T'(y) = \sqrt{T(y)} = \frac{1}{\sigma} (\mathbf{h}_2^T \mathbf{h}_2)^{-1/2} \mathbf{h}_2^T y. \quad (26) $$ + +⁵Where point sources are located at $-d_1$ and $+d_2$ instead of $-(d/2)$ and $(d/2)$.
---PAGE_BREAK--- + +For any given data set y, we decide $H_1$ if the statistic exceeds a specified threshold + +$$T'(y) > \gamma. \quad (27)$$ + +The choice of $\gamma$ is motivated by the level of tolerable false alarm (or false-positive) in a given problem, but is typically kept very low.⁶ The detection rate ($P_d$) and false-alarm rate ($P_f$) for this detector are related as [16, p. 254] + +$$P_d = Q(Q^{-1}(P_f) - \sqrt{\eta}) \quad (28)$$ + +where + +$$\eta = \frac{d^4}{16} \frac{E_2}{\sigma^2} \quad (29)$$ + +and $Q$ is the right-tail probability function for a standard Gaussian random variable (zero mean and unit variance); and $Q^{-1}$ is the inverse of this function [16, p. 20]. A particularly intriguing and useful relationship is the behavior of the smallest peak separation $d$, which can be detected with very high probability (say 0.99), and very low false alarm rate (say $10^{-6}$) at a given SNR. According to (18), (28), and (29), the relation between $d_{min}$ and required SNR can be made explicit + +$$ \begin{align} \text{SNR} &= (Q^{-1}(P_f) - Q^{-1}(P_d))^2 \frac{64E_0 - 16d^2E_1 + d^4E_2}{Nd^4E_2} \tag{30} \\ &= \frac{1}{N}(Q^{-1}(P_f) - Q^{-1}(P_d))^2 \nonumber \\ &\quad \times \left( \frac{64E_0}{E_2} \frac{1}{d^4} - \frac{16E_1}{E_2} \frac{1}{d^2} + 1 \right). \tag{31} \end{align} $$ + +The above expression gives an implicit relation between the smallest detectable distance between the two (equal intensity) sources, at the particular SNR. 
As an example, for $h(x) = \operatorname{sinc}^2(x)$ and for the specified choice of $P_d = 0.99$ and $P_f = 10^{-6}$, if we collect $N$ equally spaced samples at $\{x_k\}$ within the interval $[-10, 10]$, at the Nyquist rate, we have + +$$ \begin{aligned} \text{SNR} &= 50.12 \frac{\frac{140}{\pi^4} - \frac{14}{\pi^2}d^2 + d^4}{Nd^4} \\ &= \frac{72.04 - 71.1d^2 + 50.12d^4}{Nd^4} \end{aligned} $$ + +A plot of this function is shown in Fig. 3. It is worth noting that in (31), the term involving $d^{-1}$ dominates for small $d$. Therefore, a reasonably informative (but approximate) way to write SNR is + +$$\text{SNR} \approx \frac{1}{N} (Q^{-1}(P_f) - Q^{-1}(P_d))^2 \frac{E_0}{E_2} \frac{1}{d^4} = \frac{c}{Nd^4} \quad (32)$$ + +where the coefficient $c$ is a function only of the selected $P_f$ and $P_d$. It is worth noting that for any sampling rate higher than the Nyquist rate, we can rewrite $c$ in (32) as follows: + +$$c = 64(Q^{-1}(P_f) - Q^{-1}(P_d))^2 \frac{\int_{-\infty}^{-\infty} h^2(x) dx}{\int_{-\infty}^{-\infty} \left[ \frac{\partial^2 h(x)}{\partial x^2} \right]^2 dx} \quad (33)$$ + +⁶In [9] and [12] a similar criterion (in a different framework) has been proposed, where they applied a sign test (i.e., a fixed threshold) to decide if there is one or two point sources present. This approach gives a detector with a fixed false alarm rate. + +Fig. 3. Minimum detectable *d* as a function of SNR (in dB) at the Nyquist rate (exact and approximate). + +Fig. 4. Minimum detectable *d* versus SNR (in dB) at Nyquist rate, and at twice Nyquist rate. + +A plot of the approximate expression in (32) is also shown in Fig. 3 to be compared against the exact expression (31). The above relation (32) is a neat and rather intuitive power law that one can use to, for instance, understand the required SNR to achieve a particular resolution level of interest below the diffraction limit. Fig. 
4 shows the curves defined by (30) for different sampling rates; namely Nyquist rate and twice Nyquist. As one would expect, the minimum detectable *d* becomes smaller as the number of samples increases, but it does not do so at a very fast rate because of the proportionality between SNR and the sampling rate.⁷ + +## B. The Case of Unknown α and β, Symmetrically Located Point Sources + +In this section we discuss a more general case where neither the intensities α and β, nor the distance *d*, are known.⁸ Equation + +⁷Similar analysis for the two-dimensional extension of this problem is presented in [22]. + +⁸But we assume that $\alpha + \beta = 2$ is known to the detector. +---PAGE_BREAK--- + +(22) leads to a detection problem defined in terms of a linear +model over the parameter set $\theta$ defined as follows: + +$$ +\begin{align} +& y = H\theta + w \tag{34} \\ +& H = [h_1; h_2] \tag{35} \\ +& \theta = \begin{bmatrix} d(\alpha - \beta) \\ \frac{d^2}{4} \end{bmatrix} \tag{36} +\end{align} +$$ + +where we note that the matrix $H$ has orthogonal columns. +Specifically, the detection problem is now posed as + +$$ +\left\{ +\begin{array}{ll} +H_0: & A\theta = b \\ +H_1: & A\theta \neq b +\end{array} +\right. +\tag{37} +$$ + +where + +$$ +A = \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix} \quad b = \begin{bmatrix} 0 \\ 0 \end{bmatrix} \qquad (38) +$$ + +The GLRT for this problem is given by ([16], p. 274): + +$$ +T(y) = \frac{1}{\sigma^2} \hat{\theta}' A^{-T} [A(H' H)^{-1} A']^{-1} A \hat{\theta} \quad (39) +$$ + +$$ += \frac{1}{\sigma^2} \left( \frac{(h_1^T y)^2}{E_1} + \frac{(h_2^T y)^2}{E_2} \right) \quad (40) +$$ + +where + +$$ +\hat{\theta} = (\mathbf{H}^T \mathbf{H})^{-1} \mathbf{H} \mathbf{y}. 
\quad (41)
$$ + +The performance of this detector is characterized by + +$$
P_f = Q_{\chi_2^2}(\gamma) \qquad (42)
$$ + +$$
P_d = Q_{\chi_2'^2(\lambda)}(\gamma) \quad (43)
$$ + +$$
\lambda = \frac{1}{\sigma^2} \theta^T A^T [A(H^T H)^{-1} A^T]^{-1} A \theta \quad (44)
$$ + +$$
= \frac{1}{\sigma^2} \left( \left( \frac{\alpha - \beta}{2} \right)^2 d^2 E_1 + \frac{1}{16} d^4 E_2 \right) \quad (45)
$$ + +where $Q_{\chi_2^2}$ is the right tail probability for a Central Chi-Squared PDF with 2 degrees of freedom, and $Q_{\chi_2'^2(\lambda)}$ is the right tail probability for a noncentral Chi-Squared PDF with 2 degrees of freedom and noncentrality parameter $\lambda$. In order to perform the same analysis as Section 4.1 (i.e., $d_{min}$ versus SNR curve), we start by computing the required $\lambda$ from the above expressions, based on the fixed values of $P_d$ and $P_f$. Then, using the relation (18), we will have + +$$
SNR = \frac{\lambda(P_f, P_d)}{N} \frac{64E_0 - 16\alpha\beta d^2 E_1 + d^4 E_2}{4(\alpha - \beta)^2 d^2 E_1 + d^4 E_2} \quad (46)
$$ + +where $\lambda(P_f, P_d)$ represents the required value of noncentrality parameter as a function of the desired $P_f$ and $P_d$. For instance, for the case of $h(x) = \text{sinc}^2(x)$, with $P_d = 0.99$ and $P_f = 10^{-6}$ we have + +$$
SNR = \frac{56.29}{N} \cdot \frac{\frac{140}{\pi^4} - \frac{14}{\pi^2} \alpha \beta d^2 + d^4}{\frac{7}{2\pi^2} (\alpha - \beta)^2 d^2 + d^4}. \quad (47)
$$ + +It is useful to compare the performance of this detector (in terms of minimum detectable *d*) against the "best" case where the parameters *d*, *α* and *β* are actually known. In fact, a comparison in Fig. 5 demonstrates that, happily (and perhaps rather unexpectedly), the curves are very close, implying that the performance + +Fig. 5. $d_{\min}$ versus SNR (dB) for $\alpha = 1.2$ and $\beta = 0.8$. + +Fig. 6. GLRT for $\alpha \neq \beta$ and the case $\alpha = \beta$, symmetric sources; $d_{\min}$ versus SNR(dB). 
+ +of GLRT is very close to the optimal detector for which all pa- +rameters are known. + +An interesting observation arises from a comparison of the +minimum detectable *d* for the cases *α* = *β* and *α* ≠ *β*, shown +in Fig. 6. It is seen that unequal *α* and *β* yield better detec- +tion. That is, for a fixed *d*, the required SNR for resolving two +closely-spaced unequally bright point sources is *smaller* than +the SNR required to resolve two *equally spaced* sources. This +result seems counter-intuitive. Yet, the reason behind it is some- +what clear in hindsight. Equal *α* and *β* produce a perfectly +symmetric signal (without noise) and therefore result in redun- +dancy in the measured signal content. With unequal *α* and *β*, +an anti-symmetric part is added to the signal information and +better decision is made possible. This phenomenon is a result +of by the assumption of symmetry of point sources around the +origin (*x* = 0). If the center of the point sources is not known, +the results can be different, as we will explain in the next section. + +C. The Case of Unknown Intensities But α + β = 2 with Asymmetrically Located Point Sources + +With the earlier machinery in place, in this section, we study +the case where the point sources are not located symmetrically +---PAGE_BREAK--- + +around the origin ($x=0$). We consider the following model for this case: + +$$ +\begin{aligned} +g(x_k) &= s(x_k; \alpha, \beta, d_1, d_2) + w(x_k) \\ +&= \alpha h(x_k - d_1) + \beta h(x_k + d_2) + w(x_k) +\end{aligned} +\quad (48) $$ + +where $d_1$ and $d_2$ are unknown and $d = d_1 + d_2$ is the distance between the point sources. The Taylor expansion for the signal term in (48) around $(d_1, d_2) = (0, 0)$ is given by + +$$ s(x_k; \alpha, \beta, d_1, d_2) = (\alpha + \beta)h(x_k) + \frac{(\alpha d_1 + \beta d_2)h_1(x_k)}{2} + \frac{\alpha d_1^2 + \beta d_2^2}{2}h_2(x_k). 
\quad (49) $$ + +Here we consider the general case of unknown $\alpha$ and $\beta$ but $\alpha+\beta=2$ is known to the detector. However, we assume that the test for determining whether one peak is present or two peaks are present is performed at some point located between the two point sources. Hence, the hypothesis test can be expressed as + +$$ H_0: [d_1 \ d_2] = [0 \ 0] \\ H_1: [d_1 \ d_2] \neq [0 \ 0] \quad (50) $$ + +or equivalently (see (51) at the bottom of the page). By removing the known common term $(\alpha + \beta)h(x_k)$, we have the following linear model: + +$$ y = H\theta_a + w $$ + +where + +$$ +\begin{align*} +H &= [\mathbf{h}_1, \mathbf{h}_2] \\ +\theta_a &= \begin{bmatrix} -\alpha d_1 + \beta d_2 \\ \frac{\alpha d_1^2 + \beta d_2^2}{2} \end{bmatrix} \tag{52} +\end{align*} $$ + +and where the subscript “a” an $\theta_a$ is denoting the asymmetric case, to be distinguished from (36). Then, the corresponding hypotheses are given by + +$$ H_0: A\theta_a = b \\ H_1: A\theta_a \neq b \quad (53) $$ + +where + +$$ A = \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix}, \quad b = \begin{bmatrix} 0 \\ 0 \end{bmatrix} $$ + +just as in Section IV-B. The GLRT for (53) will be + +$$ T(y) = \frac{1}{\sigma^2} \left( \frac{(\mathbf{h}_1^t y)^2}{E_1} + \frac{(\mathbf{h}_2^t y)^2}{E_2} \right). \quad (54) $$ + +From (54), the performance of this detector is characterized by + +$$ +\begin{align*} +P_f &= Q_{\chi_d^2}(\gamma) \\ +P_d &= Q_{\lambda_d^2(\lambda)}(\gamma) \\ +\lambda &= \frac{1}{\sigma^2} \left( (-\alpha d_1 + \beta d_2)^2 E_1 + \left( \frac{\alpha d_1^2 + \beta d_2^2}{2} \right)^2 E_2 \right). \quad (55) +\end{align*} $$ + +Now, to obtain the relation between SNR and ($d_1, d_2$), we first need to compute the SNR for the model of (48), which is given by + +$$ \text{SNR} = \frac{1}{N\sigma^2} \left[ (\alpha + \beta)^2 E_0 - (\alpha + \beta)(d_1 + d_2)^2 E_1 + \left( \frac{\alpha d_1^2 + \beta d_2^2}{2} \right)^2 E_2 \right]. 
\quad (56) $$ + +The value of $\sigma^2$ in (55) can be obtained for the desired $P'_d$ and $P'_f$. By substituting this value in (56) we will have (57), shown at the bottom of the page. In order to present the results in this case, let us assume that⁹ $\alpha d_1 \approx \beta d_2$ (i.e., we perform the test at a point which is closer to the stronger peak.). It can be easily shown that the value of $\lambda$ in (55) is maximized for the case of $\alpha = \beta$. This shows that when $\alpha d_1 \approx \beta d_2$, the performance for the case of equal intensities is better than the performance of the case with unequal intensities. Fig. 7 confirms this result by showing the curves for $d_{\min}$ versus SNR for two cases: equal intensities and unequal intensities (we assume $h(x) = \sin c^2(x)$). By comparing this results and that of the previous section, we conclude that the assumption of symmetrically located point sources around the test point plays a very important role in the performance of the detector. Also, it is worth mentioning that with the assumption of $\alpha d_1 \approx \beta d_2$, we can approximate (57) for the range of small $d_1$ and $d_2$ in the following informative ways: + +$$ +\begin{align} +\text{SNR} &= \frac{\lambda(P_f, P_d)}{N} \frac{4(\alpha + \beta)^2}{(\alpha d_1^2 + \beta d_2^2)^2} \frac{E_0}{E_2} = \frac{\lambda(P_f, P_d)}{N} \frac{4}{d_1^2 d_2^2} \frac{E_0}{E_2} \nonumber \\ +&= \frac{\lambda(P_f, P_d)}{N} \frac{4(\alpha + \beta)^4 E_0}{\alpha^2 \beta^2 d^4 E_2} \tag{58} +\end{align} $$ + +⁹See Appendix C for a justification. 
+
+$$
+\begin{cases}
+H_0: \tilde{g}(x_k) = (\alpha + \beta)h(x_k) + w(x_k) \\
+H_1: \tilde{g}(x_k) = (\alpha + \beta)h(x_k) + (-\alpha d_1 + \beta d_2)h_1(x_k) + \frac{\alpha d_1^2 + \beta d_2^2}{2}h_2(x_k) + w(x_k)
+\end{cases}
+\quad (51) $$
+
+$$ \text{SNR} = \frac{\lambda(P_f, P_d)}{N} \frac{(\alpha + \beta)^2 E_0 - \alpha\beta(d_1 + d_2)^2 E_1 + \left(\frac{\alpha d_1^2 + \beta d_2^2}{2}\right)^2 E_2}{(-\alpha d_1 + \beta d_2)^2 E_1 + \left(\frac{\alpha d_1^2 + \beta d_2^2}{2}\right)^2 E_2}. \quad (57) $$
+---PAGE_BREAK---
+
+Fig. 7. $d_{\min}$ versus SNR(dB); $d = d_1 + d_2$ and $\alpha d_1 = \beta d_2$; equal intensities and unequal intensities.
+
+Fig. 8. $d_{\min}$ versus SNR(dB); $d = d_1 + d_2$ and $\alpha d_1 = \beta d_2$; detectors with and without the assumption of $\alpha + \beta = 2$.
+
+### D. The Case of Unknown Intensities, Asymmetrically Located Point Sources
+
+Here, we analyze the most general case in which we assume that the energy of point sources ($\alpha + \beta$) is unknown to the detector, as well as the individual $\alpha, \beta, d_1$, and $d_2$. Recalling (51), we can set up another linear model as follows:
+
+$$ \tilde{\mathbf{g}} = \mathbf{H}_u \boldsymbol{\theta}_u + \mathbf{w} $$
+
+where
+
+$$ \begin{aligned} \mathbf{H}_u &= [\mathbf{h}, \mathbf{h}_1, \mathbf{h}_2] \\ \boldsymbol{\theta}_u &= \begin{bmatrix} \alpha + \beta \\ -\alpha d_1 + \beta d_2 \\ \frac{\alpha d_1^2 + \beta d_2^2}{2} \end{bmatrix} \end{aligned} \quad (59) $$
+
+and the subscript "u" denotes the completely unknown parameters. The above setup leads to the following hypothesis test:
+
+$$ \begin{cases} H_0: & \mathbf{A}_u \boldsymbol{\theta}_u = \mathbf{b} \\ H_1: & \mathbf{A}_u \boldsymbol{\theta}_u \neq \mathbf{b} \end{cases} \quad (60) $$
+
+where
+
+$$ \mathbf{A}_u = \begin{bmatrix} 0 & 1 & 0 \\ 0 & 0 & 1 \end{bmatrix} , \quad \mathbf{b} = \begin{bmatrix} 0 \\ 0 \end{bmatrix} . 
$$ + +The GLRT for (60) will be + +$$ T'(\tilde{\mathbf{g}}) = \frac{1}{\sigma^2} \left( \frac{(\mathbf{h}_1^T \tilde{\mathbf{g}})^2}{E_1} + \frac{(E_2 \mathbf{h}_1^T \tilde{\mathbf{g}} + E_0 \mathbf{h}_2^T \tilde{\mathbf{g}})^2}{E_0(E_0 E_2 - E_1^2)} \right). \quad (61) $$ + +The performance of this detector is given by¹⁰ + +$$ P_f = Q_{\chi_2^2}(\gamma) \\ P_d = Q_{\chi_2^2(\lambda)}(\gamma) \\ \lambda = \frac{1}{\sigma^2} \left( (-\alpha d_1 + \beta d_2)^2 E_1 + \left( \frac{\alpha d_1^2 + \beta d_2^2}{2} \right)^2 \left( E_2 - \frac{E_1^2}{E_0} \right) \right). \quad (62) $$ + +Consequently, the relation between ($d_1, d_2$) and SNR is given by (63) as shown at the bottom of the page. By comparing (57) and (63), it can be readily shown that because of the negative term $-(E_1^2/E_0)$, the detector without the knowledge of $\alpha + \beta$ performs more poorly than the detector which knows $\alpha + \beta = 2$. Fig. 8 displays the performance of these two different detectors in terms of the minimum detectable $d$ versus SNR for the case of $h(x) = \text{sinc}^2(x)$. + +## V. THE CRAMÉR-RAO LOWER BOUND ON ESTIMATION OF THE UNKNOWN PARAMETERS + +In the interest of completeness, in this section we present results on the estimation of the unknown parameters of the model. In particular, we study the asymptotic performance of ML estimate of the unknown parameters, using the Cramér-Rao lower bound (CRLB). CRLB [15, p. 27] is a covariance inequality bound which treats the parameters as unknown deterministic quantities and provides a local bound on the mean square error (MSE) of their estimate. Being able to compute a lower bound + +¹⁰Note that according to the Cauchy-Schwarz inequality $E_0 E_2 \ge E_1^2$. 
+
+$$ \text{SNR} = \frac{\lambda(P_f, P_d)}{N} \frac{(\alpha + \beta)^2 E_0 - \alpha \beta (d_1 + d_2)^2 E_1 + \left(\frac{\alpha d_1^2 + \beta d_2^2}{2}\right)^2 E_2}{(-\alpha d_1 + \beta d_2)^2 E_1 + \left(\frac{\alpha d_1^2 + \beta d_2^2}{2}\right)^2 (E_2 - \frac{E_1^2}{E_0})} \quad (63) $$
+---PAGE_BREAK---
+
+Fig. 9. $\sqrt{\text{CRLB}(\hat{d})}$ versus $d$ for two different cases.
+
+on the variance of the parameter $d$, in particular, is rather helpful
+in verifying and confirming the earlier results of this paper. For
+example we shall see how the difference between $\alpha$ and $\beta$ af-
+fects the variance of the estimate in different cases. Here, we
+compute the CRLB for the following cases:
+
+• the signal model in (3), i.e., known intensities but un-
+known *d*;
+
+• the signal model in (48), i.e., unknown α, β, d₁, and d₂.
+For the details of the calculations (carried out mostly in
+the frequency domain), we refer the reader to Appendix B. Re-
+calling (3), the CRLB for the parameter *d* (assuming α and β
+known), is given by (64) and (65) at the bottom of the page. To
+compute the CRLB for the second case, when α, β, d₁, and d₂
+are unknown, the Fisher Information matrix is computed.¹¹ We
+have
+
+$$
+\operatorname{cov}(\hat{d}_1, \hat{d}_2, \hat{\alpha}, \hat{\beta}) \geq \Psi^{-1}(d_1, d_2, \alpha, \beta) \quad (66)
+$$
+
+where $\Psi$ is the 4 × 4 symmetric Fisher Information matrix with
+its elements defined by the equations at the bottom of the next
+page. The bound on the variance of $\hat{d}_1$ and $\hat{d}_2$ can be obtained
+by taking the elements (1, 1) and (2, 2) of the inverse Fisher
+information matrix $\Psi^{-1}$, respectively. Also, the CRLB on $d =$
+$d_1 + d_2$ is computed from
+
+$$
+\mathrm{CRLB}(\hat{d}) = [\Psi^{-1}]_{11} + [\Psi^{-1}]_{22} + 2[\Psi^{-1}]_{12}. \quad (67)
+$$
+
+¹¹We thank Prof. Jeff Fessler for sharing with us his calculations for the con-
+tinuous data case.
+
+Fig. 10. 
$\sqrt{\text{CRLB}(\hat{d})}$ versus $\alpha$ for two different cases. + +Fig. 9 shows the square-root of the CRLB (to maintain the same units as *d*) for *d*, for fixed values of the intensities *α* and *β*, versus the parameter value *d*, for two different cases; namely, the known intensity case with symmetrically located point sources, and the unknown *α*, *β*, *d*₁ and *d*₂ case. In this figure, we observe that the curves in each case are rather close for *d* > 0.5, and they are distinct when *α* is unknown and *d* is smaller than 0.5. In Fig. 10, the value of *d* = 0.3 is fixed, and the square-root of CRLB for *d̂* is shown over a range of values of *α*. The graph demonstrates the effect of the difference of *α* and *β* on the CRLB. As seen in this figure, the CRLB for the second case (unknown *α*, *β*, *d*₁ and *d*₂) increases rapidly when moving away from (*α*, *β*) = (1, 1); but for known *α* and *β*, there is a (rather slow) decay away from the position *α* = *β* = 1. The observed phenomenon is counter-intuitive, but can be readily explained by looking at the derivatives we computed in the calculation of the CRLB. When point sources are located symmetrically, with unequal intensities, the shape of the overall signal is dramatically different than the case when *α* = *β* = 1. This difference is accentuated further as the value of *α* − *β* becomes larger. Whereas for second case, because of uncertainty about the center and intensities of point sources, if *α* − *β* ≠ 0, the overall shape looks more like a single peak is present. The observed behavior is consistent with what we saw before where we demonstrated that unequal *α* and *β* yields improved detection if the center is known and vice versa. + +VI. CONCLUSION + +We have set out in this paper to address the question of +resolution from a sound statistical viewpoint. 
In particular, we
+
+$$
+\begin{align}
+\operatorname{var}(\hat{d}) &\ge \frac{\sigma^2}{\sum_k \left( \frac{\partial s(x_k, d)}{\partial d} \right)^2} = \frac{\sigma^2}{\frac{1}{2\pi} \int_{-\pi}^{\pi} \left| \frac{\partial S(\omega, d)}{\partial d} \right|^2 d\omega} \tag{64} \\
+&= \frac{\sigma^2}{f_s \frac{\pi^2}{15} (\alpha^2 + \beta^2) + \frac{\alpha^3}{\pi^3 d^3} \left[ (\pi^2 d^2 - 3) \sin(2\pi d) + 3\pi d \cos(2\pi d) + 3\pi d \right]} \tag{65}
+\end{align}
+$$
+---PAGE_BREAK---
+
+have explicitly answered a very practical question: What is the minimum detectable distance between two point sources imaged incoherently at a given signal-to-noise ratio? Or equivalently, what is the minimum SNR required to discriminate two point sources separated by a distance smaller than the Rayleigh limit? Based on different assumptions and models, we explicitly studied four different cases in our detection-theoretic approach, from the simplest to the most general case. We employed a hypothesis testing framework using locally most powerful tests, where the original highly nonlinear problem was approximated using a quadratic model in the parameter *d*. We also discussed asymptotic performance for estimation of the unknown parameters. The analysis has been carried out in one dimension to facilitate the presentation and to yield maximum intuition. We have begun the analysis in 2-D, including studies as a function of different aperture shapes and lenses, and the complete 2-D (spatial integration) sampling model. 
This 2-D analysis is not so different in spirit from the + +$$ +\Psi(1, 1) = \frac{1}{\sigma^2} \sum_k \left( \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_1} \right)^2 = \frac{\alpha^2}{2\pi\sigma^2} \int_{-\pi}^{\pi} |\omega f_s H(\omega, f_s)|^2 d\omega = \frac{f_s}{\sigma^2} \frac{4\pi^2 \alpha^2}{15} +$$ + +$$ +\Psi(2, 2) = \frac{1}{\sigma^2} \sum_k \left( \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_2} \right)^2 = \frac{\beta^2}{2\pi\sigma^2} \int_{-\pi}^{\pi} |\omega f_s H(\omega, f_s)|^2 d\omega = \frac{f_s}{\sigma^2} \frac{4\pi^2 \beta^2}{15} +$$ + +$$ +\Psi(3, 3) = \frac{1}{\sigma^2} \sum_k \left( \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial \alpha} \right)^2 = \frac{1}{2\pi\sigma^2} \int_{-\pi}^{\pi} |H(\omega, f_s)|^2 d\omega = \frac{f_s}{\sigma^2} \frac{2}{3} +$$ + +$$ +\Psi(4, 4) = \frac{1}{\sigma^2} \sum_k \left( \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial \beta} \right)^2 = \frac{1}{2\pi\sigma^2} \int_{\pi}^{\pi} |H(\omega, f_s)|^2 d\omega = \frac{f_s}{\sigma^2} \frac{2}{3} +$$ + +$$ +\Psi(1, 2) = \frac{1}{\sigma^2} \sum_k \frac{\partial s(x_k; \alpha, \beta, d_1, d_2) \partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_1 \partial d_2} +$$ + +$$ += -\frac{\alpha\beta}{2\pi\sigma^2} \int_{-\pi}^{\pi} |\omega f_s H(\omega, f_s)|^2 \cos(\omega f_s (d_1 + d_2)) d\omega +$$ + +$$ += \frac{f_s 2\alpha\beta (\pi^2(d_1+d_2)^2 - 3)\sin(2\pi(d_1+d_2)) + 6\pi(d_1+d_2)\cos^2(\pi(d_1+d_2))}{\sigma^2 \pi^3 (d_1+d_2)^5} +$$ + +$$ +\Psi(1, 3) = \frac{1}{\sigma^2} \sum_k \frac{\partial s(x_k; \alpha, \beta, d_1, d_2) \partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_1 \partial \alpha} = -\frac{\alpha}{2\pi\sigma^2} \int_{-\pi}^{\pi} \omega f_s |H(\omega, f_s)|^2 d\omega = 0 +$$ + +$$ +\Psi(1, 4) = \frac{1}{\sigma^2} \sum_k \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_1} \frac{\partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial \beta} +$$ + +$$ += -\frac{\alpha}{2\pi\sigma^2} \int_{-\pi}^{\pi} 
\omega f_s |H(\omega, f_s)|^2 \sin(\omega f_s (d_1 + d_2)) d\omega +$$ + +$$ += \frac{f_s}{\sigma^2} \frac{\alpha}{2\pi^3} \frac{3\sin(2\pi(d_1 + d_2)) - 4\pi(d_1 + d_2)\cos^2(\pi(d_1 + d_2)) - 2\pi(d_1 + d_2)}{(d_1 + d_2)^4} +$$ + +$$ +\Psi(2, 3) = \frac{1}{\sigma^2} \sum_k \frac{\partial s(x_k; \alpha, \beta, d_1, d_2) \partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_2 \partial \alpha} +$$ + +$$ += -\frac{\beta}{2\pi\sigma^2} \int_{-\pi}^{\pi} |\omega f_s| H(\omega, f_s|^2) \sin(\omega f_s (d_1 + d_2)) d\omega +$$ + +$$ += -\frac{f_s - \beta}{\sigma^2 2\pi^3} - -\frac{3\sin(2\pi(d_1 + d_2)) - 4\pi(d_1 + d_2)\cos^2(\pi(d_1 + d_2)) - 2\pi(d_1 + d_2)}{(d_1 + d_2)^4} +$$ + +$$ +\Psi(2, 4) = -\frac{1}{\sigma^2} \sum_k \frac{\partial s(x_k; \alpha, \beta, d_1, d_2) \partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial d_2 \partial \beta} = -\frac{\beta}{2\pi\sigma^2} \int_{-\pi}^{\pi} w f_s |H(w; f_s)|^2 dw = 0 +$$ + +$$ +\Psi(3, 4) = -\frac{1}{\sigma^2} \sum_k \frac{\partial s(x_k; \alpha, \beta, d_1, d_2) \partial s(x_k; \alpha, \beta, d_1, d_2)}{\partial a \\ \partial b} +$$ + +$$ += -\frac{1}{2\pi\sigma^2} \int_{-\pi}^{\pi} |H(\omega, f_s)|^2 \cos(\omega f_s (d_1 + d_2)) d\omega +$$ + +$$ += -\frac{f_s}{\sigma^2 2\pi^3} - -\frac{-\sin(2\pi(d_1 + d_2)) + 2\pi(d_1 + d_2)}{(d_1 + d_2)^4} +$$ +---PAGE_BREAK--- + +1-D case, but is significantly more messy; so we have elected to defer its presentation to the near future. + +The major conclusion of this paper is that for a given imaging scenario (in this case, incoherent imaging through a slit), with required probabilities of detection and false alarm, the minimum resolvable separation between two sources from uniformly sampled data can be derived explicitly as a function of the SNR per sample of the imaging array, and the sampling rate. 
The most useful rule of thumb we glean from these results is that for the case of equal intensities (or for the case of unequal intensities with a proper choice of test point), the minimum resolvable distance is essentially proportional to the inverse of the SNR to the fractional power of 1/4. The proportionality constant was shown to be a function of the probabilities of detection and false alarm, and the point spread function. In deriving these results, we have unified and generalized much of the literature on this topic that, while sparse, has spanned the course of roughly four decades. + +Many interesting questions remain to be studied. Of these, the analysis of the problem as a function of the sampling rate and sampling strategy come to mind. For instance, it is useful to study the performance in the presence of aliasing (i.e., sub-Nyquist sampling). It would also be interesting to study the effect of nonuniform sampling on performance. + +It is important to note that the strategy for the analysis of resolution we have put forward here is very generally applicable to other types of imaging systems. Once the point-spread function of the imaging system is known, the signal model $s(x; d)$ is determined, and the same line of reasoning can be carried out. The optical imaging scenario we have described here should really be thought of as a canonical example of the application of the general strategy we propose for studying resolution. Extensions of these ideas can also be considered to study limits to resolution for indirect imaging such as in computed tomography. + +As for other extensions and applications in optical imaging, an appealing direction is to study the limits to super-resolution from video [23]–[25]. The analysis presented here can help answer questions regarding the ability of image super-resolution methods to integrate multiple low resolution frames to produce a high resolution image from aliased data. 
+ +Finally, we wish to mention that this paper, we hope, represents one step forward in an overall methodology for studying imaging and image processing that appeals directly to concepts in information theory. This approach and point of view has been sorely lacking in the imaging community, and we hope that it will become more pervasive in the years to come. + +## APPENDIX A + +### ON THE ACCURACY OF THE QUADRATIC APPROXIMATION + +Here, we present an analysis to demonstrate the accuracy of the Taylor expansion proposed in Section 3. We consider the general model of (48) and its Taylor expansion in (49). Let us define residual percentage error of the approximation as follows: + +$$ \epsilon = \frac{\left\| s - (\alpha + \beta)\mathbf{h} - (-\alpha d_1 + \beta d_2)\mathbf{h}_1 - \frac{\alpha d_1^2 - \beta d_2^2}{2}\mathbf{h}_2 \right\|^2}{\|\mathbf{s}\|^2} \quad (68) $$ + +Fig. 11. Residual percentage error of the quadratic model; $\alpha d_1 = \beta d_2$. + +Fig. 12. Residual percentage error of the quadratic model; $\alpha = \beta = 1$. + +Consider the case when $\alpha d_1 = \beta d_2$ (See Appendix C). Fig. 11 shows the upper bound when $(d = d_1 + d_2 = 1)$ for $\epsilon$ as a function of $\alpha$ for $h(x) = \text{sinc}^2(x)$ (Note that again for above-Nyquist sampling, $\epsilon$ is independent from the sampling rate.). The maximum of $\epsilon$ is less than 20% in any case. Also, as seen in this figure, the approximation error for $d = 0.7$ is always less than 2.5%. Fig. 12 shows the curve for $\epsilon$ versus $d$ which indicates that the approximation error is quite acceptable for the range of interest near $d = 0$. To have a picture of the local error in the approximation, the error term + +$$ \epsilon(x; \alpha, \beta, d_1, d_2) = s(x; \alpha, \beta, d_1, d_2) - (\alpha + \beta)h(x) \\ - (-\alpha d_1 + \beta d_2)h_1(x) - \frac{\alpha d_1^2 + \beta d_2^2}{2}h_2(x) $$ + +is shown in Fig. 
13 for two different values of $d$ over the range of the variable $x$ in [-10, 10]. + +## APPENDIX B + +### FREQUENCY DOMAIN REPRESENTATION; PARSEVAL'S THEOREM FOR THE SIGNAL $s(x; d)$ + +Considering the sampled signal of the general model, where the point sources are located at $-d_1$ and $d_2$ we have + +$$ \begin{aligned} s(n; \alpha, \beta, d_1, d_2) &= s(x; \alpha, \beta, d_1, d_2)|_{x=\frac{n}{f_s}} \\ &= \alpha h\left(\frac{n}{f_s} - d_1\right) + \beta h\left(\frac{n}{f_s} + d_2\right). \end{aligned} \quad (69) $$ +---PAGE_BREAK--- + +Fig. 13. Difference between the actual signal and the quadratic model; $\alpha = \beta = 1$. + +For the case of above-Nyquist sampling,¹² in the frequency domain we will have the following 2π-periodic representation (see (70) at the bottom of the page) where $H(\omega, f_s) = (f_s^2/2\pi)((2\pi/f_s) - |\omega|)$ is the DTFT of $h(x_k)$ when $h(x) = \text{sinc}^2(x)$ and sampling rate is $f_s$. Correspondingly, for this case, the functions $h_1(x)$ and $h_2(x)$ can be written in the frequency domain as + +$$ H_1(\omega, f_s) = \begin{cases} j \frac{\omega f_s}{2\pi} \left( \frac{2\pi}{f_s} - |\omega| \right) & |\omega| < \frac{2\pi}{f_s} \\ 0 & \frac{2\pi}{f_s} \le |\omega| \le 2\pi \end{cases} \quad (71) $$ + +$$ H_2(\omega, f_s) = \begin{cases} -\frac{\omega^2 f_s^4}{2\pi} \left(\frac{2\pi}{f_s} - |\omega|\right) & |\omega| < \frac{2\pi}{f_s} \\ 0 & \frac{2\pi}{f_s} \le |\omega| \le 2\pi \end{cases} \quad (72) $$ + +Using Parseval's identities [19]: + +$$ \sum_{n=-\infty}^{\infty} |x(n)|^2 = \frac{1}{2\pi} \int_{-\pi}^{\pi} |X(\omega)|^2 d\omega \quad (73) $$ + +$$ \sum_{n=-\infty}^{\infty} x(n)y^{*}(n) = \frac{1}{2\pi} \int_{-\pi}^{\pi} X(\omega)Y^{*}(n) d\omega \quad (74) $$ + +we can easily compute the following terms: + +$$ E_0 = h^T h = f_s \frac{2}{3} \quad (75) $$ + +$$ E_1 = h_1^T h_1 = f_s \frac{4\pi^2}{15} \quad (76) $$ + +$$ E_2 = h_2^T h_2 = f_s \frac{32\pi^4}{105} \quad (77) $$ + +and + +$$ h_1^T s_0 = h_1^T h_2 = 
0 \quad (78) $$ + +¹²To recover exactly $s(x; d)$ would mathematically require an infinite number of measurements (or samples) $s(n; d)$ [21]. But since we have considered a fairly large range (−10 to 10) for sampling, and since the energy in the tails of the function in the range is very small, the effect of aliasing is essentially negligible. + +Note that in every case the energy terms are proportional to the sampling rate. It can be shown [20] that the energy of any uniformly (super-critically) sampled version of a band-limited signal is proportional to the sampling rate. + +## APPENDIX C +IS $\alpha d_1 \approx \beta d_2$ A REASONABLE ASSUMPTION? + +Suppose that we first wish to determine a location at which we carry out our hypothesis test. A reasonable way to find a good candidate is to compute the correlation of the signal with a shifted version of $h(x)$ and find the point where the correlation is maximum (this would yield a point near the brighter of the two peaks). Consider + +$$ R_{sh}(|\tau|, \alpha, \beta, d_1, d_2) = \int_{-\infty}^{+\infty} (s(x; \alpha, \beta, d_1, d_2) + w(x))h(x + \tau) dx \quad (79) $$ + +$$ = \int_{-\infty}^{+\infty} (\alpha h(x - d_1) + \beta h(x + d_2) + w(x))h(x + \tau) dx \quad (80) $$ + +$$ = \alpha R_{hh}(|\tau| - d_1) + \beta R_{hh}(|\tau| + d_2) + u(|\tau|) \quad (81) $$ + +where $R_{sh}$ and $R_{hh}$ are the cross-correlation and autocorrelation functions, respectively, and + +$$ u(|\tau|) = \int_{-\infty}^{-\infty} w(x)h(x + \tau) dx \quad (82) $$ + +is a noise term (with zero mean). It should be clear from the model that $R_{sh1}$ would be maximized at $\tau = 0$. 
Also, since $d_1$ and $d_2$ are assumed to be small, by using the Taylor expansion around $|\tau| - d_1 = 0$ and $|\tau| + d_2 = 0$, we will have
+
+$$ R_{hh}(|\tau| - d_1) = \xi_0 + (|\tau| - d_1)\xi_1 + (|\tau| - d_1)^2\xi_2 \quad (83) $$
+
+$$ R_{hh}(|\tau| + d_2) = \xi_0 + (|\tau| + d_2)\xi_1 + (|\tau| + d_2)^2\xi_2 \quad (84) $$
+
+where $\xi_0$, $\xi_1$, and $\xi_2$ are some constant coefficients of the above Taylor expansion. Also, it can be shown that $\xi_1 = 0$. Therefore, we can write (81) as follows:
+
+$$ R_{sh}(|\tau|, \alpha, \beta, d_1, d_2) = (\alpha + \beta)\xi_0 + \left(\alpha(|\tau| - d_1)^2 + \beta(|\tau| + d_2)^2\right)\xi_2 + u(|\tau|) \quad (85) $$
+
+Taking the derivative of $R_{sh}(|\tau|, \alpha, \beta, d_1, d_2)$ with respect to $\tau$ and setting it to zero results in:
+
+$$ (\alpha + \beta)|\tau| = \alpha d_1 - \beta d_2 \quad (86) $$
+
+Hence, a proper selection of $\tau$ (i.e., the test point) will lead to $\alpha d_1 \approx \beta d_2$.
+
+$$ S(\omega, d) = \begin{cases} H(\omega, f_s)(\alpha \exp(-j\omega f_s d_1) + \beta \exp(j\omega f_s d_2)) & |\omega| < \frac{2\pi}{f_s} \\ 0 & \frac{2\pi}{f_s} \le |\omega| \le 2\pi \end{cases} \quad (70) $$
+---PAGE_BREAK---
+
+ACKNOWLEDGMENT
+
+The authors wish to acknowledge Prof. A. Shakouri of U.C., Santa Cruz, for providing the early practical inspiration from the laboratory bench that led them to consider the questions addressed in this paper. They thank Prof. J. Fessler of the University of Michigan for his helpful suggestions for the CRLB analysis. They also thank the reviewers for their constructive comments and suggestions.
+
+REFERENCES
+
+[1] J. W. Goodman, *Introduction to Fourier Optics*. New York: McGraw-Hill, 1996.
+
+[2] J. D. Gaskill, *Linear Systems, Fourier Transforms, and Optics*. New York: Wiley, 1978.
+
+[3] L. B. Lucy, "Statistical limits to super-resolution," *Astron. Astrophys*, vol. 261, pp. 706-710, 1992.
+
+[4] C. W. 
Helstrom, "The detection and resolution of optical signals," *IEEE Trans. Inf. Theory*, vol. IT-10, pp. 275-287, 1964. + +[5] ——, "Detection and resolution of incoherent objects by a background-limited optical system," *J. Opt. Soc. Amer.*, vol. 59, pp. 164-175, 1969. + +[6] ——, "Resolvability of objects from the standpoint of statistical parameter estimation," *J. Opt. Soc. Amer.*, vol. 60, pp. 659-666, 1970. + +[7] L. B. Lucy, "Resolution limits for deconvolved images," *Astron. J.*, vol. 104, pp. 1260-1265, 1992. + +[8] A. van den Bos, "Ultimate resolution: A mathematical framework," *Ultramicroscopy*, vol. 47, pp. 298-306, 1992. + +[9] A. J. den Dekker, "Model-based optical resolution," *IEEE Trans. Instrum. Meas.*, vol. 46, pp. 798-802, 1997. + +[10] A. J. den Dekker and A. van den Bos, "Resolution, a survey," *J. Opt. Soc. Amer.*, vol. 14, pp. 547-557, 1997. + +[11] E. Bettens, D. Van Dyck, A. J. den Dekker, J. Sijbers, and A. van den Bos, "Model-based two-object resolution from observations having counting statistics," *Ultramicroscopy*, vol. 77, pp. 37-48, 1999. + +[12] A. van den Bos, "Resolution in model-based measurements," *IEEE Trans. Instrum. Meas.*, vol. 51, pp. 1055-1060, 2002. + +[13] E. L. Kosarev, "Shannon's superresolution limit for signal recovery," *Inverse Problem*, vol. 6, pp. 55-76, 1990. + +[14] P. Milanfar and A. Shakouri, "A Statistical analysis of diffraction-limited imaging," in *Proc. Int. Conf. Image Processing*, Sept. 2002, pp. 864-867. + +[15] S. M. Kay, *Fundamentals of Statistical Signal Processing, Estimation Theory*: Prentice-Hall, Inc., 1998. + +[16] ——, *Fundamentals of Statistical Signal Processing, Detection Theory*. Englewood Cliffs, NJ: Prentice-Hall, 1998. + +[17] ——, *Modern Spectral Estimation, Theory and Application*. Englewood Cliffs, NJ: Prentice-Hall, 1988. + +[18] ——, "Spectrum analysis, a modern perspective," *Proc. IEEE*, vol. 69, no. 11, pp. 1380-1418, 1981. + +[19] A. V. Oppenheim and R. W. 
Schafer, *Discrete-Time Signal Processing*. Englewood Cliffs, NJ: Prentice-Hall, 1993. + +[20] P. P. Vaidyanathan, "Generalizations of the sampling theorem: Seven decades after Nyquist," *IEEE Trans. Circuits Syst.*, vol. 48, pp. 1094-1109, Sept. 2001. + +[21] M. Vetterli, P. Marziliano, and T. Blu, "Sampling signals with finite rate of innovation," *IEEE Trans. Signal Processing*, vol. 50, pp. 1417-1428, June 2002. + +[22] M. Shahram and P. Milanfar, "A statistical analysis of achievable resolution in incoherent imaging," in *Proc. SPIE Annual Meeting*, San Diego, CA, Aug. 2003, URL: http://www.soe.ucsc.edu/~milanfar/publications.htm. + +[23] M. Elad and A. Feuer, "Restoration of single super-resolution image from several blurred, noisy and down-sampled measured images," *IEEE Trans. Image Processing*, vol. 6, pp. 1646-1658, Dec. 1997. + +[24] N. Nguyen, P. Milanfar, and G. H. Golub, "A computationally efficient image superresolution algorithm," *IEEE Trans. Image Processing*, vol. 10, pp. 573-583, Apr. 2001. + +[25] S. Farsiu, D. Robinson, M. Elad, and P. Milanfar, "Fast and robust multi-frame superresolution," *IEEE Trans. Image Processing*, to be published. + +**Morteza Shahram** received the B.S. degree from the Amir-Kabir University of Technology, Tehran, Iran, in 1996 and the M.S. degree from the Sharif University of Technology, Tehran, in 1998 both in electrical engineering. He is currently pursuing the Ph.D. degree in electrical engineering at the University of California, Santa Cruz. +He was with the Signal Company, Tehran, as a Research Engineer from 1996 to 2001. His research interests are statistical signal and image processing and information-theoretic imaging. + +**Peyman Milanfar** (S'90-M'93-SM'98) received the B.S. degree in electrical engineering/matematics from the University of California, Berkeley, in 1988, and the S.M., E.E., and Ph.D. 
degrees in electrical engineering from the Massachusetts Institute of Technology, Cambridge, in 1990, 1992, and 1993, respectively. +Until 1999, he was a Senior Research Engineer at SRI International, Menlo Park, CA. He is currently Associate Professor of Electrical Engineering at the University of California, Santa Cruz. He was a Consulting Assistant Professor of computer science at Stanford University from 1998-2000, and a visiting Associate Professor there from June to December 2002. His technical interests are in statistical signal and image processing, and inverse problems. +Dr. Milanfar won a National Science Foundation CAREER award in 2000. +He was an associate editor for the IEEE SIGNAL PROCESSING LETTERS from 1998 to 2001. \ No newline at end of file diff --git a/samples_new/texts_merged/2634535.md b/samples_new/texts_merged/2634535.md new file mode 100644 index 0000000000000000000000000000000000000000..5c1d29510d139920ba5a3fe363182c31e081e67c --- /dev/null +++ b/samples_new/texts_merged/2634535.md @@ -0,0 +1,447 @@ + +---PAGE_BREAK--- + +ORIGINAL ARTICLE + +WILEY + +# Why is free education so popular? A political economy explanation + +Juan A. Correa¹ | Yijia Lu² | Francisco Parro³ | Mauricio Villena³ + +¹Facultad de Economía y Negocios, +Universidad Andres Bello, Santiago, +Chile + +²School of Law, New York University, +New York, New York + +³School of Business, Universidad Adolfo +Ibáñez, Santiago, Chile + +**Correspondence** + +Francisco Parro, School of Business, +Universidad Adolfo Ibáñez, 7941169, +Santiago, Chile. +Email: fjparrog@gmail.com + +## Abstract + +This paper analyzes the political support for different funding regimes of education in a one-person, one-vote democracy. 
We focus the analysis on four systems that have had a preponderant presence in the political debate on education: a private system, a public system that delivers the same resources to each student (universal-free education), a public system that intends to equalize results, and a public system that aims to maximize the output of the economy. We show that a system of universal free education is the Condorcet winner. The level of income inequality and the degree to which income distribution is skewed to the right are key factors behind this conclusion. We also show that the voting outcome of public versus private funding for education depends crucially on the type of public funding under consideration. + +## 1 | INTRODUCTION + +Universal free education has become popular in several regions of the world. Western democracies have it at different stages of the educational ladder. European countries, such as France, provide free tuition to European students, and Germany offers free tuition even to international students. Argentina, the Czech Republic, and Greece supply free education at all educational levels. Most of the United States primary and secondary students attend public schools, which provide free education, funded by a mix of federal, regional, and local resources.¹ In other countries, such as Chile, South Africa, and the United Kingdom, where + +¹An extensive cross-country analysis of education's tuition fee schemes can be found in Bentaouet Kattan (2006). +---PAGE_BREAK--- + +higher education is not free, social movements have pressured the authorities to implement a scheme of universal free education for higher education.² In this paper, we give a political economy explanation for the popularity of free education. + +A system of universal free education allocates public funds equally across students. 
This system, however, is not completely consistent with the main implications of a strand of the literature that emphasizes, first, the importance of economic growth to improve living standards and, second, human capital investments as the engine to promote growth (Benhabib & Spiegel, 1994; Hanushek & Kimko, 2000; among others). This branch of the literature points to a system in which public resources for education should be allocated to students with higher skills (so as to maximize aggregate output), relying on alternative instruments for redistribution. Universal free education also implies that public funds are allocated regardless of the student's family income. However, studies such as Samoff (1996) and Larkin and Staton (2001) highlight the importance of equity in the allocation of public resources spent on education. This implies that disadvantaged students should be supported with more resources, which would allow equalizing human capital across students. Hence, universal free education does not point in the direction suggested by these two strands of the literature. + +A third strand of the literature suggests that different public funding systems should be implemented at different stages of the educational system. Empirical studies document low returns to interventions targeting disadvantaged adolescents, but high economic returns for remedial investments targeting young disadvantaged children (Cunha & Heckman, 2007; Cunha, James, Lochner, & Masterov, 2006; Heckman, 2008; Heckman & Masterov, 2007). This evidence implies an equity-efficiency trade-off for late child investments but not for early investments (Cunha & Heckman, 2007). Thus, public resources for education should focus on low-income students at earlier stages. 
However, at later stages, when human capital inequalities are difficult to undo, public resources should be shifted toward high-human capital students so as to maximize output, relying on an alternative instrument for socially desirable redistribution. The popularity of free education at different stages of education is not completely aligned with the implications derived from this third strand of the literature. + +Then, why is universal free education so popular in the world? This paper gives a political economy explanation for this popularity. We model a static economy populated by a continuum of heterogeneous agents or *parents*, and each of them has one child and must vote for the funding regime that will finance the education of the child. Parents are heterogeneous in terms of human capital, which equals the family income. The parents' human capital is exogenously given and distributed according to a lognormal distribution function, as in Glomm and Ravikumar (1992) and Becker (1993). We study the Condorcet winner among four funding regimes that frequently appear in the political debate: a private system, a public system that delivers the same resources to all students, a public system that intends to equalize results, and a public system that aims to maximize the output of the economy. + +Our analysis shows that a public system that universally invests the same resources in each student is the Condorcet winner in a one-person, one-vote democracy. The intuition behind our + +²In Chile, the Confederation of Chilean Student Federations (CONFECH), a national body made up of students at Chilean universities, led a series of student protests across the country in 2011. The student movement demanded, among other things, an increase in state support for public universities and free public education. In South Africa, the "Fees Must Fall" movement emerged in 2015 after the government announced an increase in mandatory fees at the universities. 
Students were placated after the proposal for the increase was dropped. The 2010 United Kingdom student protests were a series of demonstrations held in opposition to the planned increase of the cap on tuition fees by the Conservative-Liberal Democrat coalition government. The biggest demonstration occurred in November 2010, officially known under the phrase of "Fund Our Future: Stop Education Cuts," where thousands of students marched through central London demanding free education. +---PAGE_BREAK--- + +key finding relies on the lognormal distribution of income, which is skewed to the right. A public system that equalizes outcomes will channel more resources per student to a minority of poor students. The efficiency-oriented system, in contrast, diverts more resources per student to a minority of wealthy students. The majority therefore does not favor public systems that disproportionally benefit a small group of either poor or rich agents, in comparison to the system that equalizes resources across students. In addition, the lognormal income distribution also implies that the median income is below the per capita level. A proportional tax on income that is then redistributed evenly between all students benefits those whose income falls below the mean. Then, the latter agents, who are the majority, prefer the public system that invest the same amount in each student, rather than the private system. + +Therefore, our paper provides a political economy explanation for the popularity of universal free education. We show that an ex ante egalitarian public funding system for education is the Condorcet winner when it is confronted by a private system, an ex post egalitarian public system, and an output-maximizing public system. In addition, we show that the voting outcome of public versus private funding for education depends crucially on the type of public funding under consideration. 
Concretely, we prove that voters might choose a private system when a government proposes as a single alternative either a public system that intends to equalize results or a public system that aims to maximize the output of the economy. Thus, the voting outcome of the public versus private funding systems is not a trivial issue. We also discuss extensions to the baseline model, to show that our main result holds in democracies with a limited degree of either elitism or populism and in a type of top-up education system. + +Our work builds upon earlier studies of the political economy of education funding. Creedy and Francois (1990) examine the conditions under which an uneducated majority of individuals support the financing of a proportion of the costs of education through the tax system. Glomm and Ravikumar (1992) analyze the political support for private versus public education, but in their model, voters face only one public funding design. Fernandez and Rogerson (1995) claim that the net effect of public support for higher education is a transfer of resources from poor to rich agents. They show that the underlying factor behind this result is the fact that education is only partially publicly provided. Then, the rich and the middle class may vote for relatively low subsidies to exclude poorer agents from education in the presence of credit constraints to privately finance education. Epple and Romano (1996) study the existence and properties of voting equilibria over public school expenditure in the presence of a private alternative. + +More recently, De Fraja (2001) studies the voting equilibrium when voters must choose between two higher education reforms: the imposition of an ability test for admission to a university and a uniform subsidy to university attendance financed by a proportional tax on income. 
In a similar line, Anderberg and Balestrino (2008) study the voting equilibrium when there are two options to finance higher education in an economy with credit constraints: A subsidy to those who participate in education and a proportional income tax. Borck and Wimbersky (2014) study the political determination of higher education finance. The authors focus their analysis on the factors that might contribute toward higher education reforms from a traditional tax-subsidy scheme to income-contingent loan schemes or graduate taxes. + +These previous studies have not analyzed the political support for education funding systems when private education competes with public funding alternatives aiming at equalizing resources, equalizing results, or maximizing output. Including a complete list of public funding, alternatives is important since, as we show explicitly in this paper, the Condorcet winner indeed depends on the specific design for the public funding alternative. In this sense, the analysis developed by Glomm and Ravikumar (1992), who consider a single public funding system, does +---PAGE_BREAK--- + +not contain straightforward implications about the Condorcet winner for the case in which the pool of alternatives for the voters includes several public funding schemes. + +The rest of this paper is organized as follows. Section 2 presents the model and derives human capital formation under different education funding systems. Section 3 analyzes the political support for alternative education funding systems. Section 4 discusses extensions to our model. Finally, Section 5 concludes. + +## 2 | THE MODEL + +Consider a static economy populated by a continuum of heterogeneous agents or *parents*, each with only one child.³ Children are differentiated by the human capital they inherit from their parents. This initial human capital of the child is an input for the child's formal education. 
Parent *i*'s initial human capital, $h_P^i$, is exogenously given and distributed according to a lognormal distribution function $G$ with parameters $\mu$ and $\sigma^2$ over support $(0, +\infty)$.⁴ We normalize the size of the population to 1. + +Children do not make any decisions. They only receive education, which is used to accumulate human capital. Each parent decides how to allocate her income $h_P^i$ between consumption $c^i$ and her child's education $y^i$. We set labor to 1; thus, an agent's labor earnings equal her human capital. Parents cannot borrow against the future earnings of their children, since there is no capital market in this economy.⁵ + +All individuals have identical preferences. The preferences are for own consumption and for the total human capital they pass on to their descendants, as in Banerjee and Newman (1991).⁶ Specifically, agent *i* has the following utility function, + +$$U(c^i, h_c^i) = \ln c^i + \lambda \ln h_c^i, \quad (1)$$ + +where $c^i$ is the agent's consumption and $h_c^i$ is the total human capital passed on to the child, discounted by $\lambda \in (0,1)$. The human capital passed on is determined by the following equation:⁷ + +$$h_c^i = \Theta(v^i + y^i)^{\gamma} (h_P^i)^{\delta}, \quad (2)$$ + +which depends upon agent *i*'s human capital $h_P^i$ and the total amount $v^i + y^i$ of resources invested in the education of the child, where $v^i$ are the resources (or voucher) invested in education by the government in the child of agent *i* and $y^i$ are the resources invested in education by agent *i*, the parent. The parameter $\Theta > 0$ is an exogenous constant. The parameter + +³Sleebos (2003) reports that the average fertility rate in OECD countries is about 1.6 children per woman. Docquier (2004) shows that there is no clear relation between income and fertility in developed countries. However, a more general model with endogenous fertility rates would be an interesting avenue for future research. 
+ +⁴Since Gibrat (1931), the lognormal distribution has been extensively used to describe within- or between-country income distributions. The lognormal distribution has been empirically shown to explain most of the income distribution (see Clementi & Gallegati, 2005; Neal & Rosen, 2000; among others). + +⁵Several studies have highlighted capital market imperfections as an important aspect of the investment in human capital (e.g., Aghion & Bolton, 1992; Becker, 1993; Becker & Tomes, 1979; Galor, 2000; Moav, 2002; among others). + +⁶A more sophisticated formulation for altruism (Kohlberg, 1976; Loury, 1981; Becker, 1986; Banerjee & Newman, 1991; Becker, 1993, among others) leads to an untractable formulation when comparing different regimes. + +⁷The human capital that parents pass on their children can be interpreted either as the initial skills of preprimary students who starts formal education or as the amount of human capital with which a secondary student starts her tertiary education. +---PAGE_BREAK--- + +$\gamma \in (0,1)$ captures the returns to investment in education and the parameter $\delta > 0$ captures the returns to the parental human capital. + +The only difference between the educational systems studied is made by the constraints imposed upon $v^i$ and $y^i$. Under a purely private system, the government makes no investment in education, so $v^i = 0$. Agent $i$, therefore, divides her income $h_p^i$ between consumption $c^i$ and private investment in the education of her child $y^i$, with $h_p^i = c^i + y^i$. Under public education, only the government invests in education, so $y^i = 0$. Since agent $i$ spends nothing on education, all of the post-tax income $(1-\tau)h_p^i$ goes into consumption: $c^i = (1-\tau)h_p^i$, where $\tau$ is the tax rate on the agent's income. The total revenue raised by the government is $\tau H_p$, where $H_p = \int h_p dG(h)$. 
This revenue is distributed among the students in the following three ways in the public education systems we study: (a) equally ex ante, with $v^i = v^j$, $\forall i,j$; (b) equally ex post, so that $h_c^i = h_c^j, \forall i,j$; and (c) output maximizing, so that $dh_c^i/dv^i = dh_c^j/dv^j, \forall i,j$. In all three cases, budget balance requires $\mathbb{E}[v] = \tau H_p$, where $\mathbb{E}$ denotes the expectation operator. + +## 2.1 | The private education system (S1) + +In this section, we study the optimal investment in education under a purely private funding system, where the government's investment in education is absent ($v^i = 0, \forall i$). Agent $i$, therefore, chooses $c^i$ and $y^i$ to maximize $U(c^i, h_c^i)$ subject to the technology of human capital formation $h_c^i = \theta(y^i)'(h_p^i)^{\delta}$ and the feasibility constraint $h_p^i = c^i + y^i$. The first order condition with respect to $y^i$ is + +$$y^i = \left( \frac{\lambda\gamma}{1 + \lambda\gamma} \right) h_p^i. \quad (3)$$ + +Therefore, parents invest a constant fraction $\lambda\gamma/(1+\lambda\gamma)$ of their income in the education of their children. We prove later that the fraction of the income that parents privately invest in education is identical to the majority's preferred tax rate. + +## 2.2 | The public education systems + +Now suppose that education is financed publicly. No private acquisition of education is allowed, so $y^i = 0$. Thus, agents consume their after-tax income $c^i = (1-\tau)h_p^i$. Public education is financed by a proportional income tax $\tau$. The resources collected by the government are used to provide education to children. We focus on three different public funding systems. In the first, the government invests an equal amount of money in each student. In the second, the government invests resources to equalize the human capital of the students at the end of the education stage. 
In the third, the government seeks to maximize the total human capital of the economy. + +### 2.2.1 | The ex ante egalitarian public education system (S2) + +In this public system, the government invests the same amount of resources in each student. The subsidy given to each student is denoted by $v$. Under the constraint that total expenditures must be equal to the total resources collected by the proportional income tax, the equilibrium investment in each student is + +$$v = \tau \mathbb{E}[h_p]. \quad (4)$$ +---PAGE_BREAK--- + +Hence, the government gives a flat subsidy to all students. The amount of this subsidy is equal to a fraction $\tau$ of the per capita income of the economy. Moreover, since agent i's utility is $\ln(1 - \tau) + \gamma\lambda \ln \tau + (\text{terms independent of } \tau)$, the tax rate $\tau^i$ that maximizes agent i's utility is $\tau^i = \gamma\lambda/(1 + \gamma\lambda)$. Since $\tau^i$ is independent of agent i's characteristics, the same tax rate maximizes all agents' utilities. Therefore, we have that the government chooses $\tau = \gamma\lambda/(1 + \gamma\lambda)$, which is the tax rate preferred by all parents. + +### 2.2.2 | The ex post egalitarian public education system (S3) + +In this system, the government seeks to remedy initial inequalities in human capital through investments in education that equalize ex post human capital. To do so, the government invests in agents i and j the amounts $v^i$ and $v^j$, respectively, such that $h_c^i = h_c^j$. Therefore, the relative public investment in students from different families must satisfy $v^i/v^j = (h_p^j/h_p^i)^{\delta/\gamma}$. Taking expectations with respect to j and imposing the balanced-budget constraint $\mathbb{E}[v] = \tau\mathbb{E}[h_p]$, we have that the amount invested by the government on a student from family i is + +$$ v^i = \tau \mathbb{E}[h_p] \left( \frac{(h_p^i)^{\delta/\gamma}}{\mathbb{E}[h_p^{\delta/\gamma}]} \right). 
\quad (5) $$ + +Therefore, each student receives a proportion of the per capita subsidy delivered under regime S2. This proportion decreases with the initial level of the human capital of the student (or, equivalently, with the family income). Specifically, the proportion of the per capita voucher that each student receives varies according to some measure of the gap between the initial human capital of the student and the average human capital of the economy. Poorer students receive more resources to compensate for their initial lower levels of human capital so that the results of the educational process are equalized across all students. + +Additionally, as in the case of an ex ante egalitarian system, the same argument applies to show that the tax rate chosen is $\tau = \gamma\lambda/(1 + \gamma\lambda)$. + +### 2.2.3 | The output maximizing public education system (S4) + +In the third public system, the government invests the collected resources to maximize the total human capital of the economy. Given this goal, the efficient expenditure is achieved when the marginal product of investment in each student is equalized, that is, $dh_c^i/dv^i = dh_c^j/dv^j$, $\forall i, j$. Therefore, the relative amount of resources invested in each family is $v^i/v^j = (h_p^i/h_p^j)^{\delta/(1-\gamma)}$. As we did before, taking expectations with respect to j and imposing the balanced-budget constraint on the government $\mathbb{E}[v] = \tau\mathbb{E}[h_p]$, we obtain⁸ + +$$ v^i = \tau \mathbb{E}[h_p] \left( \frac{(h_p^i)^{\delta/(1-\gamma)}}{\mathbb{E}[h_p^{\delta/(1-\gamma)}}} \right). \quad (6) $$ + +⁸Equation (6) characterizes a maximum only if the second-order condition holds: $\gamma(\gamma-1)(v^i v^j - 2(h_p^i)^2)^{\delta/2} < 0$, $\forall i$. This condition holds since we have assumed that $\gamma \in (0, 1)$. 
+---PAGE_BREAK--- + +In this regime, each student receives a voucher that is increasing in the level of the student's initial human capital, since the marginal product of public investment in education is higher in students with a greater initial human capital. Therefore, output maximization requires providing larger subsidies to better-endowed students. As in the previous cases, it is straightforward to show that the tax rate chosen by the majority is $\tau = \gamma\lambda/(1 + \gamma\lambda)$. + +# 3 | POLITICAL SUPPORT FOR THE EDUCATION FUNDING SYSTEMS + +In this section, we analyze the political support for different education funding systems in a one-person, one-vote democracy. Concretely, we study the existence and identity of the Condorcet winner among the four funding systems described in Section 2. The game is solved by backward induction. First, the taxes are determined for each system. Then, the systems are compared in pairwise elections and the Condorcet winner is elected. + +## 3.1 | Utility comparison + +We first derive the indirect utility $V(h_p^i)$ of an agent $i$ under the four funding systems. In the expressions below, we group the terms to facilitate a comparison of the channels through which the agent's human capital $h_p^i$ impacts the agent's utility. In addition, we discuss each of these channels and assess the ones that matter in our comparison. 
+ +$$V^{S1}(h_p^i) = \ln\left(\frac{1}{1+\lambda\gamma}\right)h_p^i + \lambda \ln \theta + \lambda\delta \ln h_p^i + \lambda\gamma \ln\left(\frac{\lambda\gamma}{1+\lambda\gamma}\right)h_p^i, \quad (7)$$ + +$$V^{S2}(h_p^i) = \ln(1 - \tau)h_p^i + \lambda \ln \theta + \lambda\delta \ln h_p^i + \lambda\gamma \ln \tau \mathbb{E}[h_p], \quad (8)$$ + +$$V^{S3}(h_p^i) = \ln(1-\tau)h_p^i + \lambda \ln \theta + \lambda\delta \ln h_p^i + \lambda\gamma \left[ -\frac{\delta}{\gamma} \ln h_p^i + \ln \tau \mathbb{E}[h_p] - \ln \mathbb{E}[(h_p)^{-\delta/\gamma}] \right], \quad (9)$$ + +$$V^{S4}(h_p^i) = \ln(1-\tau)h_p^i + \lambda \ln \theta + \lambda\delta \ln h_p^i + \lambda\gamma \left[ \left(\frac{\delta}{1-\gamma}\right) \ln h_p^i + \ln \tau \mathbb{E}[h_p] - \ln \mathbb{E}[(h_p)^{\delta/(1-\gamma)}] \right]. \quad (10)$$ + +Human capital influences the utility of an agent through three channels. First, human capital determines the income of the agent and, thus, the agent's consumption. The equilibrium of disposable income for consumption under the private system is $(1/(1+\lambda\gamma))h_p^i$, and it is $(1-\tau)h_p^i$ under each of the public systems. We have already shown that the chosen tax rate is $\lambda\gamma/(1+\lambda\gamma)$. Thus, the amount invested by each parent in the private system equals the taxes paid by them to finance a public system. It follows that the equilibrium consumption level reached by any agent is the same under each of the four education funding systems. +---PAGE_BREAK--- + +We, therefore, conclude that the impact of a funding system on the disposable income of an agent is not a decisive factor to tilt the balance in favor of one of the funding systems. + +Human capital also affects the indirect utility of agents through the production technology of human capital, described by Equation (2). Agents have preferences not only on consumption but also on the human capital they pass on to their children. 
Thus, the human capital of a parent directly determines the child's human capital and, through this channel, influences the parent's indirect utility. The latter effect is equal to $\lambda\delta\ln h_p^i$ and is identical under the four systems. Hence, neither does this channel play a role in the choice of the education funding system. + +The third channel through which human capital affects the utility of an agent is the parental income's impact on the resources for education that the child receives under each of the funding systems. In the private system, parents invest a fixed fraction of their income, as reflected by the term $\ln y^i = \ln(\lambda\gamma/(1 + \lambda\gamma))h_p^i$ in Equation (7). Thus, there is a positive relationship between parental income and the resources invested in the student of the corresponding family. The ex ante egalitarian public education system (S2) invests the same resources in each family, as captured by the term $\ln v^i = \ln \tau \mathbb{E}[h_p]$ in Equation (8). Thus, there is no relationship between one family's income and the resources that the system invests in the student from that family. The ex post egalitarian public education system (S3) seeks to equalize ex post human capital. Thus, this system invests more in students from low-income families, generating a negative relationship between parental income and the resources invested by the system in the student. This relationship is expressed by $\ln v^i = -(\delta/\gamma)\ln h_p^i + \ln \tau \mathbb{E}[h_p] - \ln \mathbb{E}[(h_p)^{-(\delta/\gamma)}]$ in Equation (9). The opposite occurs with the efficient system (S4), which invests more in students from high-income families, as expressed by the term $\ln v^i = (\delta/(1-\gamma))\ln h_p^i + \ln \tau \mathbb{E}[h_p] - \ln \mathbb{E}[(h_p)^{(\delta/(1-\gamma))}]$ in Equation (10). 
Therefore, different systems invest differently in the student of a given family, even though the resources that the family disburses under each of the funding systems are identical. + +The previous discussion implies that parents will support the system that invests the most in their children. The private system (S1) and the efficient system (S4) invest more in students from richer families, whereas the opposite occurs under the ex post egalitarian public education system (S3). The ex ante egalitarian public education system (S2) is neutral as it invests exactly the same amount in each student. + +As an intermediate step in our analysis, we express Equations (7)–(10) in a simpler and more informative form. To do so, note that the resources invested by each of the systems in a student depend on first and second moments of the income distribution, that is, the average income and how unequally it is distributed over the families. We use the properties of the lognormal distribution to derive an expression for $\mathbb{E}[h_p]$, $\mathbb{E}[(h_p)^{-(\delta/\gamma)}]$, and $\mathbb{E}[(h_p)^{(\delta/(1-\gamma))}]$. For a lognormal distribution, we know that $\mathbb{E}[(h_p)^n] = \exp(n\mu + (1/2)n^2\sigma^2)$ for any $n \in \mathbb{R}$. Therefore, + +$$ \mathbb{E}[h_p] = \exp\left(\mu + \frac{1}{2}\sigma^2\right), \qquad (11) $$ + +$$ \mathbb{E}[(h_p)^{-\delta/\gamma}] = \exp\left(-\frac{\delta}{\gamma}\mu + \frac{1}{2}\left(\frac{\delta}{\gamma}\right)^2\sigma^2\right), \qquad (12) $$ +---PAGE_BREAK--- + +$$ +\mathbb{E}[(h_p)^{\delta/(1-\gamma)}] = \exp\left(\left(\frac{\delta}{1-\gamma}\right)\mu + \frac{1}{2}\left(\frac{\delta}{1-\gamma}\right)^2\sigma^2\right). \quad (13) +$$ + +We substitute (11)–(13) into Equations (7)–(10) and obtain the utility of an agent *i* as a function of the first and second moments of the income distribution. 
To do so, we use the fact that $\tau = \lambda\gamma/(1 + \lambda\gamma)$ and let $\omega^i = \ln(1 - \tau)h_p^i + \lambda \ln \theta + \lambda\delta \ln h_p^i + \lambda\gamma \ln \tau$. Observe that $\omega^i$ is the same for all the education funding systems. Thus, we can focus the analysis on the elements of the indirect utility function that are affected by the investment that the funding system makes in the students, as we have already concluded in the earlier discussion. + +$$ +V^{S1}(h_p^i) = \omega^i + \lambda\gamma \ln h_p^i, \quad (14) +$$ + +$$ +V^{\mathcal{S}2}(h_p^i) = \omega^i + \lambda\gamma \left( \mu + \frac{1}{2} \sigma^2 \right), \quad (15) +$$ + +$$ +V^{\mathcal{S}3}(h_p^i) = \omega^i + \lambda\gamma \left[ -\frac{\delta}{\gamma} \ln h_p^i + \left(1 + \frac{\delta}{\gamma}\right) \mu + \frac{1}{2} \left(1 - \left(\frac{\delta}{\gamma}\right)^2\right) \sigma^2 \right], \quad (16) +$$ + +$$ +V^{\mathcal{S}4}(h_p^i) = \omega^i + \lambda\gamma \left[ \left( \frac{\delta}{1-\gamma} \right) \ln h_p^i + \left( 1 - \frac{\delta}{1-\gamma} \right) \mu + \frac{1}{2} \left( 1 - \left( \frac{\delta}{1-\gamma} \right)^2 \right) \sigma^2 \right]. \quad (17) +$$ + +Note that $\sigma = 0$ in a completely egalitarian economy, in which the four systems give agent $i$ the same utility if $h_p^i = \exp(\mu)$; that is, $V^\ell(\exp(\mu)) = \omega^i + \lambda\gamma\mu$, for all $j \in \{S1,S2,S3,S4\}$. This agent with income $h_p^i = \exp(\mu)$ is the one with the median income of a lognormal distribution. Positive levels of inequality, however, break this indifference between the systems and make the choice of the Condorcet winner nontrivial. + +**3.2 | Pairwise elections and the Condorcet winner** + +In this section, we use Equations (14)–(17) to study pairwise voting among the four regimes. Define $h^{\text{Sa,Sb}}$ as the income level at which the indirect utilities of the agent under systems Sa and Sb are the same, where $a, b \in \{1,2,3,4\}$. 
We compute this income threshold for the pairs $\{S2,S1\}$, $\{S2,S3\}$, and $\{S2,S4\}$. For each of these pairwise comparisons involving S2, we assess whether a majority coalition exists to elect S2. We show that in any pairwise election involving S2, this system emerges as the winner. + +Using Equations (14)–(17), we obtain + +$$ +h^{\mathcal{S}2,\mathcal{S}1} = \exp\left(\mu + \frac{1}{2}\sigma^2\right), \qquad (18) +$$ + +$$ +h^{\mathcal{S}2,\mathcal{S}3} = \exp\left(\mu - \frac{1}{2}\frac{\delta}{\gamma}\sigma^2\right), \tag{19} +$$ +---PAGE_BREAK--- + +TABLE 1 Condorcet winner, $\delta > (1 - \gamma)$ and $\sigma > 0$ + +
ElectionIIIIIIIVOutcome
{S2, S1}S2S2S1S1S2
{S2, S3}S3S2S2S2S2
{S2, S4}S2S2S2S4S2
$$h^{S2,S4} = \exp\left(\mu + \frac{1}{2}\left(\frac{\delta}{1-\gamma}\right)\sigma^2\right). \quad (20)$$ + + We examine the cases for which $\sigma > 0$. We divide our analysis into three cases, $\delta > (1 - \gamma)$, $\delta < (1 - \gamma)$, and $\delta = (1 - \gamma)$, since the ranking of the thresholds $h^{S2,S1}$, $h^{S2,S3}$, and $h^{S2,S4}$ above changes depending on the relative values of $\delta$ and $\gamma$.⁹
Thus, agents with incomes below the mean income prefer S2, since the S2 public system invests more in their children than these agents' investment levels under the private system S1. + +Consider next the {S2, S3} election. In this case, we have that $V^{S2}(h_p^i) \ge V^{S3}(h_p^i)$ for all $h_p^i \ge h^{S2,S3}$. Then, agents with an income level above $h^{S2,S3}$ strictly support S2 over S3. Therefore, all agents from Groups II, III, and IV form a majority to elect S2 from the {S2, S3} election. Intuitively, S3 invests more in students from low-income families and less in students from high-income families than S2. Therefore, students from richer families (with $h_p^i \ge h^{S2,S3}$) receive more resources under a public system that delivers a flat subsidy (S2) than under a public system that attempts to equalize ex post results (S3). + +Lastly, consider the {S2, S4} election. We have that $V^{S2}(h_p^i) \ge V^{S4}(h_p^i)$, for all $h_p^i \le h^{S2,S4}$. Then, agents with an income level below $h^{S2,S4}$ strictly prefer S2 over S4. Therefore, agents from Groups I, II, and III form a majority that strictly prefers S2 to S4. Intuitively, in comparison to + +⁹The cases $\delta > (1 - \gamma)$, $\delta < (1 - \gamma)$, and $\delta = (1 - \gamma)$ correspond to increasing, decreasing, and constant returns to scale in the production function of human capital. We show that S2 is always the Condorcet winner under each of these cases. However, the political support for system S2 in the {$S2,S4$} election becomes more pronounced under increasing returns. The latter is a direct consequence of the fact that, under system S4, resources become much more concentrated on the richest students as returns to scale increase. +---PAGE_BREAK--- + +**TABLE 2** Condorcet winner, $\delta < (1 - \gamma)$ and $\sigma > 0$ + +
ElectionIIIIIIIVOutcome
{S2, S1}S2S2S2S1S2
{S2, S3}S3S2S2S2S2
{S2, S4}S2S2S4S4S2
+ +S2, system S4 invests more in students from high-income families at the expense of all of the agents in Groups I, II and III. + +Table 1 summarizes the voting outcome in the one-on-one elections {S2, S1}, {S2, S3}, and {S2, S4} for the case $\delta > (1 - \gamma)$. + +We perform now an analogous analysis for the case $\delta < (1 - \gamma)$. In this case, $h^{S2,S3} < h^{S2,S4} < h^{S2,S1}$. This again divides the agents into four groups, depending on income $h_p^i$: Group I with $h_p^i \le h^{S2,S3}$; Group II with $h^{S2,S3} < h_p^i \le h^{S2,S4}$; Group III with $h^{S2,S4} < h_p^i \le h^{S2,S1}$; and Group IV with $h_p^i > h^{S2,S1}$. By the same analysis as the one above, we can show that a majority coalition exists to support S2 in each of the three pairwise elections (see Table 2). + +Lastly, consider the case in which $\delta = (1 - \gamma)$. It follows that $h^{S2,S3} < h^{S2,S4} = h^{S2,S1}$, dividing the population into three groups: Group I with $h_p^i \le h^{S2,S3}$; Group II with $h^{S2,S3} < h_p^i \le h^{S2,S4} = h^{S2,S1}$; and Group III with $h_p^i > h^{S2,S4} = h^{S2,S1}$. The median income is again in Group II. Then, this is a case in which the private system (S1) and the output maximizing system (S4) generate a subsidy schedule such that the indifference between these systems and the system S2 is observed for the same threshold agent: the one with income $h_p^i = h^{S2,S4} = h^{S2,S1}$. Proceeding analogously to what we did previously, we show in Table 3 the results for this case. + +Therefore, we conclude from the results of Tables 1 through 3 that a public funding system that collects taxes to invest the same amount in each student is the Condorcet winner. 
+ +## 3.3 | Public versus private funding for education: The type of public funding matters + +We have shown that the ex ante egalitarian public education system S2 is the Condorcet winner in pairwise elections pitching S2 against private system S1 and the other two public systems, S3 and S4. In this section, we explore whether the other two public education funding schemes S3 and S4 also beat private education S1 in pairwise elections. That is, we study the Condorcet winner when the private system is confronted by only one public funding alternative that is different from S2. This analysis will shed light on whether the design and number of public funding alternatives matter for political support of public education over private education. + +**TABLE 3** Condorcet winner, $\delta = (1 - \gamma)$ and $\sigma > 0$ + +
ElectionIIIIIIOutcome
{S2, S1}S2S2S1S2
{S2, S3}S3S2S2S2
{S2, S4}S2S2S4S2
+---PAGE_BREAK--- + +Consider first the {S1, S3} pairwise election. In this case, the threshold for the indifference between the systems is + +$$h^{\mathrm{S1,S3}} = \exp\left(\mu + \frac{1}{2}\left(1 - \frac{\delta}{\gamma}\right)\sigma^2\right). \quad (21)$$ + +We have three cases: $\delta > \gamma$, $\delta < \gamma$, and $\delta = \gamma$. As we did before, we analyze the nontrivial case in which $\sigma > 0$. Suppose $\delta > \gamma$. Then, we have two income groups: Group I consists of agents with income levels $h_p^i \le h^{\mathrm{S1,S3}}$ and Group II consists of agents with income levels $h_p^i > h^{\mathrm{S1,S3}}$. In this case, the voter with the median income belongs to Group II since $\exp(\mu) > h^{\mathrm{S1,S3}}$. Equations (14) and (16) imply that $V^{\mathrm{S3}}(h_p^i) \ge V^{\mathrm{S1}}(h_p^i)$, for all $h_p^i \le h^{\mathrm{S1,S3}}$. Then, all agents with income levels below $h^{\mathrm{S1,S3}}$ support the public system S3. Intuitively, the private system (S1) results in greater investment for the wealthier students, whereas the public system (S3) leads to greater investment in the poorer students. Therefore, Group I votes for the public system, whereas Group II votes for the private system. Since the median-income voter belongs to Group II, it follows that the majority chooses the private system in the {S1, S3} election. + +Suppose now $\delta < \gamma$. In this case, the median income belongs to Group I because $\exp(\mu) < h^{\mathrm{S1,S3}}$. As before, Group I votes for the public system S3, whereas Group II votes for the private system. However, with the median-income voter now in Group I, the majority chooses the public system S3. Lastly, when $\delta = \gamma$, the median income coincides with the threshold $h^{\mathrm{S1,S3}}$. Thus, half of the voters support the private system and the other half support the public system, resulting in a tie. Table 4 summarizes these results. 
+ +Our analysis shows that when public education is pitched against private education, political support indeed depends on the type of public education under consideration. When the private system (S1) and the ex post egalitarian public education system (S3) are proposed to the voters, the majority votes for the private system when the returns to investment in education are relatively low compared with the returns to endowed human capital, as expressed by the condition $\gamma < \delta$. A greater influence of endowed human capital on the formation of the students' human capital requires that an ex post egalitarian public education system (S3) redistribute even more resources to the poor, since $\nu^i/\nu^j = (h_P^j/h_P^i)^{\delta/\gamma}$. Thus, the public resources for education become more concentrated on a minority of low-income students, making the public system S3 less popular than the private system S1 for the majority. + +We show next that a similar conclusion results when the private system (S1) and the output-maximizing public system (S4) are the only alternatives for the voters. In this case, the income threshold for the indifference between the systems is + +$$h^{\mathrm{S1,S4}} = \exp\left(\mu + \frac{1}{2}\left(1 + \frac{\delta}{1-\gamma}\right)\sigma^2\right), \quad (22)$$ + +**TABLE 4** Condorcet winner, {S1, S3} Election + +
ParametersIIIOutcome
δ > γ and σ > 0S3S1S1
δ < γ and σ > 0S3S1S3
δ = γ and σ > 0S3S1S1-S3
+---PAGE_BREAK--- + +**TABLE 5** Condorcet winner, {S1, S4} Election + +
ParametersIIIOutcome
δ > 1 − γ and σ > 0S1S4S1
δ < 1 − γ and σ > 0S4S1S4
δ = 1 − γ and σ > 0S1–S4S1–S4S1–S4
+ +for $\delta \neq (1 - \gamma)$. Then, we have Group I with income levels $h_p^i \le h^{S1,S4}$ and Group II with income levels $h_p^i > h^{S1,S4}$. We again have three cases: $\delta > 1 - \gamma$, $\delta < 1 - \gamma$, and $\delta = 1 - \gamma$. Consider first the case $\delta > 1 - \gamma$. Equations (14) and (17) imply that $V^{S1}(h_p^i) \ge V^{S4}(h_p^i)$, for all $h_p^i \le h^{S1,S4}$. Thus, agents in Group I support S1 whereas agents in Group II support system S4. The median-income voter is in Group I. Therefore the majority votes for the private system S1. The intuition behind this result again relies on the relative importance of parental human capital on the formation of the human capital of their children. A higher value of $\delta$ increases the marginal product of endowed resources for the richer students relative to poorer students. As a result, the S4 public system channels even more resources to a minority of rich students, making the private system S1 more appealing to the majority. + +Suppose now $\delta < 1 - \gamma$. In this case, we have that $V^{S1}(h_p^i) \ge V^{S4}(h_p^i)$, for all $h_p^i \ge h^{S1,S4}$. Thus, agents in Group I support S4 whereas agents in Group II support S1. The median-income voter is in Group I and, therefore, the majority now votes for the public funding system S4. In this case, even though the public system S4 invests less in students from poorer families than in richer students, the amount received by the poor students under S4 is greater than the amount received by them under S1. The reason for this is that a relatively lower $\delta$ implies that the difference in the marginal product of investment across students is smaller. Hence, differences in the resources delivered across students by the system that aims at equalizing marginal product are not so pronounced as the ones that would be observed under the private system. + +Lastly, suppose $\delta = 1 - \gamma$. 
Then, we have $V^{S1}(h_p^i) = V^{S4}(h_p^i)$, for all $h_p^i \in (0, \infty)$, the public system and the private system lead to the same outcome for each family, resulting in a tie. Table 5 summarizes the results. + +The previous analysis again reinforces the important message that the voting outcome of public versus private funding for education depends crucially on the type of public funding under consideration. As we have shown, when the public funding alternative employs a design that aims to equalize ex post results or maximize output, the majority may elect private education. We also demonstrate that the introduction of an ex ante egalitarian public funding system can resolve this indeterminacy. + +# 4 | EXTENSIONS OF THE MODEL + +In this section, we discuss two extensions to the baseline model. First, we address the case of incomplete democracies, where a fraction of the agents do not participate in politics. Second, we introduce an example to consider the complementarity between private and public education. + +## 4.1 | Incomplete democracies + +Our main result hinges upon the assumption that voters fully participate in a democracy in practice. However, voting turnout is never complete. In some democracies, the rich are more +---PAGE_BREAK--- + +likely to participate in politics than the poor; in other democracies, the opposite can be true. We define democracy as incomplete when the voting turnout is less than 100%. An incomplete democracy can be biased toward either the rich or the poor. We define a democracy as “elitist” if it excludes a fraction of the poorest agents of the economy. Analogously, we define a democracy as “populist” if it excludes a fraction of the richest agents of the economy. We show in the appendix that our main result holds for democracies with a limited degree of either elitism or populism. We now discuss the intuition behind this result. 
+ +Consider first the case of an elitist democracy, which excludes a fraction of the poorest agents of the economy. Since parents will support the system that invests the most in their children, the poorest parents will support S3 over S2 because the ex post egalitarian system gives more resources to children from low-income families than the system that invests equally across students. Thus, the fact that S2 is preferred to S3 in a complete democracy immediately implies that S2 is also chosen when a number of the poorest agents do not participate in politics. In addition, the poorest parents support S2 over S1 and S2 over S4 in pairwise elections because the ex ante egalitarian system invests more in their children than the private system and the efficient public system. In the appendix, we prove that a limited degree of elitism still leaves S2 as the winner in the {$S2,S1$} and {$S2,S4$} elections. + +Consider now the case of a populist democracy, which excludes a fraction of the richest agents of the economy. The richest parents support S1 over S2 and S4 over S2. Both the private system and the output maximizing system invest more heavily in their children than the ex ante egalitarian system. In complete democracies, S2 is elected in pairwise elections {$S2,S1$} and {$S2,S4$.} Therefore, S2 would also be supported by the majority when a fraction of the richest parents are excluded from voting. In addition, the richest parents prefer system S2 in the {$S2,S3$} election. The appendix shows that the ex ante egalitarian system still wins the {$S2,S3$} election when a democracy's degree of populism is limited. + +## 4.2 | Private and public education as complements + +The analysis so far has assumed that private and public education are perfect substitutes in the human capital formation of students; note that the production technology of human capital is $h_c^i = \theta(v^i + y^i)^{\gamma}(h_p^i)^{\delta}$. 
The perfect substitutability between different systems is a realistic setting to study the political outcome when voters must choose a single alternative from a pool of purely private and public funding schemes. + +The case in which public and private education are complements introduces two types of changes in the baseline model developed in Section 2. First, the production technology of human capital must address the complementarity between private and public education. Second, the information flow between private and public players must be precisely stated. Several modeling options arise from these considerations. + +We sketch an example that modifies the production technology to show how our analysis can readily accommodate the complementarity between private and public education. Suppose that the educational process has two stages. In the first stage, agents carry out optimal private investment leading to $h_c^i$, which is the human capital of the student belonging to family i at the end of the first stage. Equations (2) and (3) imply that $h_c^i = \theta(\lambda\gamma/(1+\lambda\gamma))^{\gamma}(h_p^i)^{\gamma+\delta}$. In the second stage, politicians present the three public funding alternatives to the voters, who then choose the winner. Suppose the level of the human capital of the student at the end of the first +---PAGE_BREAK--- + +stage becomes her initial human capital for the second stage. Substituting $h_c^i$ into Equation (2), we obtain + +$$h_c^i = \theta^{1+\delta} (\nu^i)^{\gamma} \left( \frac{\lambda\gamma}{1+\lambda\gamma} \right)^{\delta\gamma} (h_p^i)^{(\gamma+\delta)\delta}. \quad (23)$$ + +Note that, compared to Equation (2), this setting could exacerbate or mitigate differences in human capital across families, depending on whether $\gamma + \delta$ is greater than or less than $1$. 
This has implications for the amount of resources that the ex post egalitarian public education system (S3) allocates to students from low-income families and the amount that the efficient system (S4) allocates to students from wealthy families. However, the analysis performed in Section 3.2 still holds once we reparametrize $\delta$ as $\tilde{\delta} = (\gamma + \delta)\delta$ and we consider the one-on-one elections that only include the public systems for the second stage. + +We have shown one possible way to address the complementarity between private and public education. Interesting avenues for future research include the study of sequential voting, with agents first choosing from a pool of different private education schemes followed by a second-round election to choose from a pool of public funding systems. This could shed light on how the design of public funding schemes can affect agents’ choices of private investment in education. + +# 5 | CONCLUSIONS + +This paper analyzed the political support for different education funding regimes in a one-person, one-vote political system. We showed that a public system that collects taxes and delivers the same amount of resources to each family is the Condorcet winner. In economies with some degree of income inequality, a system that seeks to equalize or maximize educational outcomes concentrates resources on a minority of the population and, therefore, lacks majority support. In addition, families with an income level below the mean receive more net resources under a public system that employs flat subsidies than under a private system. Therefore, a private system also lacks majority support. + +The results of this paper provide a political economy explanation for the observation that governments tend to favor free education for all students (i.e., to spend the same amount on each student). Our paper also highlights the importance of specifying the type of public education under discussion. 
In particular, we show that voters may favor private education over public education when the latter equalizes or maximizes ex post educational outcomes. + +## ORCID + +Francisco Parro http://orcid.org/0000-0002-4395-9540 + +## REFERENCES + +Aghion, P., & Bolton, P. (1992). Distribution and growth in models of imperfect capital markets. *European Economic Review*, 36(2–3), 603–611. + +Anderberg, D., & Balestrino, A. (2008). The political economy of post-compulsory education policy with endogenous credit constraints (CESifo Working Paper Series 2304). Munich: CESifo Group. + +Banerjee, A. V., & Newman, A. F. (1991). Risk-bearing and the theory of income distribution. *Review of Economic Studies*, 58(2), 211–235. +---PAGE_BREAK--- + +Becker, G. S., & Tomes, N. (1979). An equilibrium theory of the distribution of income and intergenerational mobility. *Journal of Political Economy*, **87**(6), 1153-1189. + +Becker, G. S. (1986). Human capital and the rise and fall of families. *Journal of Labor Economics*, **4**(3), 1-39. + +Becker, G. S. (1993). *Human capital: A theoretical and empirical analysis with special reference to education* (3rd ed.). Chicago, IL: University of Chicago Press. + +Benhabib, J., & Spiegel, M. M. (1994). The role of human capital in economic development: Evidence from aggregate cross-country data. *Journal of Monetary Economics*, **34**(2), 143-173. + +Bentaouet Kattan, R. (2006). *Implementation of free basic education policy* (World Bank Education Working Papers Series No. 7). + +Borck, R., & Wimbersky, M. (2014). Political economics of higher education finance. *Oxford Economic Papers*, **66**(1), 115-139. + +Clementi, F., & Gallegati, M. (2005). Pareto's law of income distribution: Evidence for Germany, the United Kingdom, and the United States. In A. Chatterjee, S. Yarlagadda, & B. K. Chakrabarti (Eds.), *Econophysics of wealth distributions* (pp. 3-14). Milano: New Economic Windows, Springer. + +Creedy, J., & Francois, P. (1990). 
Financing higher education and majority voting. *Journal of Public Economics*, **43**(2), 181-200. + +Cunha, F., Heckman, J., Lochner, L. J., & Masterov, D. V. (2006). Interpreting the evidence on life cycle skill formation. In E. A. Hanushek, & F. Welch (Eds.), *Handbook of the Economics of Education* (pp. 697-812). Amsterdam: North-Holland. + +Cunha, F., & Heckman, J. (2007). The technology of skill formation. *American Economic Review*, **97**(2), 31-47. + +De Fraja, G. (2001). Education policies: Equity, efficiency and voting equilibrium. *Economic Journal*, **11**(471), 104-119. + +Docquier, F. (2004). Income distribution, non-convexities and the fertility: Income relationship. *Economica*, **71**(282), 261-273. + +Epple, D., & Romano, R. E. (1996). Ends against the middle: Determining public service provision when there are private alternatives. *Journal of Public Economics*, **62**(3), 297-325. + +Fernandez, R., & Rogerson, R. (1995). On the political economy of education subsidies. *Review of Economic Studies*, **62**(2), 249-262. + +Galor, O. (2000). Income distribution and the process of development. *European Economic Review*, **44**(4-6), 706-712. + +Gibrat, R. 1931. *Les Inégalités Économiques*. Paris: Librairie du Recueil Sirey. + +Glomm, G., & Ravikumar, B. (1992). Public versus private investment in human capital: Endogenous growth and income inequality. *Journal of Political Economy*, **100**(4), 818-834. + +Hanushek, E. A., & Kimko, D. D. (2000). Schooling, labor-force quality, and the growth of nations. *American Economic Review*, **90**(5), 1184-1208. + +Heckman, J. J. (2008). Schools, skills and synapses. *Economic Inquiry*, **46**(3), 289-324. + +Heckman, J. J., & Masterov, D. V. (2007). The productivity argument for investing in young children. *Review of Agricultural Economics*, **29**(3), 446-493. + +Kohlberg, E. (1976). A model of economic growth with altruism between generations. *Journal of Economic Theory*, **13**(1), 1-13. 
+ +Larkin, J., & Staton, P. (2001). Access, inclusion, climate, empowerment (AICE): A framework for gender equity in market-driven education. *Canadian Journal of Education*, **26**(3), 361-376. + +Loury, G. C. (1981). Intergenerational transfers and the distribution of earnings. *Econometrica*, **49**(4), 843-867. + +Moav, O. (2002). Income distribution and macroeconomics: The persistence of inequality in a convex technology framework. *Economics Letters*, **75**(2), 187-192. + +Neal, D., & Rosen, S. (2000). Theories of the distribution of earnings. In A. B. Atkinson, & F. Bourguignon (Eds.), *Handbook of Income Distribution* (Vol. 1, pp. 379-427). Amsterdam: Elsevier North-Holland. + +Samoff, J. (1996). Which priorities and strategies for education? *International Journal of Educational Development*, **16**(3), 249-71. + +Sleebos, J. (2003). Low fertility rates in OECD countries: Facts and policy responses (OECD Labour Market and Social Policy Occasional Papers No. 15). +---PAGE_BREAK--- + +**How to cite this article:** Correa JA, Lu Y, Parro F, Villena M. Why is free education so popular? A political economy explanation. *Journal of Public Economic Theory*. 2019;1–19. +https://doi.org/10.1111/jpet.12396 + +APPENDIX + +In this appendix, we formally prove that our main result holds for democracies with a limited +degree of either elitism or populism. Consider first the percentiles of the income distribution in +which the agents with human capital $h^{S2,S1}$, $h^{S2,S3}$, and $h^{S2,S4}$ are located. These agents are +indifferent between the funding systems in the corresponding pairwise elections analyzed in +Section 3.2. The lognormal income distribution implies that an agent with income $h_P^i$ is located +in the $\Phi((\ln h_P^i - \mu)/\sigma) \times 100\%$ percentile of the income distribution, where $\Phi$ is the +cumulative function of the standard normal distribution. 
For instance, an agent with income +$h_P^i = \exp(\mu)$ is in the $\Phi(0) \times 100\%$ = 50th percentile of the income distribution. Let +$p^{S\alpha,S\beta} \times 100\%$ be the income percentile of an agent with income $h^{S\alpha,S\beta}$. Then, Equations +(18)–(20) imply + +$$p^{S2,S1} = \Phi\left(\frac{\sigma}{2}\right), \qquad (A1)$$ + +$$p^{S2,S3} = \Phi\left(-\frac{\delta\sigma}{2\gamma}\right), \qquad (A2)$$ + +$$p^{S2,S4} = \Phi\left(\frac{\delta\sigma}{2(1-\gamma)}\right), \qquad (A3)$$ + +We now use Equations (A1)–(A3) to examine whether the ex ante egalitarian public education system (S2) remains the Condorcet winner in democracies with some degree of elitism or populism. + +In Section 3.2, we concluded that all agents with an income below $h^{S2,S1}$ prefer the ex ante egalitarian public education system (S2) over the private system (S1) in a pairwise election. Thus, Equation (A1) implies that $\Phi(\sigma/2) \times 100\% > 50\%$ of voters prefer S2. Suppose an elitist democracy excludes a fraction $x$ of the poorest agents from voting. We can compute the $x$ such that S2 is still the winner of the {S2, S1} election¹⁰: + +$$\frac{\Phi(\sigma/2) - x}{1-x} > 0.5. \qquad (A4)$$ + +Therefore, an elitist democracy that excludes less than $\tilde{x}^1 = 2(\Phi(\sigma/2) - 0.5)$ of the poorest agents still votes for the ex ante egalitarian public education system (S2) in the pairwise election {S2, S1}. +---PAGE_BREAK--- + +We proceed analogously for the other two pairwise elections: {$S2, S3$} and {$S2, S4$.} As shown in Section 3.2, all agents with an income above $h_t^{S2,S3}$ prefer the ex ante egalitarian public education system (S2) over the ex post egalitarian public education system (S3) in a one-on-one election. Then, Equation (A2) implies that $100\% - \Phi(-\delta\sigma/2\gamma) \times 100\% > 50\%$ of voters prefer S2 to S3. 
Then, we can use an equation analogous to (A4) to derive the fraction of the richest agents that could be excluded from voting without affecting the selection of S2 in the {$S2, S3$} comparison: + +$$ \frac{1 - \Phi(-\delta\sigma/2\gamma) - z}{1 - z} > 0.5. \quad (A5) $$ + +Therefore, a populist democracy that excludes less than $\tilde{z} = 2(0.5 - \Phi(-\delta\sigma/2\gamma))$ of the richest agents still elects S2 over S3. + +Lastly, we know from Section 3.2 that all agents with an income level below $h^{S2,S4}$ prefer the ex ante egalitarian public education system (S2) over the output maximizing system (S4) in a one-on-one election. Therefore, Equation (A3) implies that $\Phi(\delta\sigma/(2(1-\gamma))) \times 100\% > 50\%$ of the voters vote for S2. The equation analogous to (A4) is + +$$ \frac{\Phi(\delta\sigma/(2(1-\gamma))) - x}{1-x} > 0.5. \quad (A6) $$ + +Thus, from Equation (A6) we conclude that an elitist democracy that excludes less than $\tilde{x}^2 = 2(\Phi(\delta\sigma/(2(1-\gamma))) - 0.5)$ of the poorest agents still selects the ex ante egalitarian public education system (S2) in the one-on-one election {$S2, S4$}. + +We show now that the ex ante egalitarian public education system (S2) is still the Condorcet winner in democracies with a limited degree of elitism and populism. Consider first an elitist democracy that excludes less than $\min\{\tilde{x}^1, \tilde{x}^2\}$ of the poorest agents of the economy. By construction, the ex ante egalitarian public education system (S2) wins the pairwise elections {$S2, S1$} and {$S2, S4$}. Moreover, the ex post egalitarian public education system (S3) invests more resources in students from low-income families. Thus, the fact that S2 is preferred to S3 in a complete democracy immediately implies that S2 is also selected when a number of the poorest agents do not participate in politics. 
Formally, the political support for system S2 in the {$S2, S3$} election when $x$ of the poorest agents are excluded from voting is $(1 - \Phi(-\delta\sigma/2\gamma))/(1-x) \times 100\%$. We have already established that in a complete democracy ($x=0$), $(1 - \Phi(-\delta\sigma/2\gamma)) \times 100\% > 50\%$. Since $((1 - \Phi(-\delta\sigma/2\gamma))/(1-x)) \times 100\% > 1 - \Phi(-\delta\sigma/2\gamma) \times 100\% > 50\%$ for any positive value of $x$, it follows that S2 will also be selected in the {$S2, S3$} election within an incomplete democracy that excludes less than $\min\{\tilde{x}^1, \tilde{x}^2\}$ of the poorest agents. Hence, S2 remains the Condorcet winner even if a fraction of the poorest agents do not participate in elections. + +Similarly, consider a populist democracy that excludes less than $\tilde{z}$ of the richest agents. By construction, the ex ante egalitarian public education system (S2) wins the {$S2, S3$} election. In addition, we know that systems S1 and S4 invest more resources in students from richer families, which makes these funding systems especially popular among the richest agents. We have shown that system S2 wins the one-on-one elections {$S2, S1$} and {$S2, S4$} in the context of a complete democracy. Then, it will also win in an incomplete democracy that excludes a fraction of the richest agents. Formally, the political support for system S2 in the {$S2, S1$} and {$S2, S4$} elections when a fraction $z$ of the richest agents are excluded from voting is $((\Phi(\sigma/2))/(1-z)) \times 100\%$ +---PAGE_BREAK--- + +and ((Φ(δσ/(2(1 − γ))))/(1 − z)) × 100%, respectively. We have already established that in +complete democracies (z = 0), Φ(σ/2) × 100% > 50% and Φ(δσ/(2(1 − γ))) × 100% > 50%. +These two conditions imply that ((Φ(σ/2))/(1 − z)) × 100% > 50% and (((Φ(δσ/ +(2(1 − γ))))/(1 − z)) × 100% > 50%, for any positive fraction z. 
Thus, S2 wins the pairwise +elections {S2, S1} and {S2, S4} in a populist democracy that excludes less than $\bar{z}$ of the richest agents. +Hence, S2 remains the Condorcet winner even if a fraction of the richest agents do not participate in +elections. \ No newline at end of file diff --git a/samples_new/texts_merged/2865847.md b/samples_new/texts_merged/2865847.md new file mode 100644 index 0000000000000000000000000000000000000000..27836c1085f6dd285fdd442f6289cf500cea97c6 --- /dev/null +++ b/samples_new/texts_merged/2865847.md @@ -0,0 +1,129 @@ + +---PAGE_BREAK--- + +# Join Decompositions for Efficient Synchronization +of CRDTs after a Network Partition + +[Work in progress report] + +Vitor Enes + +Carlos Baquero + +Paulo Sérgio Almeida + +Ali Shoker + +HASLab/INESC TEC and Universidade do Minho + +## Abstract + +State-based CRDTs allow updates on local replicas without remote synchronization. Once these updates are propagated, possible conflicts are resolved deterministically across all replicas. $\delta$-CRDTs bring significant advantages in terms of the size of messages exchanged between replicas during normal operation. However, when a replica joins the system after a network partition, it needs to receive the updates it missed and propagate the ones performed locally. Current systems solve this by exchanging the full state bidirectionally or by storing additional metadata along the CRDT. We introduce the concept of join-decomposition for state-based CRDTs, a technique orthogonal and complementary to delta-mutation, and propose two synchronization methods that reduce the amount of information exchanged, with no need to modify current CRDT definitions. + +## 1. Introduction + +The concept of Conflict-free Replicated Data Type (CRDT) was introduced in (Shapiro et al. 2011) and presents two flavors of CRDTs: state-based and operation-based. 
A state-based CRDT can be defined as a triple $(S, \sqsubseteq, \sqcup)$ where $S$ is a join-semilattice, $\sqsubseteq$ its partial order, and $\sqcup$ is a binary join operator that derives the least upper bound for every two elements of $S$. + +With $\delta$-CRDTs (Almeida et al. 2016), every time a replica performs an update, it will only send the information needed to reflect this update in other replicas, with the anti-entropy algorithm keeping at each node metadata tracking which deltas still need to be propagated to current peers. However, after a long partition, such metadata is discarded. In this situation, when a replica goes online again, the other remote replicas typically send their full state so this replica sees the updates it missed. + +(Linde et al. 2016) introduces the concept of $\Delta$-CRDTs where replicas exchange metadata used to calculate a $\Delta$ that reflects the missed updates. As this metadata is typically smaller than the full state, less is demanded from the network. In this approach CRDTs need to be extended to maintain the additional metadata for $\Delta$ derivation, and if this metadata needs to be garbage collected the mechanism will fall-back to standard full state transmission. + +In this paper we will present a mechanism that does not add additional metadata to standard state-based CRDTs, but instead is able to decompose the state into smaller states that can be selected and grouped in a $\Delta$ for efficient transmission. + +## 1.1 Problem Statement + +Consider replica *A* with state *a* and replica *B* with state *b*, which at some point stop disseminating updates but keep updating their local state. When these replicas go online, what should replica *A* send to replica *B* so that *B* sees the updates performed on *a* since they stopped communicating? We could try to find *c* such that: + +$$a = b \sqcup c$$ + +but if both replicas performed updates while they were offline, their states are concurrent, and there's no such *c*. 
(We say two states *a* and *b* are concurrent if *a* is not less than *b* and *b* is not less than *a* in the partial order: $a \parallel b \iff a \not\sqsubseteq b \land b \not\sqsubseteq a$.) The trick is how to find *c* ($\Delta$ from now on) which reflects the updates in the join of *a* and *b* still missing in *b*: + +$$a \sqcup b = b \sqcup \Delta$$ + +The trivial example would be $\Delta = a$, but we would like to send less information than the full state. So, how can replica *A* calculate a smaller $\Delta$ to be sent to replica *B*, reflecting the missed updates? + +## 1.2 Contributions + +Firstly, we introduce the concept of join-decomposition for state-based CRDTs, a technique orthogonal and complementary to delta-mutation. Then, we propose two synchronization techniques. *State Driven*: replica *B* sends its full state *b* to replica *A* and replica *A* is able to derive $\Delta$. *Digest Driven*: replica *B* sends some information about its state *b*, smaller than *b* itself, but enough to allow replica *A* to compute $\Delta$. + +## 2. Join Decompositions + +We now explain how the concept of join-decomposition (Birkhoff 1937) can be applied to state-based CRDTs. Given state $r \in S$, we say that $D \in \mathcal{P}(S)$ is a join-decomposition of $r$ if: + +$$\bigsqcup D = r \qquad (i)$$ + +$$\forall s \in D \cdot \bigsqcup (D \setminus \{s\}) \sqsubset r \qquad (ii)$$ + +Property (i) states that the join of all elements in a join-decomposition of $r$ should be $r$. Property (ii) says that each element in a join-decomposition is not redundant: joining the remaining elements is not enough to produce $r$. +---PAGE_BREAK--- + +We are interested in decompositions made up of “basic” irreducible elements. 
An element $s$ is join-irreducible if it cannot result from a join of two elements other than itself, i.e.: + +$$t \sqcup u = s \Rightarrow t = s \lor u = s$$ + +We say $D$ is a join-irreducible decomposition if $D$ is a join-decomposition and: + +$$\forall s \in D \cdot s \text{ is join-irreducible} \qquad (iii)$$ + +States in common CRDTs typically have join-irreducible decompositions, and we now present some examples of decomposition functions, which take a state and return a join-irreducible decomposition. + +## 2.1 Example Decompositions + +A GCounter is a simple replicated counter where its value can only increase (Almeida et al. 2016). It is represented as a map from ids to naturals, i.e., $GCounter = I \hookrightarrow N$, and each replica can only increase the value of the counter in its position of the map. The value of the counter is the sum of all increments. For example, $p = \{A \mapsto 3, B \mapsto 5\}$ means replica A has incremented the counter three times, replica B five times, hence the value is eight. For each state $s$, a join-irreducible decomposition can be obtained by function: + +$$D^{GCounter}(s) = \{\{i \mapsto v\} | (i, v) \in s\}$$ + +The decomposition for the GCounter $p$ above would be $\{\{A \mapsto 3\}, \{B \mapsto 5\}\}$. + +To allow both increments and decrements we can compose two GCounter by pairing them (Baquero et al. 2015) and we have a PNCounter $= (I \hookrightarrow N) \times (I \hookrightarrow N)$. Join-irreducible decompositions can be obtained through: + +$$D^{PNCounter}((p,n)) = \{(\{i \mapsto v\}, \{\}) | (i,v) \in p\} \\ \cup \{(\{\}, \{i \mapsto v\}) | (i,v) \in n\}$$ + +As a final example, an Add-Wins set has state $\mathit{AWSet} = (E \hookrightarrow \mathcal{P}(D)) \times \mathcal{P}(D)$. 
This CRDT is a pair where the first component is a map (from element, in $E$, to a set of supporting dots (unique event identifiers), in $\mathcal{P}(D)$) and the second component is a causal context represented as a set of dots $\mathcal{P}(D)$ (Almeida et al. 2016). When an element is added to the set, a new entry in the map is created, if needed, mapping this element to a new dot, and current dots for the element, if any, are discarded. This new dot is also added to the causal context. To remove an element, we remove its entry from the map. An example for this data type where two elements ($x$ and $y$) were added and another (initially marked with unique dot $a2$) was removed is $s = (\{x \mapsto \{a1\}, y \mapsto \{b1, c1\}\}, \{a1, a2, b1, c1\})$. (The *range* function `rng` returns all sets of supporting dots in the mapping.) The join-irreducible decomposition of state $(m, c)$ can be obtained through function: + +$$D^{\mathit{AWSet}}((m,c)) = \{(\{e \mapsto \{d\}\}, \{d\}) \mid (e,s) \in m, d \in s\} \\ \cup \{(\{\}, \{d\}) \mid d \in c \setminus \bigcup \operatorname{rng} m\}$$ + +The join-irreducible decomposition for the state $s$ above is: + +$$\{(\{x \mapsto \{a1\}\}, \{a1\}), \\ (\{y \mapsto \{b1\}\}, \{b1\}), \\ (\{y \mapsto \{c1\}\}, \{c1\}), \\ (\{\}, \{a2\})\}$$ + +## 3. Efficient Synchronization + +**State Driven** The State Driven approach can be applied to all state-based CRDTs as long as we have a corresponding join-decomposition. We define $\min^\Delta : S \times S \to S$ as a function that given two states (the local state $a$ and the remote replica state $b$) will produce a $\Delta$. Join-irreducible decompositions will in general produce smaller $\Delta$s. Let $D : S \to \mathcal{P}(S)$ be a function that produces a join-decomposition. + +$$\min^{\Delta}(a, b) = \bigsqcup\{s \mid s \in D(a) \land b \sqsubset b \sqcup s\}$$ + +This $\min^\Delta$ function joins all $s$ in the local state join-decomposition that strictly inflate the remote state. 
If the local replica ships the resulting $\Delta$, to be joined to the remote replica, and joins the state received from the remote replica to its local state, both these replicas will reach convergence (if in the meantime no new update was performed). + +**Digest Driven** With the Digest Driven approach we achieve the same results as State Driven but by exchanging less information. We re-define $\min^\Delta : S \times M \to S$ as a function that given the local state $a$ and some digest $m$ related to the remote state will produce a $\Delta$. + +$$\min^{\Delta}(a,m) = \bigsqcup\{s \mid s \in D(a) \land \inf(s,m)\}$$ + +This digest will be data-type specific, which means that $\min^\Delta$ will use a type-specific function $\inf(s,m)$ to check if $s$ inflates the remote state summarized by the received digest $m$. + +A digest extraction function digest: $S \to M$ and the inflation test $\inf: S \times M \to B$ for the causal $\mathit{AWSet}$ CRDT can be defined as: + +$$\begin{align*} +\operatorname{digest}^{\mathit{AWSet}}((m,c)) &= (\bigcup \operatorname{rng} m, c) \\ +\operatorname{inf}^{\mathit{AWSet}}((e,\{d\}), (a,c)) &= +\begin{cases} +T & \text{if } d \notin c \lor (e = \{\} \land d \in a) \\ +F & \text{otherwise} +\end{cases} +\end{align*}$$ + +The function $\operatorname{digest}^{\mathit{AWSet}}$ returns a pair where the first component is the set of active dots (the supporting dots of elements that were added and not yet removed) and the second component is the full causal context. The inflation check $\inf^{\mathit{AWSet}}$ will return $T$ for $s \in D(a)$ if the dot in $s$ has not been seen in the other replica or $s$ represents a removed element (i.e., $(\{\}, \{d\})$) that has been added and not yet removed in the other replica ($d$ is still in the active dots). + +If the Digest Driven technique is performed bidirectionally and no updates occurred, both replicas will converge (otherwise, they can still be collected separately in a dedicated buffer for further transmission). 
+ +## References + +P. S. Almeida, A. Shoker, and C. Baquero. Delta State Replicated Data Types. CoRR, abs/1603.01529, 2016. URL http://arxiv.org/abs/1603.01529. + +C. Baquero, P. S. Almeida, A. Cunha, and C. Ferreira. Composition of State-based CRDTs. 2015. + +G. Birkhoff. Rings of sets. Duke Math. J., 3(3):443–454, 1937. + +A. Linde, J. Leitão, and N. Preguiça. Δ-CRDTs: Making δ-CRDTs Delta-Based. PaPoc 2016, 2016. + +M. Shapiro, N. Preguiça, C. Baquero, and M. Zawirski. Conflict-free Replicated Data Types. Technical Report RR-7687, July 2011. URL http://hal.inria.fr/inria-00609399/en/. \ No newline at end of file diff --git a/samples_new/texts_merged/2909063.md b/samples_new/texts_merged/2909063.md new file mode 100644 index 0000000000000000000000000000000000000000..3cffcf4a9b45e118692dc4f07f6c024322afd28a --- /dev/null +++ b/samples_new/texts_merged/2909063.md @@ -0,0 +1,56 @@ + +---PAGE_BREAK--- + +# y⁺ Calculation, Example 6D + +Example 6D: Consider a high-velocity fluid over a flat plate. It is desired to find the thickness of the viscous sublayer at $y^+=1$. The fluid is H₂O at 395 K and 1 MPa. Its free stream velocity is 700 m/s, and has a boundary layer $\delta=0.1$ m. + +## Solutions: + +1) Use the "Yplus_LIKE_Eddy_Scales_Book_Version.m" application found in my CFD/turbulence book, "Applied Computational Fluid Dynamics and Turbulence Modeling", Springer International Publishing, 1st Ed., ISBN 978-3-030-28690-3, 2019, DOI: 10.1007/978-3-030-28691-0. + +or + +2) Get a free copy of "Yplus_LIKE_Eddy_Scales_Book_Version.m" at www.cfdturbulence.com, or email me at tayloreddydk1@gmail.com. + +or + +3) Use the free $y^+$ estimation GUI tool offered by cfd-online, which is at http://www.cfd-online.com/Tools/yplus.php + +or + +4) Follow the step-by-step solution shown in the next slide. +---PAGE_BREAK--- + +$y^+$ Calculation, Example 6D + +From $P$ and $T$, $\rho = 942 \text{ kg/m}^3$ and $\mu = 2.28 \times 10^{-4} \text{ kg/m-s}$. 
 + +$$\nu = \frac{\mu}{\rho} = \frac{2.28 \times 10^{-4}}{942} = 2.43 \times 10^{-7} \text{ m}^2/\text{s}$$ + +$$Re_x = \frac{U_\infty \delta(x)}{\nu} = \frac{700 * 0.1}{2.43 \times 10^{-7}} = 2.87 \times 10^{8} < 10^{9}$$ + +$$C_f = [2 \log_{10}(Re_x) - 0.65]^{-2.3} = [2 \log_{10}(2.87 \times 10^8) - 0.65]^{-2.3} = 1.60 \times 10^{-3}$$ + +$$\tau_w = C_f \frac{\rho U_\infty^2}{2} = 1.60 \times 10^{-3} \frac{942 * 700^2}{2} = 3.78 \times 10^5 \text{ Pa}$$ + +$$u_* = \sqrt{\frac{\tau_w}{\rho}} = \sqrt{\frac{3.78 \times 10^5}{942}} = 20.0 \text{ m/s}$$ + +$$y(\text{at } y^+=1) = \frac{y^+ \nu}{u_*} = \frac{1 * 2.43 \times 10^{-7}}{20} = 1.22 \times 10^{-8} \text{ m}$$ +---PAGE_BREAK--- + +# y⁺ Calculation, Example 6D Solutions + +## Approach 1 and 2 (the Matlab script, Yplus_LIKE_Eddy_Scales_Book_Version.m) + +$$Re_x = 2.89 \times 10^8$$ + +$$y(\text{at } y^+=1) = 1.23 \times 10^{-8} \text{ m}$$ + +## Approach 4 (previous slide) + +$$Re_x = 2.87 \times 10^8$$ + +$$y(\text{at } y^+=1) = 1.22 \times 10^{-8} \text{ m}$$ + +## Approach 3 (cfd-online tool) \ No newline at end of file diff --git a/samples_new/texts_merged/3147359.md b/samples_new/texts_merged/3147359.md new file mode 100644 index 0000000000000000000000000000000000000000..81a302507d821cdf1449bc2fbf440b3e53c08e29 --- /dev/null +++ b/samples_new/texts_merged/3147359.md @@ -0,0 +1,589 @@ + +---PAGE_BREAK--- + +Conference Paper + +# Implementing Hybrid Semantics: From Functional to Imperative + +Sergey Goncharov +Renato Neves +José Proença* + +*CISTER Research Centre +CISTER-TR-201008 + +2020/11/30 +---PAGE_BREAK--- + +# Implementing Hybrid Semantics: From Functional to Imperative + +Sergey Goncharov, Renato Neves, José Proença* + +*CISTER Research Centre +Polytechnic Institute of Porto (ISEP P.Porto) +Rua Dr. 
António Bernardino de Almeida, 431 +4200-072 Porto +Portugal +Tel.: +351.22.8340509, Fax: +351.22.8321159 +E-mail: sergey.goncharov@fau.de, nevrenato@di.uminho.pt, pro@isep.ipp.pt +https://www.cister-labs.pt + +## Abstract + +Hybrid programs combine digital control with differential equations, and naturally appear in a wide range of application domains, from biology and control theory to real-time software engineering. The entanglement of discrete and continuous behaviour inherent to such programs goes beyond the established computer science foundations, producing challenges related to e.g. infinite iteration and combination of hybrid behaviour with other effects. A systematic treatment of hybridness as a dedicated computational effect has emerged recently. In particular, a generic idealized functional language HybCore with a sound and adequate operational semantics has been proposed. The latter semantics however did not provide hints to implementing HybCore as a runnable language, suitable for hybrid system simulation (e.g. the semantics features rules with uncountably many premises). We introduce an imperative counterpart of HybCore, whose semantics is simpler and runnable, and yet intimately related with the semantics of HybCore at the level of hybrid monads. We then establish a corresponding soundness and adequacy theorem. To attest that the resulting semantics can serve as a firm basis for the implementation of typical tools of programming oriented to the hybrid domain, we present a web-based prototype implementation to evaluate and inspect hybrid programs, in the spirit of GHCI for Haskell and UTop for OCaml. The major asset of our implementation is that it formally follows the operational semantic rules. +---PAGE_BREAK--- + +# Implementing Hybrid Semantics: From Functional to Imperative + +Sergey Goncharov¹, Renato Neves² and José Proença³ + +¹ Dept. of Comp. 
Sci., FAU Erlangen-Nürnberg, Germany + +² University of Minho & INESC-TEC, Portugal + +³ CISTER/ISEP, Portugal + +**Abstract.** Hybrid programs combine digital control with differential equations, and naturally appear in a wide range of application domains, from biology and control theory to real-time software engineering. The entanglement of discrete and continuous behaviour inherent to such programs goes beyond the established computer science foundations, producing challenges related to e.g. infinite iteration and combination of hybrid behaviour with other effects. A systematic treatment of *hybridness* as a dedicated computational effect has emerged recently. In particular, a generic idealized functional language HYBCORE with a sound and adequate operational semantics has been proposed. The latter semantics however did not provide hints to implementing HYBCORE as a runnable language, suitable for hybrid system simulation (e.g. the semantics features rules with uncountably many premises). We introduce an imperative counterpart of HYBCORE, whose semantics is simpler and runnable, and yet intimately related with the semantics of HYBCORE at the level of *hybrid monads*. We then establish a corresponding soundness and adequacy theorem. To attest that the resulting semantics can serve as a firm basis for the implementation of typical tools of programming oriented to the hybrid domain, we present a web-based prototype implementation to evaluate and inspect hybrid programs, in the spirit of GHCI for HASKELL and UTOP for OCAML. The major asset of our implementation is that it formally follows the operational semantic rules. + +## 1 Introduction + +**The core idea of hybrid programming.** Hybrid programming is a rapidly emerging computational paradigm [26,29] that aims at using principles and techniques from programming theory (e.g. 
compositionality [12,26], Hoare calculi [29,34], theory of iteration [2,8]) to provide formal foundations for developing computational systems that interact with physical processes. Cruise controllers are a typical example of this pattern; a very simple case is given by the hybrid program below. + +```c +while true do { + if v ≤ 10 then (v' = 1 for 1) else (v' = -1 for 1) (cruise controller) +} +``` +---PAGE_BREAK--- + +In a nutshell, the program specifies a digital controller that periodically measures and regulates a vehicle's velocity (v): if the latter is less or equal than 10 the controller accelerates during 1 time unit, as dictated by the program statement $v' = 1 \text{ for } 1$ ($v' = 1$ is a differential equation representing the velocity's rate of change over time. The value 1 on the right-hand side of for is the duration during which the program statement runs). Otherwise, it decelerates during the same amount of time ($v' = -1 \text{ for } 1$). Figure 1 shows the output respective to this hybrid program for an initial velocity of 5. + +Note that in contrast to standard programming, the cruise controller involves not only classical constructs (while-loops and conditional statements) but also differential ones (which are used for describing physical processes). This cross-disciplinary combination is the core feature of hybrid programming and has a notably wide range of application domains (see [29,30]). However, it also hinders the use of classical techniques of programming, and thus calls for a principled extension of programming theory to the hybrid setting. + +Fig. 1: Vehicle's velocity + +As is already apparent from the (cruise controller) example, we stick to an *imperative* programming style, in particular, in order to keep in touch with the established denotational models of physical time and computation. A popular alternative to this for modelling real-time and hybrid systems is to use a *declarative* programming style, which is done e.g. 
in real-time Maude [27] or Modelica [10]. A well-known benefit of declarative programming is that programs are very easy to write, however on the flip side, it is considerably more difficult to define what they exactly mean. + +**Motivation and related work.** Most of the previous research on formal hybrid system modelling has been inspired by automata theory and Kleene algebra (as the corresponding algebraic counterpart). These approaches led to the well-known notion of hybrid automaton [17] and Kleene algebra based languages for hybrid systems [28,18,19]. From the purely semantic perspective, these formalizations are rather close and share such characteristic features as *nondeterminism* and what can be called *non-refined divergence*. The former is standardly justified by the focus on formal verification of safety-critical systems: in such contexts overabstraction is usually desirable and useful. However, coalescing *purely hybrid* behaviour with nondeterminism detaches semantic models from their prototypes as they exist in the wild. This brings up several issues. Most obviously, a nondeterministic semantics, especially not given in an operational form, cannot directly serve as a basis for languages and tools for hybrid system testing and simulation. Moreover, models with nondeterminism baked in do not provide a clear indication of how to combine hybrid behaviour with effects other +---PAGE_BREAK--- + +than nondeterminism (e.g. probability), or to combine it with nondeterminism in a different way (van Glaabeek's spectrum [36] gives an idea about the diversity of potentially arising options). Finally, the Kleene algebra paradigm strongly suggests a relational semantics for programs, with the underlying relations connecting a state on which the program is run with the states that the program can reach. 
As previously indicated by Höfner and Möller [18], this view is too coarse-grained and contrasts to the trajectory-based one where a program is associated with a trajectory of states (recall Figure 1). The trajectory-based approach provides an appropriate abstraction for such aspects as notions of convergence, periodic orbits, and duration-based predicates [5]. This potentially enables analysis of properties such as *how fast* our (cruise controller) example reaches the target velocity or for *how long* it exceeds it. + +The issue of *non-refined divergence* mentioned earlier arises from the Kleene algebra law $p;0 = 0$ in conjunction with Fischer-Ladner's encoding of while-loops `while b do { p }` as $(b;p)*; \neg b$. This creates a havoc with all divergent programs `while true do { p }` as they become identified with divergence 0, thus making the above example of a (cruise controller) meaningless. This issue is extensively discussed in Höfner and Möller's work [18] on a *nondeterministic* algebra of trajectories, which tackles the problem by disabling the law $p;0 = 0$ and by introducing a special operator for infinite iteration that inherently relies on nondeterminism. This iteration operator inflates trajectories at so-called 'Zeno points' with arbitrary values, which in our case would entail e.g. the program + +$$ x := 1; while true do { wait x; x := x/2 } \quad (\text{zeno}) $$ + +to output at time instant 2 all possible values in the valuation space (the expression `wait t` represents a wait call of t time units). More details about Zeno points can be consulted in [18,14]. + +In previous work [12,14], we pursued a *purely hybrid* semantics via a simple *deterministic functional* language HYBCORE, with while-loops for which we used Elgot's notion of iteration [8] as the underlying semantic structure. That resulted in a semantics of finite and infinite iteration, corresponding to a refined view of divergence. 
Specifically, we developed an operational semantics and also a denotational counterpart for HYBCORE. An important problem of that semantics, however, is that it involves infinitely many premisses and requires calculating total duration of programs, which precludes using such semantics directly in implementations. Both the above examples (cruise controller) and (zeno) are affected by this issue. In the present paper we propose an *imperative* language with a denotational semantics similar to HYBCORE's one, but now provide a clear recipe for executing the semantics in a constructive manner. + +**Overview and contributions.** Building on our previous work [14], we devise operational and denotational semantics suitable for implementation purposes, and provide a soundness and adequacy theorem relating both these styles of semantics. Results of this kind are well-established yardsticks in the programming language theory [37], and beneficial from a practical perspective. For example, small-step operational semantics naturally guides the implementation of compilers for +---PAGE_BREAK--- + +programming languages, whilst denotational semantics is more abstract, syntax-independent, and guides the study of program equivalence, of the underlying computational paradigm, and its combination with other computational effects. + +As mentioned before, in our previous work [14] we introduced a simple functional hybrid language HYBCORE with operational and denotational monad-based semantics. Here, we work with a similar imperative while-language, whose semantics is given in terms of a global state space of trajectories over $\mathbb{R}^n$, which is a commonly used carrier when working with solutions of systems of differential equations. A key principle we have taken as a basis for our new semantics is the capacity to determine behaviours of a program p by being able to examine only some subterms of it. 
In order to illustrate this aspect, first note that our semantics does not reduce program terms p and initial states $\sigma$ (corresponding to valuation functions $\sigma: \mathcal{X} \to \mathbb{R}$ on program variables $\mathcal{X}$) to states $\sigma'$, as usual in classical programming. Instead it reduces triples p, $\sigma$, t of programs p, initial states $\sigma$ and time instants t to a state $\sigma'$; such a reduction can be read as "given $\sigma$ as the initial state, program p produces a state $\sigma'$ at time instant t". Then, the reduction process of p, $\sigma$, t to a state only examines fragments of p or unfolds it when strictly necessary, depending of the time instant t. For example, the reduction of the (cruise controller) unfolds the underlying loop only twice for the time instant $1 + 1/2$ (the time instant $1 + 1/2$ occurred in the second iteration of the loop). This is directly reflected in our prototype implementation of an interactive evaluator of hybrid programs LINCE. It is available online and comes with a series of examples for the reader to explore (http://arcatools.org/lince). The plot in Figure 1 was automatically obtained from LINCE, by calling on the previously described reduction process for a predetermined sequence of time instants t. + +For the denotational model, we build on our previous work [12,14] where hybrid programs are interpreted via a suitable monad **H**, called the *hybrid monad* and capturing the computational effect of *hybridness*, following the seminal approach of Moggi [24,25]. Our present semantics is more lightweight and is naturally couched in terms of another monad **H**S, parametrized by a set **S**. In our case, as mentioned above, **S** is the set of trajectories over $\mathbb{R}^n$ where *n* is the number of available program variables $\mathcal{X}$. The latter monad is in fact parametrized in a formal sense [35] and comes out as an instance of a recently emerged generic construction [7]. 
A remarkable salient feature of that construction is that it can be instantiated in a constructive setting (without using any choice principles) – although we do not touch upon this aspect here, in our view this reinforces the fundamental nature of our semantics. Among various benefits of **H**S over **H**, the former monad enjoys a construction of an iteration operator (in the sense of Elgot [8]) as a *least fixpoint*, calculated as a limit of an $\omega$-chain of approximations, while for **H** the construction of the iteration operator is rather intricate and no similar characterization is available. A natural question that arises is: how are **H** and **H**S related? We do answer it by providing an instructive connection, which sheds light on the construction of **H**, by explicitly identifying semantic ingredients which have to be added to **H**S to obtain **H**. Additionally, this results in “backward compatibility” with our previous work. +---PAGE_BREAK--- + +**Document structure.** After short preliminaries (Section 2), in Section 3 we introduce our while-language and its operational semantics. In Sections 4 and 5, we develop the denotational model for our language and connect it formally to the existing hybrid monad [12,14]. In Section 6, we prove a soundness and adequacy result for our operational semantics w.r.t. the developed model. Section 7 describes LINCE's architecture. Finally, Section 8 concludes and briefly discusses future work. Omitted proofs and examples are found in the extended version of the current paper [15]. + +## 2 Preliminaries + +We assume familiarity with category theory [1]. By $\mathbb{R}$, $\mathbb{R}_+$ and $\bar{\mathbb{R}}_+$ we respectively denote the sets of reals, non-negative reals, and extended non-negative reals (i.e. $\mathbb{R}_+$ extended with the infinity value $\infty$). 
Let $[0, \bar{\mathbb{R}}_+)$ denote the set of downsets of $\bar{\mathbb{R}}_+$ having the form $[0, d]$ ($d \in \mathbb{R}_+$) or the form $[0, d)$ ($d \in \bar{\mathbb{R}}_+$). We call the elements of the dependent sum $\sum_{I \in [0, \bar{\mathbb{R}}_+)} X^I$ trajectories (over $X$). By $[0, \mathbb{R}_+]$, $[0, \bar{\mathbb{R}}_+)$ and $[\bar{0}, \bar{\mathbb{R}}_+)$ we denote the following corresponding subsets of $[0, \bar{\mathbb{R}}_+]$: $([0, d] | d \in \mathbb{R}_+)$, $([0, d] | d \in \bar{\mathbb{R}}_+)$ and $([0, d] | d \in \bar{\mathbb{R}}_+)$. By $X \amalg Y$ we denote the disjoint union, which is the categorical coproduct in the category of sets with the corresponding left and right injections inl: $X \to X \amalg Y$, inr: $Y \to X \amalg Y$. To reduce clutter, we often use plain union $X \cup Y$ in place of $X \amalg Y$ if X and Y are disjoint by construction. + +By $a \triangleleft b \triangleright c$ we denote the case distinction construct: a if b is true and c otherwise. By ! we denote the empty function, i.e. a function with the empty domain. For the sake of succinctness, we use the notation $e^t$ for the function application $e(t)$ with real-value t. + +## 3 An imperative hybrid while-language and its semantics + +This section introduces the syntax and operational semantics of our language. We first fix a stock of n-variables $\mathcal{X} = \{x_1, \dots, x_n\}$ over which we build atomic programs, according to the grammar + +$$ +\begin{aligned} +At(\mathcal{X}) &\ni x := t \mid x'_1 = t_1, \dots, x'_n = t_n \quad \texttt{for } t \\ +LTerm(\mathcal{X}) &\ni r \mid r \cdot x \mid t+s +\end{aligned} + $$ + +where $x \in \mathcal{X}$, $r \in \mathbb{R}$, $t_i, t, s \in LTerm(\mathcal{X})$. An atomic program is thus either a classical assignment $x := t$ or a differential statement $x'_1 = t_1, \dots, x'_n = t_n$ for t. The latter reads as "run the system of differential equations $x'_1 = t_1, \dots, x'_n = t_n$ for t time units". 
We then define the while-language via the grammar + +$$ Prog(\mathcal{X}) \ni a \mid p; q \mid \texttt{if} b \texttt{then} p \texttt{else} q \mid \texttt{while} b \texttt{do} \{ p \} $$ + +where $p, q \in Prog(\mathcal{X})$, $a \in At(\mathcal{X})$ and $b$ is an element of the free Boolean algebra generated by the terms $t \leqslant s$ and $t \geqslant s$. The expression `wait t` (from the previous section) is encoded as the differential statement $x'_1 = 0, \dots, x'_n = 0$ for t. +---PAGE_BREAK--- + +*Remark 1.* The systems of differential equations that our language allows are always linear. This is not to say that we could not consider more expressive systems; in fact we could straightforwardly extend the language in this direction, for its semantics (presented below) is not impacted by specific choices of solvable systems of differential equations. But here we do not focus on such choices regarding the expressivity of continuous dynamics and concentrate on a core hybrid semantics instead on which to study the fundamentals of hybrid programming. + +In the sequel we abbreviate differential statements $x_1' = t_1, \dots, x_n' = t_n$ for $t$, where $\bar{x}'$ and $\bar{t}$ abbreviate the corresponding vectors of variables $x_1' \dots x_n'$ and linear-combination terms $t_1 \dots t_n$. We call functions of type $\sigma: \mathcal{X} \to \mathbb{R}$ environments; they map variables to the respective valuations. We use the notation $\sigma\nabla[\bar{\nu}/\bar{x}]$ to denote the environment that maps each $x_i$ in $\bar{x}$ to $v_i$ in $\bar{\nu}$ and the rest of variables in the same way as $\sigma$. Finally, we denote by $\phi_{\sigma}^{\bar{x}'=\bar{t}}: [0, \infty) \to \mathbb{R}^n$ the solution of a system of differential equations $\bar{x}' = \bar{t}$ with $\sigma$ determining the initial condition. When clear from context, we omit the superscript in $\phi_{\sigma}^{\bar{x}'=\bar{t}}$. 
For a linear-combination term $t$ the expression $t\sigma$ denotes the corresponding interpretation according to $\sigma$ and analogously for $b\sigma$ where $b$ is a Boolean expression. + +We now introduce a small-step operational semantics for our language. Intuitively, the semantics establishes a set of rules for reducing a triple $\langle program \rangle$ to an environment, via a *finite* sequence of reduction steps. The rules are presented in Figure 2. The terminal configuration $\langle skip, \sigma, t \rangle$ represents a successful end of a computation, which can then be fed into another computation (via rule (**seq-skip**→)). Contrastingly, $\langle stop, \sigma, t \rangle$ is a terminating configuration that inhibits the execution of subsequent computations. The latter is reflected in rules (**diff-stop**→) and (**seq-stop**→) which entail that, depending on the chosen time instant, we do not need to evaluate the whole program, but merely a part of it – consequently, infinite while-loops need not yield infinite reduction sequences (as explained in Remark 2). Note that time $t$ is consumed when applying the rules (**diff-stop**→) and (**diff-seq**→) in correspondence to the duration of the differential statement at hand. The rules (**seq**) and (**seq-skip**→) correspond to the standard rules of operational semantics for while languages over an imperative store [37]. + +*Remark 2.* Putatively infinite while-loops do not necessarily yield infinite reduction steps. Take for example the while-loop below whose iterations have always duration 1. 
 + +$$ x := 0;\ \texttt{while true do}\ \{\ x := x + 1;\ \texttt{wait}\ 1\ \} \quad (1) $$ + +It yields a finite reduction sequence for the time instant 1/2, as shown below: + +$$ +\begin{aligned} +& x := 0;\ \texttt{while true do}\ \{\ x := x + 1;\ \texttt{wait}\ 1\ \}, \sigma, 1/2 \rightarrow \\ +& \quad \{ \text{by the rules } (\mathbf{asg}\xrightarrow{\phantom{=}}) \text{ and } (\mathbf{seq-skip}\xrightarrow{\phantom{=}}) \} \\ +& \texttt{while true do}\ \{\ x := x + 1;\ \texttt{wait}\ 1\ \}, \sigma \nabla[0/x], 1/2 \rightarrow \\ +& \quad \{ \text{by the rule } (\mathbf{wh-true}\xrightarrow{\phantom{=}}) \} +\end{aligned} +$$ +---PAGE_BREAK--- + +Fig. 2: Small-step Operational Semantics + +$$ +\begin{align*} +& x := x + 1 ; \textcolor{blue}{wait} 1 ; \textcolor{blue}{while} \textcolor{blue}{true} \textcolor{blue}{do} \{ x := x + 1 ; \textcolor{blue}{wait} 1 \}, \sigma \nabla [0/x] , \frac{1}{2} \rightarrow \\ +& \qquad \{\text{by the rules } (\mathbf{asg}\xrightarrow{\phantom{=}}) \text{ and } (\mathbf{seq-skip}\xrightarrow{\phantom{=}})\} \\ +& \textcolor{blue}{wait} 1 ; \textcolor{blue}{while} \textcolor{blue}{true} \textcolor{blue}{do} \{ x := x + 1 ; \textcolor{blue}{wait} 1 \}, \sigma \nabla [0 + 1/x] , \frac{1}{2} \rightarrow \\ +& \qquad \{\text{by the rules } (\mathbf{diff-stop}\xrightarrow{\phantom{=}}) \text{ and } (\mathbf{seq-stop}\xrightarrow{\phantom{=}})\} \\ +& stop, \sigma \nabla [0 + 1/x] , 0 +\end{align*} +$$ + +The gist is that to evaluate program (1) at time instant $1/2$, one only needs to unfold the underlying loop until surpassing $1/2$ in terms of execution time. Note that if the wait statement is removed from the program then the reduction sequence would not terminate, intuitively because all iterations would be instantaneous and thus the total execution time of the program would never reach $1/2$. + +The following theorem entails that our semantics is deterministic, which is +instrumental for our implementation. 
+ +**Theorem 1.** For every program *p*, environment *σ*, and time instant *t* there is at most one applicable reduction rule. + +Let $\to^*$ be the transitive closure of the reduction relation $\to$ that was previously presented. + +**Corollary 1.** For every program term p, environments σ, σ', σ'', time instants t, t', t'', and termination flags s, s' ∈ {skip, stop}, if p, σ, t →* s, σ', t' and p, σ, t →* s', σ'', t'', then the equations s = s', σ' = σ'' and t' = t'' must hold. + +*Proof.* Follows by induction on the number of reduction steps and Theorem 1. □ + +As alluded above, the operational semantics treats time as a resource. This is formalised below. +---PAGE_BREAK--- + +**Proposition 1.** For all program terms $p$ and $q$, environments $\sigma$ and $\sigma'$, and time instants $t, t'$ and $s$, if $p, \sigma, t \to q, \sigma'$, $t'$ then $p, \sigma, t+s \to q, \sigma'$, $t'+s$; and if $p, \sigma, t \to \text{skip}, \sigma'$, $t'$ then $p, \sigma, t+s \to \text{skip}, \sigma'$, $t'+s$. + +# 4 Towards Denotational Semantics: The Hybrid Monad + +A mainstream subsuming paradigm in denotational semantics is due to Moggi [24,25], who proposed to identify a computational effect of interest as a monad, around which the denotational semantics is built using standard generic mechanisms, prominently provided by category theory. In this section we recall necessary notions and results, motivated by this approach, to prepare ground for our main constructions in the next section. 
+ +**Definition 1 (Monad).** A monad $\mathbf{T}$ (on the category of sets and functions) is given by a triple $(T, \eta, (-)^*)$, consisting of an endomap $T$ over the class of all sets, together with a set-indexed class of maps $\eta_X: X \to TX$ and a so-called Kleisli lifting sending each $f: X \to TY$ to $f^*: TX \to TY$ and obeying monad laws: $\eta^* = \text{id}, f^* \cdot \eta = f, (f^* \cdot g)^* = f^* \cdot g^*$ (it follows from this definition that $T$ extends to a functor and $\eta$ to a natural transformation). + +A monad morphism $\theta: \mathbf{T} \to \mathbf{S}$ from $(T, \eta^{\mathbf{T}}, (-)^{\mathbf{T}})$ to $(S, \eta^{\mathbf{S}}, (-)^{\mathbf{S}})$ is a natural transformation $\theta: T \to S$ such that $\theta \cdot \eta^{\mathbf{T}} = \eta^{\mathbf{S}}$ and $\theta \cdot f^{\mathbf{T}} = (\theta \cdot f)^{\mathbf{S}} \cdot \theta$. + +We will continue to use bold capitals (e.g. **T**) for monads over the corresponding endofunctors written as capital Romans (e.g. **T**). + +In order to interpret while-loops one needs additional structure on the monad. + +**Definition 2 (Elgot Monad).** A monad $\mathbf{T}$ is called Elgot if it is equipped with an iteration operator $(-)^{\dagger}$ that sends each $f: X \to T(Y \Join X)$ to $f^{\dagger}: X \to TY$ in such a way that certain established axioms of iteration are satisfied [2,16]. + +Monad morphisms between Elgot monads are additionally required to preserve iteration: $\theta \cdot f^{\dagger\mathbf{T}} = (\theta \cdot f)^{\dagger\mathbf{S}}$ for $\theta: \mathbf{T} \to \mathbf{S}$, $f: X \to T(Y \Join X)$. + +For a monad $\mathbf{T}$, a map $f: X \to TY$, called a Kleisli map, is roughly to be regarded as a semantics of a program $p$, with $X$ as the semantics of the input, and $Y$ as the semantics of the output. For example, with $T$ being the maybe monad $(-) \Join \{\perp\}$, we obtain semantics of programs as partial functions. 
Let us record this example in more detail for further reference. 

*Example 1 (Maybe Monad M)*. The maybe monad is determined by the following data: $MX = X + \{\perp\}$, the unit is the left injection $\mathrm{inl}: X \to X + \{\perp\}$ and given $f: X \to Y + \{\perp\}$, $f^*$ is equal to the copairing $[f, \mathrm{inr}]: X + \{\perp\} \to Y + \{\perp\}$. 

It follows by general considerations (enrichment of the category of Kleisli maps over complete partial orders) that $\mathbf{M}$ is an Elgot monad with the following iteration operator $(-)^{\flat}$: given $f: X \to (Y + X) + \{\perp\}$, and $x_0 \in X$, let $x_0, x_1, \ldots$ be the longest (finite or infinite) sequence over $X$ constructed inductively in such a way that $f(x_i) = \mathrm{inl}(\mathrm{inr}\, x_{i+1})$. Now, $f^{\flat}(x_0) = \mathrm{inr} \perp$ if the sequence is infinite or
---PAGE_BREAK---

$f(x_i) = \mathrm{inr} \perp$ for some $i$, and $f^{\flat}(x_0) = \mathrm{inl}\, y$ if for the last element of the sequence $x_n$, which must exist, $f(x_n) = \mathrm{inl}\, \mathrm{inl}\, y$. 

Other examples of Elgot monads can be consulted e.g. in [16]. 

The computational effect of *hybridness* can also be captured by a monad, called *hybrid monad* [12,14], which we recall next (in a slightly different but equivalent form). To that end, we also need to recall *Minkowski addition* for subsets of the set $\bar{\mathbb{R}}_+$ of extended non-negative reals (see Section 2): $A + B = \{a + b \mid a \in A, b \in B\}$, e.g. $[a, b] + [c, d] = [a + c, b + d]$ and $[a, b] + [c, d) = [a + c, b + d)$. 

**Definition 3 (Hybrid Monad H).** The hybrid monad **H** is defined as follows. 

$$
\begin{align*}
HX &= \sum_{I \in [0, \bar{\mathbb{R}}_+]} X^I \uplus \sum_{I \in [0, \bar{\mathbb{R}}_+]} X^I, \text{ i.e. it is a set of trajectories valued on } X \\
&\text{and with the domain downclosed. 
For any } p = \text{inj}\langle I, e \rangle \in HX \text{ with } \text{inr} \in \{\text{inl}, \\ +&\text{inr}\}, \text{ let us use the notation } p_d = I, p_e = e, \text{ the former being the duration of} \\ +&\text{the trajectory and the latter the trajectory itself. Let also } \varepsilon = \langle \emptyset, ! \rangle. +\end{align*} +$$ + +- $\eta(x) = \text{inl}\langle[0,0], \lambda t. x\rangle$, i.e. $\eta(x)$ is a trajectory of duration 0 that returns $x$. + +- given $f: X \to HY$, we define $f^*: HX \to HY$ via the following clauses: + +$$ +\begin{align*} +f^*(\text{inl}\langle I, e \rangle) &= \text{inj}\langle I + J, \lambda t. (f(e^t))_e^0 \rangle \quad \triangleleft t < d \triangleright (f(e^d))_e^{t-d} \\ +&\qquad \text{if } I' = I = [0, d] \text{ for some } d, f(e^d) = \text{inj}\langle J, e' \rangle +\end{align*} +$$ + +$$ +\begin{align*} +f^*(\mathrm{inl}\langle I, e \rangle) &= \mathrm{inr}\langle I', \lambda t. (f(e^t))_e^0 \rangle & \text{if } I' \neq I \\ +f^*(\mathrm{inr}\langle I, e \rangle) &= \mathrm{inr}\langle I', \lambda t. (f(e^t))_e^0 \rangle +\end{align*} +$$ + +where $I' = \bigcup \{[0,t] \subseteq I | \forall s \in [0,t]. f(e^s) \neq \mathrm{inr} \varepsilon\}$ and $\mathrm{inj} \in \{\mathrm{inl}, \mathrm{inr}\}$. + +The definition of the hybrid monad **H** is somewhat intricate, so let us complement it with some explanations (details and further intuitions about the hybrid monad can also be consulted in [12]). The domain **HX** constitutes three types of trajectories representing different kinds of hybrid computation: + +- (closed) convergent: $\text{inl}\langle[0,d],e\rangle \in HX$ (e.g. instant termination $\eta(x)$); + +- open divergent: $\text{inr}\langle[0,d),e\rangle \in HX$ (e.g. 
instant divergence $\text{inr}\epsilon$ or a trajectory $[0,\infty) \rightarrow X$ which represents a computation that runs ad infinitum); + +- closed divergent: $\text{inr}\langle[0,d],e\rangle \in HX$ (representing computations that start to diverge precisely after the time instant $d$). + +The Kleisli lifting $f^*$ works as follows: for a given trajectory $\text{inj}\langle I, e \rangle$, we first calculate the largest interval $I' \subseteq I$ on which the trajectory $\lambda t \in I'$. $f(e^t)$ does not instantly diverge (i.e. $f(e^t) \neq \text{inr} \varepsilon$) throughout, hence $I'$ is either $[0, d']$ or $[0, d')$ for some $d'$. Now, the first clause in the definition of $f^*$ corresponds to the successful composition scenario: the argument trajectory $\langle I, e \rangle$ is convergent, and composing $f$ with $e$ as described in the definition of $I'$ does not yield divergence all over $I$. In that case, we essentially concatenate $\langle I, e \rangle$ with $f(e^d)$, the latter being the trajectory computed by $f$ at the last point of $e$. The remaining two clauses correspond to various flavours of divergence, including divergence of the input $(\text{inr}\langle I, e\rangle)$ and divergences occurring along $f \cdot e$. Incidentally, this explains how closed divergent trajectories may arise: if $I' = [0, d']$ and $d'$ is properly smaller than $d$, then we diverge precisely *after* $d'$, which is possible e.g. if the program behind $f$ continuously checks a condition which did not fail up until $d'$. +---PAGE_BREAK--- + +# 5 Deconstructing the Hybrid Monad + +As mentioned in the introduction, in [14] we used **H** for giving semantics to a functional language HYBCORE whose programs are interpreted as morphisms of type $X \to HY$. 
Here, we are dealing with an imperative language, which from a semantic point of view amounts to fixing a type of states *S*, shared between all programs; the semantics of a program is thus restricted to morphisms of type *S* $\to HS$. As explained next, this allows us to make do with a simpler monad **H**S, globally parametrized by *S*. The new monad **H**S has the property that $H_S S$ is naturally isomorphic to *HS*. Apart from (relative to **H**) simplicity, the new monad enjoys further benefits, specifically **H**S is mathematically a better behaved structure, e.g. in contrast to **H**, Elgot iteration on **H**S is constructed as a least fixed point. Factoring the denotational semantics through **H**S thus allows us to bridge the gap to the operational semantics given in Section 3, and facilitates the soundness and adequacy proof in the forthcoming Section 6. + +In order to define $H_S$, it is convenient to take a slightly broader perspective. We will also need to make a detour through the topic of ordered monoid modules with certain completeness properties so that we can characterise iteration on $H_S$ as a least fixed point. + +**Definition 4 (Monoid Module, Generalized Writer Monad [14]).** Given a (not necessarily commutative) monoid ($\mathbb{M}, +, 0$), a monoid module is a set $\mathbb{E}$ equipped with a map $\triangleright: \mathbb{M} \times \mathbb{E} \to \mathbb{E}$ (monoid action), subject to the laws $0 \triangleright e = e$, $(m+n) \triangleright e = m \triangleright (n \triangleright e)$. 
Every monoid-module pair $(\mathbb{M}, \mathbb{E})$ induces a generalized writer monad $\mathbf{T} = (T, \eta, (-)^*)$ with $T = \mathbb{M} \times (-) \uplus \mathbb{E}$, $\eta_X(x) = \langle 0, x \rangle$, and

$$f^*(m, x) = \langle m + n, y \rangle \quad \text{where} \quad m \in \mathbb{M}, x \in X, f(x) = \langle n, y \rangle \in \mathbb{M} \times Y$$

$$f^*(m, x) = m \triangleright e \quad \text{where} \quad m \in \mathbb{M}, x \in X, f(x) = e \in \mathbb{E}$$

$$f^*(e) = e \quad \text{where} \quad e \in \mathbb{E}$$

This generalizes the writer monad ($\mathbb{E} = \emptyset$) and the exception monad ($\mathbb{M} = 1$). 

*Example 2.* A simple motivating example of a monoid-module pair $(\mathbb{M}, \mathbb{E})$ is the pair $(\mathbb{R}_+, \mathbb{R}_+)$ where the monoid operation is addition with 0 as the unit and the monoid action is also addition. 

More specifically, we are interested in ordered monoids and (conservatively) complete monoid modules. These are defined as follows. 

**Definition 5 (Ordered Monoids, (Conservatively) Complete Monoid Modules [7]).** We call a monoid $(\mathbb{M}, +, 0)$ an ordered monoid if it is equipped with a partial order $\leq$, such that $0$ is the least element of this order and $+$ is right-monotone (but not necessarily left-monotone). 

An ordered $\mathbb{M}$-module w.r.t. an ordered monoid $(\mathbb{M}, +, 0, \leq)$, is an $\mathbb{M}$-module $(\mathbb{E}, \triangleright)$ together with a partial order $\sqsubseteq$ and a least element $\perp$, such that $\triangleright$ is
---PAGE_BREAK---

monotone on the right and $(- \triangleright \perp)$ is monotone, i.e. 

$$
\overline{\perp \sqsubseteq x} \qquad \frac{x \sqsubseteq y}{a \triangleright x \sqsubseteq a \triangleright y} \qquad \frac{a \le b}{a \triangleright \perp \sqsubseteq b \triangleright \perp}
$$

We call the last property restricted left monotonicity. 
+ +An ordered $\mathbb{M}$-module is $(\omega)$-complete if for every $\omega$-chain $s_1 \sqsubseteq s_2 \sqsubseteq \dots$ on $\mathbb{E}$ there is a least upper bound $\bigcup_i s_i$ and $\triangleright$ is continuous on the right, i.e. + +$$ +\overline{\forall i. s_i \sqsubseteq \bigsqcup_i s_i} \qquad \frac{\forall i. s_i \sqsubseteq x}{\bigsqcup_i s_i \sqsubseteq x} \qquad \overline{a \triangleright \bigsqcup_i s_i \sqsubseteq \bigsqcup_i a \triangleright s_i} +$$ + +(the law $\bigsqcup_i a \triangleright s_i \sqsubseteq a \triangleright \bigsqcup_i s_i$ is derivable). Such an $\mathbb{M}$-module is conservatively complete if additionally for every $\omega$-chain $a_1 \sqsubseteq a_2 \sqsubseteq \dots$ in $\mathbb{M}$, such that the least upper bound $\bigvee_i a_i$ exists, $(\bigvee_i a_i) \triangleright \perp = \bigsqcup_i a_i \triangleright \perp$. + +A homomorphism $h: \mathbb{E} \to \mathbb{F}$ of (conservatively) complete monoid $\mathbb{M}$-modules is required to be monotone and structure-preserving in the following sense: $h(\perp) = \perp$, $h(a \triangleright x) = a \triangleright h(x)$, $h(\bigsqcup_i x_i) = \bigsqcup_i h(x_i)$. + +The completeness requirement for $\mathbb{M}$-modules has a standard motivation coming from domain theory, where $\sqsubseteq$ is regarded as an *information order* and completeness is needed to ensure that the relevant semantic domain can accommodate infinite behaviours. The conservativity requirement additionally ensures that the least upper bounds, which exist in $\mathbb{M}$ agree with those in $\mathbb{E}$. Our main example is as follows (we will use it for building $\mathbf{H}_S$ and its iteration operator). 
**Definition 6 (Monoid Module of Trajectories).** The ordered monoid of finite open trajectories $(\text{Trj}_S, \hat{\wedge}, \langle\emptyset, !\rangle, \leqslant)$ over a given set $S$, is defined as follows: $\text{Trj}_S = \sum_{I \in [0, \bar{\mathbb{R}}_+)} S^I$, the unit is the empty trajectory $\varepsilon = \langle\emptyset, !\rangle$; the monoid operation is concatenation of trajectories $\hat{\wedge}$, defined as follows: 

$$
\langle[0, d_1), e_1\rangle \mathbin{\hat{\wedge}} \langle[0, d_2), e_2\rangle = \langle[0, d_1 + d_2), \lambda t.\, e_1^t \triangleleft t < d_1 \triangleright e_2^{t-d_1}\rangle.
$$

The relation $\leqslant$ is defined as follows: $\langle[0, d_1), e_1\rangle \leqslant \langle[0, d_2), e_2\rangle$ if $d_1 \leqslant d_2$ and $e_1^t = e_2^t$ for every $t \in [0, d_1)$. We can additionally consider both sets $\sum_{I \in [0, \bar{\mathbb{R}}_+)} S^I$ and $\sum_{I \in [0, \bar{\mathbb{R}}_+]} S^I$ as $\text{Trj}_S$-modules, by defining the monoid action $\triangleright$ also as concatenation of trajectories and by equipping these sets with the order $\sqsubseteq$: $\langle I_1, e_1\rangle \sqsubseteq \langle I_2, e_2\rangle$ if $I_1 \subseteq I_2$ and $e_1^t = e_2^t$ for all $t \in I_1$. 

Consider the following functors: 

$$
H'_S X = \sum_{I \in [0, \bar{\mathbb{R}}_+)} S^I \times X \uplus \sum_{I \in [0, \bar{\mathbb{R}}_+)} S^I \qquad (2)
$$

$$
H_S X = \sum_{I \in [0, \bar{\mathbb{R}}_+)} S^I \times X \uplus \sum_{I \in [0, \bar{\mathbb{R}}_+]} S^I \qquad (3)
$$

Both of them extend to monads $\mathbf{H}'_S$ and $\mathbf{H}_S$ as they are instances of Definition 4. Moreover, it is laborious but straightforward to prove that both $H'_S X$ and $H_S X$ are conservatively complete Trj$_S$-modules on X [7], i.e. conservatively complete
The partial order on H'SX (which we will +use for obtaining the least upper bound of a certain sequence of approximations) +is given by the clauses below and relies on the previous order ≤ on trajectories: + +$$ +\frac{\langle I, e \rangle \le \langle I', e' \rangle}{\langle I, e \rangle \sqsubseteq \langle I', e' \rangle, x} +\qquad +\frac{\langle I, e \rangle \le \langle I', e' \rangle}{\langle I, e \rangle \sqsubseteq \langle I', e' \rangle} +$$ + +The monad given by (2) admits a sharp characterization, which is an instance of +a general result [7]. In more detail, + +**Proposition 2.** The pair $(H'_S X, \eta)$ is a free conservatively complete $\text{Trj}_S$-module on $X$, i.e. for every conservatively complete $\text{Trj}_S$-module $\mathbb{E}$ and a map $f: X \to \mathbb{E}$, there is unique homomorphism $\hat{f}: H'_S X \to \mathbb{E}$ such that $\hat{f} \cdot \eta = f$. + +Intuitively, Proposition 2 ensures that $H'_S X$ is a least conservatively complete $\text{Trj}_S$-module generated by $X$. This characterization entails a construction of an iteration operator on $\mathbf{H}'_S$ as a least fixpoint. This, in fact, also transfers to $\mathbf{H}_S$ (as detailed in the proof of the following theorem). + +**Theorem 2.** Both $\mathbf{H}'_S$ and $\mathbf{H}_S$ are Elgot monads, for which $f^\dagger$ is computed as a least fixpoint of $\omega$-continuous endomaps $g \mapsto [\eta,g]^* \cdot f$ over the function spaces $X \to \mathbf{H}'_S Y$ and $X \to \mathbf{H}_S Y$ correspondingly. + +In this section's remainder, we formally connect the monad **H**S with the monad **H**, +the latter introduced in our previous work and used for providing a semantics +to the functional language HYBCORE. In the following section we provide a +semantics for the current imperative language via the monad **H**S. Specifically, +in this section we will show how to build **H** from **H**S by considering additional +semantic ingredients on top of the latter. 
Let us subsequently write $\eta_S$, $(-)^{*_S}$ and $(-)^{\dagger_S}$ for the unit, the Kleisli lifting and the Elgot iteration of $\mathbf{H}_S$. Note that $S, X \mapsto H_S X$ is a parametrized monad in the sense of Uustalu [35], in particular $H_S X$ is functorial in $S$ and for every $f: S \to S'$, $H_f: \mathbf{H}_S \to \mathbf{H}_{S'}$ is a monad morphism. 

Then we introduce the following technical natural transformations $\iota: H_S X \to X + (S + \{\perp\})$ and $\tau: H_{S + Y} X \to H_S X$. First, let us define $\iota$: 

$$
\iota(I, e, x) = \begin{cases} \operatorname{inr} \operatorname{inl} e^0, & \text{if } I \neq \emptyset \\ \operatorname{inl} x, & \text{otherwise} \end{cases} \qquad \iota(I, e) = \begin{cases} \operatorname{inr} \operatorname{inl} e^0, & \text{if } I \neq \emptyset \\ \operatorname{inr} \operatorname{inr} \perp, & \text{otherwise} \end{cases}
$$

In words: $\iota$ returns the initial point for non-zero length trajectories, and otherwise returns either an accompanying value from $X$ or $\perp$, depending on whether the given trajectory is convergent or divergent. The functor $(-) + E$ for every $E$ extends to a monad, called the *exception monad*. The following is easy to show for $\iota$. 

**Lemma 1.** For every $S$, $\iota: H_S \rightarrow (-) + (S + \{\perp\})$ is a monad morphism. 

Next we define $\tau : H_{S + Y} X \rightarrow H_S X$: 

$$
\tau(I, e, x) = \begin{cases} \langle I, e', x \rangle, & \text{if } I = I' \\ \langle I', e' \rangle, & \text{otherwise} \end{cases} \qquad \tau(I, e) = \langle I', e' \rangle
$$

where $\langle I', e' \rangle$ is the largest trajectory such that for all $t \in I'$, $e^t = \operatorname{inl} {e'}^t$.
---PAGE_BREAK---

$$
\begin{align*}
[\mathbf{x} := \mathbf{t}](\sigma) &= \eta(\sigma \triangleright [\mathbf{t}\sigma/\mathbf{x}]) \\
[\bar{\mathbf{x}}' = \bar{u} \text{ for } \mathbf{t}](\sigma) &= \langle [0, \mathbf{t}\sigma), \lambda t. 
\sigma \triangleright [\phi_{\sigma}(t)/\bar{\mathbf{x}}], \sigma \triangleright [\phi_{\sigma}(\mathbf{t}\sigma)/\bar{\mathbf{x}}] \rangle \\ +[\mathbf{p}; \mathbf{q}](\sigma) &= [\mathbf{q}]^*([\mathbf{p}](\sigma)) \\ +[\texttt{if } \mathbf{b} \texttt{ then } \mathbf{p} \texttt{ else } \mathbf{q}](\sigma) &= [\mathbf{p}](\sigma) \triangleleft \mathbf{b}\sigma \triangleright [\mathbf{q}](\sigma) \\ +[\texttt{while } \mathbf{b} \texttt{ do } \{\mathbf{p}\}](\sigma) &= (\lambda \sigma . (\hat{H} \operatorname{inr})([\mathbf{p}](\sigma)) \triangleleft \mathbf{b}\sigma \triangleright \eta(\operatorname{inl} \sigma))^\dagger(\sigma) +\end{align*} +$$ + +Fig. 3: Denotational semantics. + +**Lemma 2.** For all *S* and *Y*, $\tau: H_{S\omega Y} \to H_S$ is a monad morphism. + +We now arrive at the main result of this section. + +**Theorem 3.** The correspondence $S \mapsto H_S S$ extends to an Elgot monad as follows: + +$$ +\begin{align*} +\eta(x \in S) &= \eta^S(x), \\ +(f: X \rightarrow H_S S)^* &= (H_X X \xrightarrow{H_{\iota,f}^{\mathrm{id}}} H_{S\omega\{\perp\}} X \xrightarrow{\tau} H_S X \xrightarrow{f_S^*} H_S S), \\ +(f: X \rightarrow H_{S\omega X}(S \Join X))^{\dagger} &= (X \xrightarrow{f_{S\omega X}^{\dagger}} H_{S\omega X} S \xrightarrow{H_{[\mathrm{inl},(\iota',f)]^{\mathrm{id}}}} H_{S\omega\{\perp\}} S \xrightarrow{\tau} H_S S). +\end{align*} +$$ + +where $\iota' = [\mathrm{inl}, \mathrm{id}] \cdot \iota : H_S S \to S \Join \{\perp\}$ and $(-)^\sharp : (X \to (S \Join X) \Join \{\perp\}) \to (X \to S \Join \{\perp\})$ is the iteration operator of the maybe-monad $(-) \Join \{\perp\}$ (as in Example 1). Moreover, thus defined monad is isomorphic to $\mathbf{H}$. + +*Proof (Proof Sketch).* It is first verified that the monad axioms are satisfied using abstract properties of $\iota$ and $\tau$, mainly provided by Lemmas 1 and 2. 
Then the isomorphism $\theta: H_S S \cong HS$ is defined as expected: $\theta([0, d], e, x) = \mathrm{inl}\langle[0, d], \hat{e}\rangle$ where $\hat{e}^t = \hat{e}^0$ for $t \in [0, d)$, $\hat{e}^d = x$; and $\theta(I, e) = \mathrm{inr}\langle I, e\rangle$. It is easy to see that $\theta$ respects the unit. The fact that $\theta$ respects Kleisli lifting amounts to a (tedious) verification by case distinction. Checking the formula for $(-)^\dagger$ amounts to transferring the definition of $(-)^\dagger$, as defined in previous work [13], along $\theta$. See the full proof in [15]. □ + +# 6 Soundness and Adequacy + +Let us start this section by providing a denotational semantics to our language using the results of the previous section. We will then provide a soundness and adequacy result that formally connects the thus established denotational semantics with the operational semantics presented in Section 3. + +First, consider the monad in (3) and fix $S = \mathbb{R}^\lambda$. We denote the obtained instance of $H_S$ as $\hat{H}$. Intuitively, we interpret a program $p$ as a map $[[p]] : S \to \hat{H}S$ which given an environment (a map from variables to values) returns a trajectory over $S$. The definition of $[[p]]$ is inductive over the structure of $p$ and is given in Figure 3. +---PAGE_BREAK--- + +In order to establish soundness and adequacy between the small-step operational semantics and the denotational semantics, we will use an auxiliary device. Namely, we will introduce a *big-step* operational semantics that will serve as midpoint between the two previously introduced semantics. We will show that the small-step semantics is equivalent to the big-step one and then establish soundness and adequacy between the big-step semantics and the denotational one. The desired result then follows by transitivity. The big-step rules are presented in Figure 4 and follow the same reasoning than the small-step ones. 
The expression $p, \sigma, t \Downarrow r, \sigma'$ means that $p$ paired with $\sigma$ evaluates to $r, \sigma'$ at time instant $t$. + +Fig. 4: Big-step Operational Semantics + +Next, we need the following result to formally connect both styles of operational semantics. + +**Lemma 3.** *Given a program p, an environment σ and a time instant t* + +1. if $p, \sigma, t \rightarrow p', \sigma', t'$ and $p', \sigma', t' \Downarrow skip, \sigma''$ then $p, \sigma, t \Downarrow skip, \sigma''$; + +2. if $p, \sigma, t \rightarrow p', \sigma', t'$ and $p', \sigma', t' \Downarrow stop, \sigma''$ then $p, \sigma, t \Downarrow stop, \sigma''$. + +*Proof.* The proof follows by induction over the derivation of the small step relation. □ + +**Theorem 4.** *The small-step semantics and the big-step semantics are related as follows. Given a program p, an environment σ and a time instant t* +---PAGE_BREAK--- + +1. $p, \sigma, t \Downarrow \mathit{skip}, \sigma' \text{ iff } p, \sigma, t \to^\star \mathit{skip}, \sigma', 0$; + +2. $p, \sigma, t \Downarrow \mathit{stop}, \sigma' \text{ iff } p, \sigma, t \to^\star \mathit{stop}, \sigma', 0.$ + +*Proof.* The right-to-left direction is obtained by induction over the length of the small-step reduction sequence using Lemma 3. The left-to-right direction follows by induction over the proof of the big-step judgement using Proposition 1. $\square$ + +Finally, we can connect the operational and the denotational semantics in the +expected way. + +**Theorem 5 (Soundness and Adequacy).** *Given a program p, an environment σ and a time instant t* + +1. $p, \sigma, t \to^* \mathit{skip}, \sigma', 0 \text{ iff } [\mathbf{p}](\sigma) = (\mathbf{h}: [0, t) \to \mathbb{R}^\mathcal{X}, \sigma');$ + +2. 
$p, \sigma, t \to^* \mathit{stop}, \sigma', 0 \text{ iff either } [\mathbf{p}](\sigma) = (\mathbf{h}: [0, t') \to \mathbb{R}^{\mathcal{X}}, \sigma'') \text{ or } [\mathbf{p}](\sigma) = \mathbf{h}: [0, t') \to \mathbb{R}^{\mathcal{X}}, \text{ and in either case with } t' > t \text{ and } h(t) = \sigma'.$ + +Here, “soundness” corresponds to the left-to-right directions of the equivalences and “adequacy” to the right-to-left ones. + +*Proof.* By Theorem 4, we equivalently replace the goal as follows: + +1. $p, \sigma, t \Downarrow \mathit{skip}, \sigma' \text{ iff } [\mathbf{p}](\sigma) = (\mathbf{h}: [0, t) \to \mathbb{R}^{\mathcal{X}}, \sigma');$ + +2. $p, \sigma, t \Downarrow \mathit{stop}, \sigma' \text{ iff either } [\mathbf{p}](\sigma) = (\mathbf{h}: [0, t') \to \mathbb{R}^{\mathcal{X}}, \sigma'') \text{ or } [\mathbf{p}](\sigma) = \mathbf{h}: [0, t') \to \mathbb{R}^{\mathcal{X}}, \text{ and in either case with } t' > t \text{ and } h(t) = \sigma'.$ + +Then the “soundness” direction is obtained by induction over the derivation of +the rules in Fig. 4. The “adequacy” direction follows by structural induction over +$p$; for while-loops, we call the fixpoint law $[\eta, f^\dagger]^* \cdot f = f^\dagger$ of Elgot monads. $\square$ + +# 7 Implementation + +This section presents our prototype implementation – LINCE – which is available +online both to run in our servers and to be compiled and executed locally +(http://arcatools.org/lince). Its architecture is depicted in Figure 5. The +dashed rectangles correspond to its main components. The one on the left +(Core engine) provides the parser respective to the while-language and the +engine to evaluate hybrid programs using the small-step operational semantics +of Section 3. 
The one on the right (Inspector) depicts trajectories produced +by hybrid programs according to parameters specified by the user and provides +an interface to evaluate hybrid programs at specific time instants (the initial +environment $\sigma: \mathcal{X} \to \mathbb{R}$ is assumed to be the function constant on zero). As +already mentioned, plots are generated by automatically evaluating at different +time instants the program given as input. Incoming arrows in the figure denote +an input relation and outgoing arrows denote an output relation. The two main +components are further explained below. + +**Core engine.** Our implementation extensively uses the computer algebra tool SAGEMATH [31]. This serves two purposes: (1) to solve systems of differential +---PAGE_BREAK--- + +Fig. 5: Depiction of LINCE's architecture + +equations (present in hybrid programs); and (2) to correctly evaluate if-then- +else statements. Regarding the latter, note that we do not merely use predicate +functions in programming languages for evaluating Boolean conditions, essentially +because such functions tend to give wrong results in the presence of real numbers +(due to the finite precision problem). Instead of this, LINCE uses SAGEMATH +and its ability to perform advanced symbolic manipulation to check whether +a Boolean condition is true or not. However, note that this will not always +give an output, fundamentally because solutions of linear differential equations +involve transcendental numbers and real-number arithmetic with such numbers is +undecidable [20]. We leave as future work the development of more sophisticated +techniques for avoiding errors in the computational evaluation of hybrid programs. + +**Inspector.** The user interacts with LINCE at two different stages: (a) when inputting a hybrid program and (b) when inspecting trajectories using LINCE's output interfaces. 
The latter case consists of adjusting different parameters for observing the generated plots in an optimal way. + +**Event-triggered programs.** Observe that the differential statements $x_1' = t, \dots, x_n' = t$ for $t$ are *time-triggered*: they terminate precisely when the instant of time $t$ is achieved. In the area of hybrid systems it is also usual to consider *event-triggered* programs: those that terminate *as soon as* a specified condition $\psi$ becomes true [38,6,11]. So we next consider atomic programs of the type $x_1' = t, \dots, x_n' = t$ until $\psi$ where $\psi$ is an element of the free Boolean algebra generated by $t \le s$ and $t \ge s$ where $t, s \in LTerm(X)$, signalling the termination of the program. In general, it is impossible to determine with *exact* precision when such programs terminate (again due to the undecidability of real-number arithmetic with transcendental numbers). A natural option is to tackle this problem by checking the condition $\psi$ periodically, which essentially reduces event-triggered programs into time-triggered ones. The cost is that the evaluation of a program might greatly diverge from the nominal behaviour, as discussed for instance in documents [4,6] where an analogous approach is discussed for the well-established simulation tools SIMULINK and MODELICA. In our case, we allow programs of the form $x_1' = t, \dots, x_n' = t$ until$_\epsilon$ $\psi$ in the tool and define them as the abbreviation of `while ¬ψ do { x_1' = t, \dots, x_n' = t for ε }`. This sort of abbreviation has the advantage of avoiding spurious evaluations of hybrid programs w.r.t. the established semantics. We could indeed easily allow such event-triggered programs natively in our language (i.e. without recurring to +---PAGE_BREAK--- + +Fig. 6: Position of the bouncing ball over time (plot on the left); zoomed in position of the bouncing ball at the first bounce (plot on the right). + +abbreviations) and extend the semantics accordingly. 
But we prefer not to do this at the moment, because we wish first to fully understand the ways of limiting spurious computational evaluations arising from event-triggered programs. + +*Remark 3.* SIMULINK and MODELICA are powerful tools for simulating hybrid systems, but lack a well-established, formal semantics. This is discussed for example in [3,9], where the authors aim to provide semantics to subsets of SIMULINK and MODELICA. Getting inspiration from control theory, the language of SIMULINK is circuit-like, block-based; the language of MODELICA is *acausal* and thus particularly useful for modelling electric circuits and the like which are traditionally modelled by systems of equations. + +*Example 3 (Bouncing Ball)*. As an illustration of the approach described above for event-triggered programs, take a bouncing ball dropped at a positive height $p$ and with no initial velocity $v$. Due to the gravitational acceleration $g$, it falls to the ground and bounces back up, losing part of its kinetic energy in the process. This can be approximated by the following hybrid program + +$$ (p' = v, v' = g \ \mathbf{until}_{0.01} p \le 0 \land v \le 0); (v := v \times -0.5) $$ + +where 0.5 is the dampening factor of the ball. We now want to drop the ball from a specific height (e.g. 5 meters) and let it bounce until it stops. Abbreviating the previous program into $b$, this behaviour can be approximated by $p := 5; v := 0; while true do { b}$. Figure 6 presents the trajectory generated by the ball (calculated by LINCE). Note that since $\epsilon = 0.01$ the ball reaches below ground, as shown in Figure 6 on the right. Other examples of event- and time-triggered programs can be seen in LINCE's website. + +# 8 Conclusions and future work + +We introduced small-step and big-step operational semantics for hybrid programs suitable for implementation purposes and provided a denotational counterpart via the notion of Elgot monad. 
These semantics were then linked by a soundness and adequacy theorem [37]. We regard these results as a stepping stone for developing computational tools and techniques for hybrid programming; which we attested +---PAGE_BREAK--- + +with the development of LINCE. With this work as basis, we plan to explore the +following research lines in the near future. + +**Program equivalence.** Our denotational semantics entails a natural notion of program equivalence (denotational equality) which inherently includes classical laws of iteration and a powerful uniformity principle [33], thanks to the use of Elgot monads. We intend to further explore the equational theory of our language so that we can safely refactor/simplify hybrid programs. Note that the theory includes equational schema like `(x := a; x := b) = x := b` and `(wait a; wait b) = wait (a + b)` thus encompassing not only usual laws of programming but also axiomatic principles behind the notion of time. + +**New program constructs.** Our while-language is intended to be as simple as possible whilst harbouring the core, uncontroversial features of hybrid programming. This was decided so that we could use the language as both a theoretical and practical basis for advancing hybrid programming. A particular case that we wish to explore next is the introduction of new program constructs, including e.g. non-deterministic or probabilistic choice and exception operations `raiseware`. Denotationally, the fact that we used monadic constructions readily provides a palette of techniques for this process, e.g. tensoring and distributive laws [22,23]. + +**Robustness.** A core aspect of hybrid programming is that programs should be *robust*: small variations in their input should *not* result in big changes in their output [32,21]. We wish to extend LINCE with features for detecting non-robust programs. 
A main source of non-robustness are conditional statements `if b then p else q`: very small changes in their input may change the validity of b and consequently cause a switch between (possibly very different) execution branches. Currently, we are working on the systematic detection of non-robust conditional statements in hybrid programs, by taking advantage of the notion of $\delta$-perturbation [20]. + +**Acknowledgements** The first author would like to acknowledge support of German Research Council (DFG) under the project A High Level Language for Monad-based Processes (GO 2161/1-2). The second author was financed by the ERDF – European Regional Development Fund through the Operational Programme for Competitiveness and Internationalisation – COMPETE 2020 Programme and by National Funds through the Portuguese funding agency, FCT – Fundação para a Ciência e a Tecnologia, within project POCI-01-0145-FEDER-030947. The third author was partially supported by National Funds through FCT/MCTES, within the CISTER Research Unit (UIDB/04234/2020); by COMPETE 2020 under the PT2020 Partnership Agreement, through ERDF, and by national funds through the FCT, within project POCI-01-0145-FEDER-029946; by the Norte Portugal Regional Operational Programme (NORTE 2020) under the Portugal 2020 Partnership Agreement, through ERDF and also by national funds through the FCT, within project NORTE-01-0145-FEDER-028550; and by the FCT within project ECSEL/0016/2019 and the ECSEL Joint Undertaking (JU) under grant agreement No 876852. The JU receives support from the European Union's Horizon 2020 research and innovation programme and Austria, Czech Republic, Germany, Ireland, Italy, Portugal, Spain, Sweden, Turkey. +---PAGE_BREAK--- + +References + +1. J. Adámek, H. Herrlich, and G. Strecker. *Abstract and concrete categories*. John Wiley & Sons Inc., New York, 1990. + +2. J. Adámek, S. Milius, and J. Velebil. Elgot theories: a new perspective on the equational properties of iteration. 
*Mathematical Structures in Computer Science*, 21(2):417–480, 2011. + +3. O. Bouissou and A. Chapoutot. An operational semantics for Simulink's simulation engine. In *ACM SIGPLAN Notices*, vol. 47, pp. 129–138. ACM, 2012. + +4. D. Broman. Hybrid simulation safety: Limbos and zero crossings. In *Principles of Modeling*, pp. 106–121. Springer, 2018. + +5. Z. Chaochen, C. A. R. Hoare, and A. P. Ravn. A calculus of durations. *Information Processing Letters*, 40(5):269–276, 1991. + +6. D. A. Copp and R. G. Sanfelice. A zero-crossing detection algorithm for robust simulation of hybrid systems jumping on surfaces. *Simulation Modelling Practice and Theory*, 68:1–17, 2016. + +7. T. L. Diezel and S. Goncharov. Towards Constructive Hybrid Semantics. In Z. M. Ariola, ed., *5th International Conference on Formal Structures for Computation and Deduction (FSCD 2020)*, vol. 167 of LIPIcs, pp. 24:1–24:19, Dagstuhl, Germany, 2020. Schloss Dagstuhl–Leibniz-Zentrum für Informatik. + +8. C. Elgot. Monadic computation and iterative algebraic theories. In *Studies in Logic and the Foundations of Mathematics*, vol. 80, pp. 175–230. Elsevier, 1975. + +9. S. Foster, B. Thiele, A. Cavalcanti, and J. Woodcock. Towards a UTP semantics for Modelica. In *International Symposium on Unifying Theories of Programming*, pp. 44–64. Springer, 2016. + +10. P. Fritzson. *Principles of object-oriented modeling and simulation with Modelica 3.3: a cyber-physical approach*. John Wiley & Sons, 2014. + +11. R. Goebel, R. G. Sanfelice, and A. R. Teel. Hybrid dynamical systems. *IEEE Control Systems*, 29(2):28–93, 2009. + +12. S. Goncharov, J. Jakob, and R. Neves. A semantics for hybrid iteration. In *29th International Conference on Concurrency Theory, CONCUR 2018*. Schloss Dagstuhl – Leibniz-Zentrum fuer Informatik, 2018. + +13. S. Goncharov, J. Jakob, and R. Neves. A semantics for hybrid iteration. CoRR, abs/1807.01053, 2018. + +14. S. Goncharov and R. Neves. 
An adequate while-language for hybrid computation. In *Proceedings of the 21st International Symposium on Principles and Practice of Programming Languages 2019*, PPDP ’19, pp. 11:1–11:15, New York, NY, USA, 2019. ACM. + +15. S. Goncharov, R. Neves, and J. Proença. Implementing hybrid semantics: From functional to imperative. CoRR, abs/2009.14322, 2020. + +16. S. Goncharov, L. Schröder, C. Rauch, and M. Piróg. Unifying guarded and un-guarded iteration. In *International Conference on Foundations of Software Science and Computation Structures*, pp. 517–533. Springer, 2017. + +17. T. A. Henzinger. The theory of hybrid automata. In *LICS96: Logic in Computer Science, 11th Annual Symposium, New Jersey, USA, July 27-30, 1996*, pp. 278–292. IEEE, 1996. + +18. P. Höfner and B. Möller. An algebra of hybrid systems. *The Journal of Logic and Algebraic Programming*, 78(2):74–97, 2009. + +19. J. J. Huerta y Munive and G. Struth. Verifying hybrid systems with modal kleene algebra. In J. Desharnais, W. Guttmann, and S. Joosten, eds., *Relational* +---PAGE_BREAK--- + +*and Algebraic Methods in Computer Science*, pp. 225–243, Cham, 2018. Springer International Publishing. + +20. S. Kong, S. Gao, W. Chen, and E. Clarke. dreach: $\delta$-reachability analysis for hybrid systems. In *International Conference on TOOLS and Algorithms for the Construction and Analysis of Systems*, pp. 200–205. Springer, 2015. + +21. D. Liberzon and A. S. Morse. Basic problems in stability and design of switched systems. *IEEE Control Systems*, 19(5):59–70, 1999. + +22. C. Lüth and N. Ghani. Composing monads using coproducts. In M. Wand and S. L. P. Jones, eds., *ICFP'02: Functional Programming, 7th ACM SIGPLAN International Conference, Pittsburgh, USA, October 04 - 06, 2002*, pp. 133–144. ACM, 2002. + +23. E. Manes and P. Mulry. Monad compositions I: general constructions and recursive distributive laws. *Theory and Applications of Categories*, 18(7):172–208, 2007. + +24. E. Moggi. 
Computational lambda-calculus and monads. In *Proceedings of the Fourth Annual Symposium on Logic in Computer Science (LICS '89), Pacific Grove, California, USA, June 5-8, 1989*, pp. 14–23. IEEE Computer Society, 1989. + +25. E. Moggi. Notions of computation and monads. *Information and computation*, 93(1):55–92, 1991. + +26. R. Neves. *Hybrid programs*. PhD thesis, Minho University, 2018. + +27. P. C. Ölveczky and J. Meseguer. Semantics and pragmatics of real-time maude. *Higher-order and symbolic computation*, 20(1-2):161–196, 2007. + +28. A. Platzer. Differential dynamic logic for hybrid systems. *Journal of Automated Reasoning*, 41(2):143–189, 2008. + +29. A. Platzer. *Logical Analysis of Hybrid Systems: Proving Theorems for Complex Dynamics*. Springer, Heidelberg, 2010. + +30. R. R. Rajkumar, I. Lee, L. Sha, and J. Stankovic. Cyber-physical systems: the next computing revolution. In *DAC'10: Design Automation Conference, 47th ACM/IEEE Conference, Anaheim, USA, June 13-18, 2010*, pp. 731–736. IEEE, 2010. + +31. W. Stein et al. *Sage Mathematics Software (Version 6.4.1)*. The Sage Development Team, 2015. http://www.sagemath.org/. + +32. R. Shorten, F. Wirth, O. Mason, K. Wulff, and C. King. Stability criteria for switched and hybrid systems. *Society for Industrial and Applied Mathematics (review)*, 49(4):545–592, 2007. + +33. A. Simpson and G. Plotkin. Complete axioms for categorical fixed-point operators. In *Logic in Computer Science, LICS 2000*, pp. 30–41, 2000. + +34. K. Suenaga and I. Hasuo. Programming with infinitesimals: A while-language for hybrid system modeling. In *International Colloquium on Automata, Languages, and Programming*, pp. 392–403. Springer, 2011. + +35. T. Uustalu. Generalizing substitution. *RAIRO-Theoretical Informatics and Applications*, 37(4):315–336, 2003. + +36. R. van Glabbeek. The linear time-branching time spectrum (extended abstract). In *Theories of Concurrency, CONCUR 1990*, vol. 458, pp. 278–297, 1990. + +37. G. Winskel. 
*The formal semantics of programming languages: an introduction*. MIT press, 1993. + +38. H. Witsenhausen. A class of hybrid-state continuous-time dynamic systems. *IEEE Transactions on Automatic Control*, 11(2):161–167, 1966. \ No newline at end of file diff --git a/samples_new/texts_merged/3148538.md b/samples_new/texts_merged/3148538.md new file mode 100644 index 0000000000000000000000000000000000000000..deb17a9768d19384fbb9abe9df317682e0575f6f --- /dev/null +++ b/samples_new/texts_merged/3148538.md @@ -0,0 +1,141 @@ + +---PAGE_BREAK--- + +A CORRECTION TO “THE CONNECTIVITY +STRUCTURE OF THE HYPERSPACES $C_\epsilon(X)$” + +by +ERIC L. McDOWELL + +Electronically published on February 19, 2009 + +Topology Proceedings + +**Web:** http://topology.auburn.edu/tp/ + +**Mail:** Topology Proceedings +Department of Mathematics & Statistics +Auburn University, Alabama 36849, USA + +**E-mail:** topolog@auburn.edu + +**ISSN:** 0146-4124 + +COPYRIGHT © by Topology Proceedings. All rights reserved. +---PAGE_BREAK--- + +A CORRECTION TO “THE CONNECTIVITY +STRUCTURE OF THE HYPERSPACES $C_{\epsilon}(X)$” + +ERIC L. McDOWELL + +ABSTRACT. We demonstrate that Proposition 3.1 of [Eric L. McDowell and B. E. Wilder, *The connectivity structure of the hyperspaces* $C_{\epsilon}(X)$, Topology Proc. **27** (2003), no. 1, 223–232] is false by constructing a locally connected metric continuum which admits a non-locally connected small-point hyperspace. + +Let $X$ be a continuum with metric $d$. For any $\epsilon > 0$ the set $C_{d,\epsilon}(X) = \{A \in C(X) : \text{diam}_d(A) \le \epsilon\}$ is called a *small-point hyperspace* of $X$. The notation $C_{\epsilon}(X)$ is used when the metric on $X$ is understood. + +Proposition 3.1 of [2] asserts that $X$ is locally connected if and only if $C_{\epsilon}(X)$ is locally connected for every $\epsilon > 0$. 
While it is true that the local connectivity of $C_{\epsilon}(X)$ for every $\epsilon > 0$ implies the local connectivity of $X$, we show in this note that the reverse implication is false. + +Below we construct a locally connected continuum $X$ in $\mathbb{R}^3$ for which $C_{\epsilon}(X)$ fails to be locally connected for some $\epsilon > 0$. The metric considered on $X$ is the usual metric inherited from $\mathbb{R}^3$. All + +2000 Mathematics Subject Classification. Primary 54F15; Secondary 54B20. +Key words and phrases. cyclic connectedness, hyperspace, locally connected continuum. + +The author is grateful to Professor Sam B. Nadler, Jr. for questioning the validity of the proposition that this note addresses. The author is also grateful to the referee for suggestions which significantly enhanced this paper. + +©2009 Topology Proceedings. +---PAGE_BREAK--- + +points $(r, \theta, z)$ are described using the standard cylindrical coordinate system, and all concepts and notation which are used without definition can be found in [3]. The example is similar to [4, Example 2]. + +**Example 1.** For each $n = 1, 2, \dots$, let $S_n$ denote the circle described by $\{(1, \theta, n^{-1}) : 0 \le \theta < 2\pi\}$ and let $S_0 = \{(1, \theta, 0) : 0 \le \theta < 2\pi\}$. For each $n = 1, 2, \dots$ and each $i = 1, 2, \dots, 2^n$, let $A_i^n$ denote the straight line segment given by $\{(1, 2\pi i/2^n, z) : 0 \le z \le n^{-1}\}$. Define $X$ to be the continuum given by + +$$X = \left( \bigcup_{n=0}^{\infty} S_n \right) \cup \left( \bigcup_{n=1}^{\infty} \bigcup_{i=1}^{2^n} A_i^n \right).$$ + +It is straightforward to show that $X$ is a Peano continuum. We will now prove that $C_\epsilon(X)$ fails to be locally connected at the point $S_0$ when $\epsilon = 2$. 
+ +Let $\{U_1, \dots, U_k\}$ be an open cover of $S_0$ with the property that for every $n = 0, 1, \dots$ and every $i = 1, \dots, k$ it is true that + +$$ (1) \quad S_n - U_i \text{ is connected and has arc length greater than } 3\pi/2. $$ + +Observe that $\mathcal{U} = \langle U_1, \cdots, U_k \rangle$ is an open subset of $C(X)$ that contains $S_0$ as well as all $S_n$ for $n$ sufficiently large. Select $N$ such that $S_N \in \mathcal{U}$. We will prove that $C_\epsilon(X)$ fails to be locally connected at $S_0$ by showing that every arc in $\mathcal{U}$ with endpoints $S_0$ and $S_N$ must contain a point of diameter greater than 2. Let $f: [0, 1] \to \mathcal{U}$ be an embedding for which $f(0) = S_0$ and $f(1) = S_N$. Let $\pi: X \to S_N$ denote the natural projection map. For any subset $S \subset X$ we say that $(1, \theta, z) \in S$ is an *antipodal point* of $S$ provided that $(1, \theta + \pi, z')$ belongs to $S$ for some $z'$. We will denote the set of antipodal points of $S$ by $\mathrm{AP}(S)$. We now show that + +$$ (2) \quad (1, \theta, z) \in \mathrm{AP}(S) \text{ if and only if } (1, \theta, N^{-1}) \in \mathrm{AP}(\pi(S)). $$ + +To see (2), let $S \subset X$ and let $(1, \theta, z) \in \mathrm{AP}(S)$. By definition it follows that $(1, \theta + \pi, z')$ belongs to $S$ for some $z'$; thus, $\pi(1, \theta + \pi, z') = (1, \theta + \pi, N^{-1})$ belongs to $\pi(S)$. Since $(1, \theta, N^{-1}) = \pi(1, \theta, z) \in \pi(S)$, it follows that $(1, \theta, N^{-1}) \in \mathrm{AP}(\pi(S))$. The argument for the converse is similar. + +If $M \in \mathcal{U}$ and $M \subset S_N$, then there exists an arc $A$ (possibly empty) such that $M$ is the closure of $S_N - A$; thus, the only elements +---PAGE_BREAK--- + +of $M - AP(M)$ are the points that are diametrically opposed to the interior points of A. Therefore, $AP(M)$ is either $S_N$ (if $A = \emptyset$) or the union of two disjoint arcs. 
Since $f(t)$ is a continuum for each $0 \le t \le 1$, it follows from continuity that + +(3) $AP(\pi(f(t)))$ is either $S_N$ or the union of two disjoint arcs. + +Continuity also shows that the intersection of $\pi^{-1}(AP(\pi(f(t)))))$ and $f(t)$ is closed; moreover, it follows from (2) that this intersection is equal to $AP(f(t))$. Therefore, we have that + +(4) $AP(f(t))$ is closed for every $0 \le t \le 1$. + +Suppose that $(1, \theta, z) \in AP(f(t))$; then $(1, \theta + \pi, z') \in f(t)$ for some $z'$. If $z' \neq z$, then $(1, \theta, z)$ and $(1, \theta + \pi, z')$ are more than two units apart. Moreover, if $(1, \theta, z) \in AP(f(t)) - \bigcup_{n=0}^{\infty} S_n$, then it follows from the connectivity of $f(t)$ that there must exist some $z'' \neq z$ with $(1, \theta + \pi, z'') \in f(t)$. It follows that + +(5) if $AP(f(t)) - \bigcup_{n=0}^{\infty} S_n \neq \emptyset$ then $\text{diam}(f(t)) > 2$. + +We now show that there exists some $t_0 \in [0, 1]$ for which the diameter of $f(t_0)$ is greater than 2. Begin by defining + +$$t' = \min\{t : [0, 1] : AP(f(t)) \cap S_N \neq \emptyset\}.$$ + +Suppose that $t' = 1$. Choose $\gamma > 0$ small enough such that the $\gamma$-ball, $\mathcal{B}$, about $S_N$ has the properties that $\mathcal{B} \subset \mathcal{U}$ and $S_n \cap (\cup \mathcal{B}) = \emptyset$ for all $n \neq N$. Choose $\delta > 0$ such that if $t \in (1 - \delta, 1]$ then $H_d(f(t), S_N) < \gamma$. Let $t_0 \in (1 - \delta, 1)$. By (3) we have that $AP(f(t_0)) \neq \emptyset$. However, since $t_0 < t'$ we have by the definition of $t'$ and our choice of $\gamma$ that $AP(f(t_0)) - \bigcup_{n=0}^{\infty} S_n \neq \emptyset$. Therefore, $\text{diam}(f(t_0)) > 2$ by (5). + +Now suppose that $t' < 1$. Let $q = (1, \theta, z) \in AP(f(t')) \cap S_N$ and let $q' \in f(t') \cap \pi^{-1}(1, \theta+\pi, z)$. We may assume that $q' = (1, \theta+\pi, z)$ since $d(q, q') > 2$ otherwise. 
Using (3), we have that $AP(\pi(f(t')))$ contains
We note that *K* is similar in structure to the continuum in the previous example; however, Cρ1(*K*) is locally connected when ρ1 is the usual metric inherited from R2. (Informally, observe that if a subcontinuum *A* of *K* is contained in an open subset U of C(X), then U also con- tains subsets of *A* with diameter smaller than that of *A*. By first shrinking *A* to a continuum with smaller diameter within U, one can then continuously grow continua to include a subset of a target subcontinuum within U before continuously releasing *A*.) + +Instead of considering the usual metric on $K$, let $h: K \to S^1 \times [0, 1]$ be an embedding which sends the leftmost vertical segment of $K$ to $\{(1, 0, z) : 0 \le z \le 1\}$ and the rightmost vertical segment of $K$ to $\{[1, 3\pi/2, z) : 0 \le z \le 1\}$, and which preserves the vertical and horizontal orientations of all subsets of $K$. Let $d$ denote the usual metric for $h(K)$ inherited from $\mathbb{R}^3$, and let $\rho_2$ denote the metric on $K$ given by $\rho_2(x, y) = d(h(x), h(y))$. Then an argument essentially identical to the one given in Example 1 can be used to show that $C_{\rho_2, \epsilon}(X)$ fails to be locally connected for $\epsilon = 2$. + +Noting that the small-point hyperspaces of the arc, circle, and +simple triod are all locally connected, while the examples provided +---PAGE_BREAK--- + +in this article admit non-locally connected small-point hyperspaces, +the referee suggests the following question. + +**Question 1.** *Are the small-point hyperspaces of an hereditarily locally connected continuum always locally connected?* + +Recall that a continuum is said to be *cyclicly connected* provided +that any two points of the continuum are contained in some simple +closed curve. Theorem 3.11 of [2] states that $C_{\epsilon}(X)$ is cyclicly +connected for every $\epsilon > 0$ whenever $X$ is locally connected; however, +the argument that is used to justify this assertion uses Proposition +3.1 of [2]. 
Therefore, the following question remains open. + +**Question 2.** If $X$ is a locally connected continuum with metric $\rho$, +must $C_{\rho,\epsilon}(X)$ be cyclicly connected for every $\epsilon > 0$? + +REFERENCES + +[1] K. Kuratowski, *Topology. Vol. II.* New edition, revised and augmented. Translated from the French by A. Kirkor. New York-London: Academic Press and Warsaw: PWN, 1968. + +[2] Eric L. McDowell and B. E. Wilder, *The connectivity structure of the hyperspaces Cε(X)*, *Topology Proc.* **27** (2003), no. 1, 223-232. + +[3] Sam B. Nadler, Jr. *Continuum Theory: An Introduction*. Monographs and Textbooks in Pure and Applied Mathematics, 158. New York: Marcel Dekker, Inc., 1992. + +[4] Sam B. Nadler, Jr. and Thelma West, *Size levels for arcs*, Fund. Math. **141** (1992), no. 3, 243–255. + +DEPARTMENT OF MATHEMATICS AND COMPUTER SCIENCE; BERRY COL- +LEGE; MOUNT BERRY, GEORGIA 30149-5014 + +*E-mail address: emcdowell@berry.edu* \ No newline at end of file diff --git a/samples_new/texts_merged/3193892.md b/samples_new/texts_merged/3193892.md new file mode 100644 index 0000000000000000000000000000000000000000..a1f42cecf259099fcef6b03cdf820ac9432e9692 --- /dev/null +++ b/samples_new/texts_merged/3193892.md @@ -0,0 +1,136 @@ + +---PAGE_BREAK--- + +# Anomalous VVH interactions at a linear collider + +SUDHANSU S BISWAL¹,*, DEBAJYOTI CHOUDHURY², +ROHINI M GODBOLE¹ and RITESH K SINGH³ + +¹Centre for High Energy Physics, Indian Institute of Science, Bangalore 560 012, India + +²Department of Physics and Astrophysics, University of Delhi, New Delhi 110 007, India + +³Laboratoire de Physique Théoretique, 91405 Orsay Cedex, France + +*E-mail: sudhansu@cts.iisc.ernet.in + +**Abstract.** We examine, in a model independent way, the sensitivity of a linear collider to the couplings of a light Higgs boson to a pair of gauge bosons, including the possibility of CP violation. We construct several observables that probe the various possible anomalous couplings. 
For an intermediate mass Higgs, a collider operating at a center of mass energy of 500 GeV and with an integrated luminosity of 500 fb⁻¹ is shown to be able to constrain the ZZH vertex at the few per cent level, with even higher sensitivity for some of the couplings. However, lack of sufficient number of observables as well as contamination from the ZZH vertex limits the precision to which anomalous part of the WWH coupling can be probed. + +**Keywords.** Anomalous Higgs couplings; linear collider. + +PACS Nos 13.66.Fg; 14.80.Cp; 14.70.Fm; 14.70.Hp + +## 1. Introduction + +The standard model (SM) of particle physics has been tested up to a high degree of accuracy, but the direct experimental verification of the phenomenon of spontaneous symmetry breaking is still pending. Various extensions of the SM have more than one Higgs boson whose CP parity and hypercharges may differ from those of the SM Higgs boson. The minimal supersymmetric standard model (MSSM) is one example of such an extended Higgs sector [1]. To establish the experimental observation of the SM Higgs boson it will be therefore, necessary to establish its properties such as hypercharge, CP parity etc. At an $e^+e^-$ collider the dominant Higgs production processes are $e^+e^- \to f\bar{f}H$, which proceed via the VVH coupling with $V = W, Z$ and $f$ any light fermion. Demanding Lorentz invariance, the VVH couplings can be parameterized as + +$$ \Gamma_{\mu\nu} = g_V \left[ a_V g_{\mu\nu} + \frac{b_V}{m_V^2} (k_{1\nu} k_{2\mu} - g_{\mu\nu} k_1 \cdot k_2) + \frac{\tilde{b}_V}{m_V^2} \epsilon_{\mu\nu\alpha\beta} k_1^\alpha k_2^\beta \right], \quad (1) $$ + +where $k_i$ denote the momenta of the two W's (Z's); $g_W^{SM} = e \cot \theta_W M_Z$ and $g_Z^{SM} = 2eM_Z/\sin 2\theta_W$. In general, all these anomalous couplings can be complex. For +---PAGE_BREAK--- + +simplicity we assume $a_V$ to be real and close to its SM value. 
For processes involving $VVH$ coupling alone we can choose, without loss of generality, $g_V = g_V^{SM}$ and $a_V = 1 + \Delta a_V$. We further assume $\Delta a_W = \Delta a_Z$ and keep terms up to linear order in the anomalous couplings. The analysis will be made for the ILC with center of mass energy 500 GeV and a Higgs boson of mass 120 GeV. We will use $H \to b\bar{b}$ final state and further assume b-quark detection efficiency of 0.7. The largest contribution comes from the process, $e^+e^- \to \nu_e\bar{\nu}_e H$. This process contains two missing neutrinos in the final state. However, this receives contributions from both the $WWH$ and $ZZH$ vertices. Hence one needs to look at $e^+e^- \to Z^*H \to f\bar{f}H$ to constrain $ZZH$ anomalous couplings and then make use of this information while probing $WWH$ couplings. + +## 2. Observables and kinematical cuts + +We have constructed various momentum combinations $C_i$ by taking dot and scalar triple products of different linear combinations of momenta. These combinations have been listed in table 1 with their transformation properties under discrete symmetries C, P and $\tilde{T}$, where the pseudotime reversal operator ($\tilde{T}$) reverses the momenta and spins of particles without interchanging their initial and final states. Then we construct observables ($O_i$) by taking the expectation values of the signs of various $C_i$'s, i.e. $O_i = \langle \text{sign}(C_i) \rangle$. Most of these observables have definite CP and $\tilde{T}$ properties and hence can be used directly to probe the anomalous coupling which has the same CP and $\tilde{T}$ properties. In our analysis we keep the terms only upto linear order in anomalous couplings $B_i$. So all observables can be written down as + +$$ \mathcal{O}(\{B_i\}) = \sum O_i B_i . $$ + +Measurements of these observables may be used to constrain the anomalous couplings. 
The possible sensitivity of these observables to the different anomalous couplings $B_i$, at a given degree of statistical significance $f$, can be obtained by demanding $|\mathcal{O}(\{B_i\}) - \mathcal{O}(\{0\})| \le f \delta\mathcal{O}$. Here $\mathcal{O}(\{0\})$ is the SM value of $\mathcal{O}$ and $\delta\mathcal{O}$ is the statistical fluctuation in $\mathcal{O}$. + +**Table 1.** List of momentum correlators, their discrete transformation properties and anomalous couplings they probe. $\vec{P}_e = \vec{p}_{e-} - \vec{p}_{e+}$, $\vec{P}_f^+ = \vec{p}_f + \vec{p}_{\bar{f}}$, $\vec{P}_{\bar{f}} = \vec{p}_{\bar{f}} - \vec{p}_{\bar{f}}$. + +
CorrelatorCPCPCPT̄Probe of
C0 1+++++aV, ℜ(bV)
C1e ⋅ ℬf+-+-+-Ī(b̃V)
C2 [ℬe ⋅ ℬf+] ⋅ ℬf-+---+&Reacr;(b̃V)
C3 [[ℬe ⋅ ℬf+] ⋅ ℬf-][ℬe ⋅ ℬf+]--+--Ī(bV)
C4 [[ℬe ⋅ ℬf+] ⋅ ℬf-][ℬe ⋅ ℬf-]×-×-×Ī(bV), ℜ(b̃V)
+ +Sudhansu S Biswal et al +---PAGE_BREAK--- + +Anomalous VVH interactions + +Statistical fluctuation in cross-section and in an asymmetry can be written as + +$$ +\Delta\sigma = \sqrt{\sigma_{\text{SM}}/\mathcal{L} + \epsilon^2 \sigma_{\text{SM}}^2}, \quad (2) +$$ + +$$ +(\Delta A)^2 = \frac{1 - A_{\text{SM}}^2}{\sigma_{\text{SM}} \mathcal{L}} + \frac{\epsilon^2}{2} (1 - A_{\text{SM}}^2)^2. \qquad (3) +$$ + +Here $\sigma_{\text{SM}}$ and $A_{\text{SM}}$ are the SM value of cross-section and asymmetry respectively. + +We choose the integrated luminosity $\mathcal{L} = 500 \text{ fb}^{-1}$, fractional systematic error $\epsilon = 0.01$ and $f = 3$. + +Various kinematical cuts we impose, to suppress dominant background to the signal, are 5° ≤ θ₀ ≤ 175°; E_b, E_̄, E_l-, E_l+ ≥ 10 GeV; pTmissing ν ≥ 15 GeV; ΔRq₁q₂ ≥ 0.7; ΔRl-l+ ≥ 0.2; ΔRl-b, ΔRl-̄, ΔRl+b, ΔRl+l̄ ≥ 0.4. + +Here $(\Delta R)^2 \equiv (\Delta\phi)^2 + (\Delta\eta)^2$ when $\Delta\phi$ and $\Delta\eta$ denote the separation between the two jets in azimuthal angle and rapidity respectively. + +We additionally impose cuts on the invariant mass of the $f\bar{f}$ system: + +$$ +R1 \equiv |m_{ff} - M_Z| \le 5 \Gamma_Z \quad \text{select Z-pole,} \tag{4} +$$ + +$$ +R2 \equiv |m_{f\bar{f}} - M_Z| \ge 5 \Gamma_Z \quad \text{de-select Z-pole.} \tag{5} +$$ + +These enhance or suppress the contribution from Z resonance in the Bjorken process respectively. $\Gamma_Z$ in the above is the width of Z boson. + +**3. ZZH couplings** + +To probe the anomalous ZZH couplings we consider $f\bar{f}$ final state, where $f$ is any light fermion other than neutrinos. As outlined above we can construct observables with definite CP and $\tilde{T}$ properties and thus can maximize sensitivity to the anomalous couplings for a chosen final state. One can use some of these variables to probe the anomalous couplings [1a]. + +Cross-section: (observable $O_0$ corresponding to correlator $C_0$). 
Total rates are CP and $\tilde{T}$ even quantities. Hence these can be used to constrain $\Delta a_Z$ and $\Re(b_Z)$. Total rates with $R1$ cut and $f = \mu, u, d, c, s$ can be used to probe $|\Re(b_Z)| > 0.48 \times 10^{-2}$. Similarly total cross-section for $f=e$ with $R2$ cut, $\sigma(R2; e)$ can probe $\Delta a_Z$ to $|\Delta a_Z| > 0.038$ at $3\sigma$ level. Figure 1a shows that the sensitivity to $\Re(b_Z)$ is correlated with $\Delta a_Z$, whereas the reverse is not true. + +*Forward-backward asymmetry (A₁):* We define the FB asymmetry $A_1$ with respect to the polar angle of Higgs boson. Since $A_1$ is CP odd and $\tilde{T}$ even, $A_1(R1; \mu, q)$ can be used to probe $\Im(\tilde{b}_Z)$. We find that this measurement can probe $|\Im(\tilde{b}_Z)| > 0.042$. + +*Up-down asymmetry (A₂):* $A_2$ is the up-down asymmetry corresponding to $f$ being above or below the H-production plane. It is a CP odd and $\tilde{T}$ odd observable and a real probe of $\Re(\tilde{b}_Z)$. Since this asymmetry requires charge determination of the final-state fermions, we cannot consider quarks in the final state. Hence using $A_2^{R2}(e)$ one will be able to constrain $|\Re(\tilde{b}_Z)| \le 0.064$ and it is shown by vertical lines in figure 1b. +---PAGE_BREAK--- + +Figure 1. Simultaneous $3\sigma$ limits on anomalous couplings with $L = 500 \text{ fb}^{-1}$: (a) $\Delta a_Z - \Re(b_Z)$ plane using cross-sections; (b) $\Re(\tilde{b}_Z) - \Im(\tilde{b}_Z)$ plane using various asymmetries. + +**Polar–azimuthal asymmetry ($A_3$):** $A_3$ is a mixed polar–azimuthal asymmetry combining polar angle of Higgs boson and azimuthal angle of $f$ with respect to Higgs production plane and is CP even and $\tilde{T}$ odd. So it is sensitive only to $\Im(b_Z)$. This asymmetry requires charge measurement of $f$, hence suitable only for $f = e, \mu$. This can give a sensitivity at $3\sigma$ level as $|\Im(b_Z)| \le 0.17$. 
The region inside the horizontal lines in figure 1b shows $3\sigma$ variation in $A_3$. + +**Another combined asymmetry ($A_4$):** We construct this combined asymmetry with respect to the polar and azimuthal angles of final state $f$. Although $A_4$ is $\tilde{T}$ odd, it does not have any definite CP property. So it is sensitive to both $\Im(b_Z)$ and $\Re(\tilde{b}_Z)$. Also $A_4$ requires charge determination of $f$ and hence we cannot consider quarks in the final-state for this observable. But we consider only $f = \mu$, because for $f = e$ many anomalous couplings contribute significantly with R1 cut. The corresponding constraint is shown in figure 1b with slant lines. + +In table 2 we list all the achievable limits obtained above. We emphasize that all of them, except for $\Delta a_Z$ and $\Re(b_Z)$, are independent of other anomalous couplings. Table 2 shows that the constraint on $\Re(b_Z)$ depends on $\Delta a_Z$. Also $\tilde{T}$-odd observables require charge measurement of final-state fermions and hence quarks in the final-state cannot be considered to probe $\tilde{T}$-odd couplings leading to rather poor sensitivity to them. +---PAGE_BREAK--- + +Anomalous VVH interactions + +**Table 2.** Sensitivity achievable at 3σ level for various anomalous couplings with L = 500 fb⁻¹. + +
Coupling3σ BoundObservable used
|ΔaZ|0.038σ with R2 cut; f = e-
|Re(bZ)|{ 0.0048 (ΔaZ = 0)
0.013 (|ΔaZ| = 0.038)
σ with R1 cut; f = μ, q
|ℑ(bZ)|0.17
|Re(̃bZ)|0.064A2e-) with R2 cut
|ℑ(b̃Z)|0.042
+ +**Table 3.** Individual 3σ limits of sensitivity. + +
CouplingLimitObservable used
|Δa| ≤ 0.018σR2
|Re(bW)| ≤ 0.098σR2
|ℑ(bW)| ≤ 0.62
|Re(̃bW)| ≤ 1.6A1FB(cH)
|ℑ(b̃W)| ≤ 0.39
+ +**Table 4.** Simultaneous 3σ limits of sensitivity. + +
CouplingΔa = 0Δa ≠ 0
|Δa| ≤ –0.038
|Re(bW)| ≤ 0.100.31
|ℑ(bW)| ≤ 1.6
|Re(̃bW)| ≤ 3.23.2
|ℑ(b̃W)| ≤ 0.44
+ +## 4. WWH couplings + +Due to missing neutrinos in the final state here one can only construct two observables: cross-section and forward-backward asymmetry with respect to polar angle of Higgs boson. Any deviation from SM value for cross-section largely depends on Δa$_{V}$ and Re(b$_{V}$) (CP even, T̄ even). Similarly, FB asymmetry receives a large contribution from Ξ(¯b$_{V}$) (CP odd, T̄ even). Hence there is no other direct observable to probe the remaining anomalous couplings. Assuming Δa$_{Z}$ = Δa$_{W}$ = Δa, we calculate the expressions for both the observables with R1 and R2 cuts. In table 3 we list the individual limits of sensitivity on the various anomalous couplings at 3σ level. To see what the sensitivity will be when all the anomalous couplings were to be nonzero, we construct a nine-dimensional region in parameter space and take a point from that region and calculate all the observables simultaneously. If the difference from their SM values due to these anomalous couplings is within the statistical fluctuation in SM values of these observables, then we say that the point is inside the blind region. The points on the boundary of this region give us the simultaneous limit of sensitivity of these measurements to the anomalous couplings. These are listed in table 4. These tables show that the lack of a specific observable to probe T̄-odd couplings results in rather poor sensitivity to them. For more details, see [2]. +---PAGE_BREAK--- + +Sudhansu S Biswal et al + +5. Conclusion + +We have analyzed the sensitivity of the process $e^{+}e^{-} \rightarrow f\bar{f}H$, $f$ being a light fermion and probe different anomalous couplings. We implement various kinematical cuts on the different final-state particles so as to reduce background and also take into account finite b-tagging efficiency. When these effects are removed, our analysis reproduces the results of [4]. 
Although the observables constructed using optimal observable analysis [3] have maximum sensitivity to the anomalous couplings, they are a little opaque to the physics that is being probed. The observables that we have constructed by taking expectation values of sign of the correlators are simple to construct and most of them have definite CP and $\tilde{T}$ properties thus probing specific anomalous couplings. Apart from $\Re(b_V)$ and $\Delta a_V$, constraints on all the other anomalous couplings can be obtained using asymmetries and hence are robust to the effects of radiative corrections. + +References + +[1] See, for example, M Drees, R M Godbole and P Roy, *Theory and phenomenology of sparticles* (World Scientific, Singapore, 2004) + +[1a] For detailed definition, see [2] + +[2] Sudhansu S Biswal, Debajyoti Choudhury, Rohini M Godbole and Ritesh K Singh, *Phys. Rev. D73*, 035001 (2006) + +[3] K Hagiwara, S Ishihara, J Kamoshita and B A Kniehl, *Euro. Phys. J. C14*, 457 (2000) + +[4] T Han and J Jiang, *Phys. Rev. 
D63*, 096007 (2001) \ No newline at end of file diff --git a/samples_new/texts_merged/3224121.md b/samples_new/texts_merged/3224121.md new file mode 100644 index 0000000000000000000000000000000000000000..858acb2dbbe276bc9802f50d17f95c0529f6fac5 --- /dev/null +++ b/samples_new/texts_merged/3224121.md @@ -0,0 +1,735 @@ + +---PAGE_BREAK--- + +Cooperation and dependencies in multipartite systems + +Waldemar Kłobus,¹ Marek Miller,² Mahasweta Pandit,¹ Ray Ganardi,¹,³ Lukas Knips,⁴,⁵,⁶ Jan Dziewior,⁴,⁵,⁶ +Jasmin Meinecke,⁴,⁵,⁶ Harald Weinfurter,⁴,⁵,⁶ Wiesław Laskowski,¹,³ and Tomasz Paterek¹,²,⁷ + +¹Institute of Theoretical Physics and Astrophysics, Faculty of Mathematics, +Physics and Informatics, University of Gdańsk, 80-308 Gdańsk, Poland + +²School of Physical and Mathematical Sciences, Nanyang Technological University, 637371 Singapore + +³International Centre for Theory of Quantum Technologies, University of Gdańsk, 80-308 Gdańsk, Poland + +⁴Max-Planck-Institut für Quantenoptik, Hans-Kopfermann-Straße 1, 85748 Garching, Germany + +⁵Department für Physik, Ludwig-Maximilians-Universität, Schellingstraße 4, 80799 München, Germany + +⁶Munich Center for Quantum Science and Technology (MCQST), Schellingstraße 4, 80799 München, Germany + +⁷MajuLab, International Joint Research Unit UMI 3654, +CNRS, Université Côte d'Azur, Sorbonne Université, +National University of Singapore, Nanyang Technological University, Singapore + +We propose an information-theoretic quantifier for the advantage gained from cooperation that captures the degree of dependency between subsystems of a global system. The quantifier is distinct from measures of multipartite correlations despite sharing many properties with them. It is directly computable for classical as well as quantum systems and reduces to comparing the respective conditional mutual information between any two subsystems. Secret sharing provides an exemplary cooperation task where this quantifier is beneficial. 
Based on the new quantifier we prove an inequality characterizing the lack of monotonicity of conditional mutual information under local operations and provide intuitive understanding for it. + +I. INTRODUCTION + +Identifying and quantifying dependencies in multipartite systems enable their analysis and provides a better understanding of complex phenomena. The problem has been addressed by several communities, considering both classical and quantum systems. For example, in neuroscience and genetics measures of multipartite synergy were put forward [1–6], in quantitative sociology quantifiers of coordination were introduced [7], and in physics and information processing quantities aimed at characterizing genuine multiparty correlations were studied in depth [8–13]. The former quantifiers are motivated mathematically, keeping the combinatorial aspects of complex systems in mind, e.g., the synergy is the difference in the information all subsystems have about an extra system as compared to the total information contained in any subset of the systems. Many of the latter quantifiers involve difficult optimizations and are therefore hard to compute. Here, we introduce an operationally defined, simple and computable quantifier of multipartite dependency in terms of information gain from cooperation when some parties meet and try to deduce the variables of some of the remaining parties. We show how it differs from multipartite correlations, prove its essential properties and discuss the application to quantum secret sharing. + +It turns out that, in order to compute the quantity introduced here, it is sufficient to consider the respective conditional mutual information between only two subsystems. Therefore, any operational meaning of the conditional mutual information, e.g., in terms of communication cost of quantum state redistribution [14, 15], applies to the dependence measure as well. 
In this context, we prove an inequality which characterizes the lack of monotonicity of quantum conditional mutual informa- + +tion under general local operations. + +II. MULTIPARTITE DEPENDENCE + +Let us begin by briefly recalling fundamental relationships, e.g., that two classical variables $X_1$ and $X_2$ are statistically independent if their probabilities satisfy $P(X_1|X_2) = P(X_1)$. Alternatively, the statistical independence can be stated in terms of entropies with the help of both the Shannon entropy $H(X) = -\sum_{i=1}^{d} P(x_i) \log_d P(x_i)$, where $d$ is the number of outcomes, and the conditional entropy $H(X|Y) = -\sum_{i,j} P(x_i, y_j) \log_d \frac{P(x_i, y_j)}{P(y_j)}$. As a measure of dependence of two variables $X_1$ and $X_2$ one introduces the corresponding entropic difference $H(X_1) - H(X_1|X_2)$, the so-called mutual information $I(X_1: X_2)$ [16]. Similarly, the quantum mutual information captures the dependence between quantum subsystems [17]. However, already in the case of three variables there are two levels of independence. The variable $X_1$ can be independent of all other variables, i.e., $P(X_1|X_2X_3) = P(X_1)$, or it can be conditionally independent of one of them, e.g., $P(X_1|X_2X_3) = P(X_1|X_2)$. The former dependence is again captured by the mutual information $I(X_1: X_2X_3)$, while the so-called conditional mutual information $I(X_1: X_3|X_2) = H(X_1|X_2) - H(X_1|X_2X_3)$ considers the latter. It is thus natural to define the *tripartite dependence* as the situation where any variable depends on all the other variables. This can be quantified as the worst case conditional mutual information + +$$D_3 \equiv \min[I(X_1 : X_2 | X_3), I(X_1 : X_3 | X_2), \\ I(X_2 : X_3 | X_1)]. \quad (1)$$ +---PAGE_BREAK--- + +Due to strong subadditivity the conditional mutual in- +formation is non-negative and hence $D_3 \ge 0$ [18]. 
$D_3$ +vanishes if and only if there exists a variable such that +already a subset of the remaining parties can gain the +maximally accessible information about the variable in +question. Note that this condition is also satisfied if a +variable is not correlated with the rest of the system at +all. + +The value of $\mathcal{D}_3$ can be interpreted using an alternative expression for conditional mutual information, e.g., $I(X_1: X_3|X_2) = I(X_1: X_2X_3) - I(X_1: X_2)$. Reformulating now (1), one recognizes that $\mathcal{D}_3$ expresses the gain in information about the first subsystem that the second party has from cooperating with the third party. Accordingly, nonzero $\mathcal{D}_3$ ensures that any two parties always gain through cooperation when accessing the knowledge about the remaining subsystem. The minimal gain over the choice of parties is an alternative way to compute $\mathcal{D}_3$. + +In the context of quantum subsystems we can +rewrite the conditional mutual information as $I(X_1 : X_3|X_2) = S(X_1|X_2) + S(X_3|X_2) - S(X_1X_3|X_2)$, where +e.g. $S(X_1|X_2)$ is the conditional entropy based on the +von Neumann entropy $S(\cdot)$. Since $S(X_1|X_2)$ is the entan- +glement cost of merging a state $X_1$ with $X_2$, see Ref. [19], +we can interpret the conditional mutual information as +the extra cost of merging states one by one ($X_1$ with $X_2$ +and $X_3$ with $X_2$) instead of altogether ($X_1X_3$ with $X_2$). +$\mathcal{D}_3$ is the minimum extra cost of this merging. + +*Secret sharing.*—An example of an intuitive applica- +tion of $\mathcal{D}_3$ is (quantum) secret sharing [20–23]. In the +tripartite setting, secret sharing requires collaboration of +two parties in order to read out the secret of the remain- +ing party. In the classical version of this problem the se- +cret is a random variable, e.g., the measurement outcome +of, say, the first observer. 
It is thus required that both, +the second as well as the third party alone has only little +or no information about the secret, i.e., $I(X_1: X_2)$ and +$I(X_1: X_3)$ are small, while both of them together can +reveal the result of the first observer, i.e., $I(X_1: X_2X_3)$ +is large or unity. It is clear that the value of $\mathcal{D}_3$ (close to +its maximum) yields a measure for the working of secret +sharing. Furthermore, due to the minimization in (1), the +secret can be generated at any party. Below we derive +the classical distributions with large $\mathcal{D}_3$ as well as quan- +tum states which achieve maximal dependence. Quite +surprisingly these are mixed states belonging to the class +of so-called k-uniform states [24]. It turns out that these +states have perfect correlations along complementary lo- +cal measurements and therefore, by following the proto- +col in [22], the quantum solution to the secret sharing +problem offers additionally security against eavesdrop- +ping. In Appendix E we show that these states enable +perfect sharing of a quantum secret (unknown quantum +state) and that the value of dependence provides a lower +bound on the quality of quantum secret sharing for a class +of states. See Ref. [25] for an example of secret sharing +with a class of pure k-uniform states. + +*Correlations and dependence.*—Before we generalize to + +an arbitrary number of parties and present the properties +of the resulting $\mathcal{D}_N$, let us give a simple example that il- +lustrates the difference between multipartite correlations +and multipartite dependence. Consider three classical bi- +nary random variables described by the joint probability +distribution $P(000) = P(111) = \frac{1}{2}$. All three variables +are clearly correlated as confirmed, e.g., by quantifiers +introduced in Refs. [12, 13]. 
However, the knowledge of, +say, the first party about the third party does not in- +crease if the first observer is allowed to cooperate with +the second one. By examining her data, the first ob- +server knows the variables of both remaining parties and +any cooperation with one of them does not change this. +There is no information gain and hence this distribution +has vanishing tripartite dependence. + +On the other hand, let us consider the joint proba- +bility distribution with $P(000) = P(011) = P(101) =$ +$P(110) = \frac{1}{4}$, which can describe also a classical system. +Any two variables in this distribution are completely un- +correlated, but any two parties can perfectly decode the +value of the remaining variable. Hence the gain from co- +operation is 1 and so is the value of $\mathcal{D}_3$. This quantifier is +thus very good for identifying the suitability of a system +for secret sharing, where the secret could be at any party. + +*Larger systems.*—Moving on to more complex systems, +we note that there are more conditions to be considered +already in order to define the four-partite dependence. +In analogy to the tripartite case the first condition is +to require that cooperation of any triple of parties pro- +vides more information about the remaining subsystem, +e.g., $I(X_1: X_2X_3X_4) - I(X_1: X_2X_3)$ must be positive. +But one should also impose that cooperation between +any pair brings information gain about the two remain- +ing variables, e.g., $I(X_1X_2: X_3X_4) - I(X_1X_2: X_3)$ must +be positive. The former condition demands a positive +conditional mutual information, $I(X_1: X_4|X_2X_3) > 0$, +while the latter one requires $I(X_1X_2: X_4|X_3) > 0$. In +order to compute $\mathcal{D}_4$ one takes the minimum of these +two conditional mutual informations over all permuta- +tions of subsystems. 
Note, however, that, e.g., $I(X_1X_2: +X_4|X_3) \ge I(X_1: X_4|X_2X_3)$ and therefore it is sufficient +to minimize over the conditional mutual information be- +tween two variables only. We emphasize that this step +simplifies the computation significantly. The same argu- +ment applies for arbitrary $N$ and leads to the definition +of $N$-partite dependence + +$$ +\mathcal{D}_N \equiv \min_{\text{perm}} I(X_1 : X_2 | X_3 \dots X_N), \quad (2) +$$ + +where the minimum is taken over all permutations of the +subsystems. In the case of a quantum system in state ρ +we obtain + +$$ +\mathcal{D}_N(\rho) = \min_{j,k} [S(\operatorname{Tr}_j \rho) + S(\operatorname{Tr}_k \rho) - S(\operatorname{Tr}_{jk} \rho) - S(\rho)], \quad (3) +$$ + +where *j*, *k* = 1...*N* and *j* ≠ *k*. Tr*j*ρ denotes a partial trace over the subsystem *j*. In general, calculating the N-partite dependence requires computation and comparison +---PAGE_BREAK--- + +of $\binom{N}{2}$ values, i.e., scales polynomially as $N^2$, whereas for +permutationally invariant systems it is straightforward. + +One may also like to study *k*-partite dependencies +within an *N*-partite system. To this aim we propose to +apply the definitions above to any *k*-partite subsystem +and take the minimum over the resulting values. + +### III. PROPERTIES + +The maximal *N*-partite dependence over classical dis- +tributions of *d*-valued variables is given by 1 (recall that +our logarithms are base *d*) and follows from the fact that +classical mutual information cannot exceed the entropy +of each variable. On the other hand, quantum mutual in- +formation is bounded by 2 and this is the bound on $\mathcal{D}_N$ +optimized over quantum states (see Appendix D). This +bound is achieved by mixed states belonging to the class +of *k*-uniform states, in particular for $k = N - 1$ [24]. 
In +the case of *N* qubits (for *N* even) the optimal states have +the following form + +$$ \rho_{\max} = \frac{1}{2^N} \left( \sigma_0^{\otimes N} + (-1)^{N/2} \sum_{j=1}^{3} \sigma_j^{\otimes N} \right), \quad (4) $$ + +where $\sigma_j$ are the Pauli matrices and $\sigma_0$ denotes the $2 \times 2$ +identity matrix. Note that $\rho_{\max}$ is permutationally in- +variant and gives rise to perfect correlations or anti- +correlations when all observers measure locally the same +Pauli observable. These states are known as the general- +ized bound entangled Smolin states [26, 27]. They are a +useful quantum resource for multiparty communication +schemes [28] and were experimentally demonstrated in +Refs. [29–34]. Per definition for (N − 1)-uniform states +all reduced density matrices are maximally mixed, with +vanishing mutual information, whereas the whole system +is correlated. In Appendix D we provide examples of +states which maximize $\mathcal{D}_N$ for arbitrary $d$ and show in +general that the only states achieving the maximal quan- +tum value of 2 are (N − 1)-uniform. + +Let us also offer an intuition for values of $\mathcal{D}_N$ above +the classical bound of one. As shown in Appendix G +this can only happen for mixed quantum states. One +could then consider an auxiliary system which purifies +the mixed state. High values of $\mathcal{D}_N$ correspond to learn- +ing simultaneously the variables of the subsystems and +the auxiliary system. Note that making this statement +mathematically precise may be difficult as the problem +is equivalent to the interpretation of negative values of +conditional entropy [19, 35, 36]. + +As we have already emphasized, multipartite depen- +dence is different from multipartite correlations. Nev- +ertheless, it does share a number of properties that are +expected from measures of genuine multipartite correla- +tions. Any such quantifier should satisfy a set of postu- +lates put forward in Refs. [11, 13]. 
We now show that +most of them also hold for $\mathcal{D}_N$ and we precisely charac- +terize the deviation from one of the postulates. In Ap- + +pendices A-C we prove the following properties of the +dependence: + +(i) If $\mathcal{D}_N = 0$ and one adds a party in a product state +then the resulting $(N+1)$-party state has $\mathcal{D}_N = 0$. + +(ii) If $\mathcal{D}_N = 0$ and one subsystem is split with two of its parts placed in different laboratories then the resulting $(N+1)$-party state has $\mathcal{D}_{N+1} = 0$. + +(iii) $\mathcal{D}_N$ can increase under local operations. Let us denote with the bar the quantities computed after local operations. We have the following inequality: + +$$ \bar{\mathcal{D}}_N \le \mathcal{D}_N + I(X_1 X_2 : X_3 \dots X_N) - I(X_1 X_2 : \bar{X}_3 \dots \bar{X}_N), \quad (5) $$ + +where systems $X_1$ and $X_2$ are the ones minimizing +$\mathcal{D}_N$, i.e., before the operations were applied. + +The properties (i) and (ii) hold for all quantifiers of +multipartite correlations. It is expected that measures +of multipartite correlations are also monotonic under lo- +cal operations (though note that often this condition is +relaxed in practice, see e.g. quantum discord). In the +present case, the monotonicity property does not hold in +general for $\mathcal{D}_N$, however, property (iii) puts a bound on +its maximal violation. Moreover, it has a clear interpreta- +tion: local operations that uncorrelate a given subsystem +from the others may lead to information gain when the +less correlated party cooperates with other parties. + +Let us explain this more quantitatively for the condi- +tional mutual information between variables $X_1$ and $X_2$. 
+While it is well-known that this quantity is monotonic +under local operations on subsystems not in the condi- +tion [37], we prove in Appendix C that the following in- +equality is satisfied under local operations on arbitrary +subsystem (being the origin of property (iii)): + +$$ I(\overline{X}_1 : \overline{X}_2 | \overline{X}_3 \dots \overline{X}_N) \le I(X_1 : X_2 | X_3 \dots X_N) + I(X_1 X_2 : X_3 \dots X_N) - I(X_1 X_2 : \overline{X}_3 \dots \overline{X}_N). \quad (6) $$ + +The second line is non-negative due to the data process- +ing inequality and it quantifies how much the local opera- +tions have uncorrelated the variables $X_3 \dots X_N$ from the +variables $X_1 X_2$. This sets the upper bound to the lack +of monotonicity of the conditional mutual information. + +## IV. EXAMPLES + +Multipartite dependence can be computed for both +classical and quantum systems and is a generic quan- +tifier of information gain from cooperation that can be +used across science. Here we discuss a few exemplary +calculations and applications of $\mathcal{D}_N$ in quantum infor- +mation. +---PAGE_BREAK--- + +*Pure states.*—First of all, for pure quantum states $|\Psi\rangle$, the dependence can be further simplified as + +$$ +\begin{align} +\mathcal{D}_N(|\Psi\rangle) &= \min_{i,j} [S(\operatorname{Tr}_i |\Psi\rangle\langle\Psi|) \nonumber \\ +&\quad + S(\operatorname{Tr}_j |\Psi\rangle\langle\Psi|) - S(\operatorname{Tr}_{ij} |\Psi\rangle\langle\Psi|)] \nonumber \\ +&= \min_{i,j} [S(\rho_i) + S(\rho_j) - S(\rho_{ij})], \tag{7} +\end{align} +$$ + +where $\rho_i$ is the state of the system after removing all but the $i$-th particle, i.e., $\mathcal{D}_N(|\Psi\rangle)$ is given by the smallest quantum mutual information in two-partite subsystems. Here, we made use of the fact that both subsystems of a pure state have the same entropy: $S(\operatorname{Tr}_i\rho) = S(\rho_i)$ for $\rho = |\Psi\rangle\langle\Psi|$. 
In Appendix G we prove the following upper bound on $\mathcal{D}_N$ for pure states + +$$ \mathcal{D}_N(|\Psi\rangle) \le 1. \tag{8} $$ + +It is a consequence of the trade-off relation between the quantum mutual information for different two-particle subsystems of a pure global state and the definition of $\mathcal{D}_N$ where the smallest conditional mutual information is chosen. In particular, the bound is achieved by N-qubit GHZ state $\frac{1}{\sqrt{d}}(|0\dots0\rangle + \dots + |d-1\dots d-1\rangle)$. Additionally, the quantum mutual information is bounded by 1 whenever the state $\rho_{ij}$ is separable [38]. A comprehensive list of dependencies within standard classes of quantum states is given in Tab. I. The analytical formula for the N-qubit Dicke states with $e$ excitations, $|D_N^e\rangle$, is presented in Appendix F. In short, if one fixes $e$ and takes the limit $N \to \infty$, the dependence $\mathcal{D}_N$ vanishes. For $e$ being a function of $N$, e.g., $e = N/2$, the dependence $\mathcal{D}_N$ tends to $1/2$. + +*Entanglement without dependence.*—An intriguing question in the theory of multipartite entanglement is whether entanglement can exist without classical multipartite correlations [10]. The examples of N-party entangled states with vanishing N-party classical correlations are known in the literature [39–43], though the corresponding notions of classical correlations do not satisfy all the postulates of Refs. [11, 13]. Here we ask whether there are genuinely multipartite entangled states with no multipartite dependence and whether multipartite dependence can exist without multipartite correlations and vice versa. It turns out that all of those combinations are possible. There exist even pure genuinely multipartite entangled states without multipartite dependence. Consider any N-qubit cluster state (including linear, ring, 2D, etc.) for $N \ge 4$. It was shown in Ref. 
[44] that all single-particle subsystems are completely mixed and there exists at least one pair of subsystems in the bipartite completely mixed state. The corresponding entropies are equal to $S(\rho_i) = 1$ and $S(\rho_{ij}) = 2$, and lead to $\mathcal{D}_N = 0$, due to Eq. (7). Therefore, the information about a particular subsystem cannot be increased when other subsystems are brought together which explains the impossibility of the corresponding secret sharing task [45–47]. Note that there exist other subsets of observers who can successfully run secret sharing using a cluster + +
| N | state | $\mathcal{D}_3$ | $\mathcal{D}_4$ | $\mathcal{D}_5$ | $\mathcal{D}_6$ |
|---|-------|-----------------|-----------------|-----------------|-----------------|
| 3 | $\{P_{\text{same}}\}$ | 0 | - | - | - |
| 3 | $\{P_{\text{even}}\}$ | 1 | - | - | - |
| 3 | GHZ | 1 | - | - | - |
| 3 | $D_3^1$ | 0.9183 | - | - | - |
| 3 | $\rho_{nc}$ | 0.5033 | - | - | - |
| 4 | GHZ | 0 | 1 | - | - |
| 4 | $D_4^1$ | 0.3774 | 0.6226 | - | - |
| 4 | $D_4^2$ | 0.5033 | 0.7484 | - | - |
| 4 | $L_4$ | 1 | 0 | - | - |
| 4 | 3-uniform | 0 | 2 | - | - |
| 5 | GHZ | 0 | 0 | 1 | - |
| 5 | $D_5^1$ | 0.2490 | 0.2490 | 0.4729 | - |
| 5 | $D_5^2$ | 0.3245 | 0.3245 | 0.6464 | - |
| 5 | $L_5$ | 0 | 0 | 0 | - |
| 5 | $R_5$ | 1 | 1 | 0 | - |
| 5 | AME(5,2) | 1 | 1 | 0 | - |
| 6 | GHZ | 0 | 0 | 0 | 1 |
| 6 | $D_6^1$ | 0.1866 | 0.1634 | 0.1866 | 0.3818 |
| 6 | $D_6^2$ | 0.2566 | 0.1961 | 0.2566 | 0.5637 |
| 6 | $D_6^3$ | 0.2729 | 0.1961 | 0.2729 | 0.6291 |
| 6 | $L_6$ | 0 | 0 | 0 | 0 |
| 6 | $R_6$ | 0 | 0 | 0 | 0 |
| 6 | AME(6,2) | 0 | 2 | 0 | 0 |
| 6 | 5-uniform | 0 | 0 | 0 | 2 |
+ +TABLE I. Values of the dependence for several quantum states and probability distributions. {$P_{\text{same}}$} stands for $P(000) = P(111) = \frac{1}{2}$ and {$P_{\text{even}}$} for $P(000) = P(110) = P(101) = P(011) = \frac{1}{4}$. $D_N^k$ denotes the N-partite Dicke states with $k$ excitations $\sim |1...10...0\rangle + ... + |0...01...1\rangle$, with $k$ ones, $\rho_{nc}$ denotes the genuinely multipartite entangled state without multipartite correlations [10], the GHZ state is described in the text, $L_4$ stands for the linear cluster of four qubits and $\Psi_4$ is discussed in [48]. k-uniform states are states where all k-partite marginals are maximally mixed, whereas AME(n,d), so-called absolutely maximally entangled states, refers to $[n/2]$-uniform states of d dimensions [25]. + +This state also illustrates nicely that full correlations can exist without multipartite dependence. Conversely, the state $\rho_{nc} = \frac{1}{2}|D_N^c\rangle\langle D_N^c| + \frac{1}{2}|D_N^{N-1}\rangle\langle D_N^{N-1}|$ has the property of being N-partite entangled without N-partite correlation functions [10], yet its $\mathcal{D}_N$ is finite. This again shows that multipartite dependence is distinct from multipartite correlations and captures other properties of genuinely multi-partite entangled systems. + +*Increasing *D* with local operations.*—We now give an analytical example where $\mathcal{D}_3$ increases under local operation on the system in the condition. Consider the following classical state + +$$ \rho = \frac{1}{2} |000\rangle\langle000| + \frac{1}{8} |101\rangle\langle101| + \frac{1}{8} |110\rangle\langle110| + \frac{1}{4} |111\rangle\langle111|. \tag{9} $$ +---PAGE_BREAK--- + +
| N | state | $\mathcal{D}_3$ | $\mathcal{D}_4$ | $\mathcal{D}_5$ | $\mathcal{D}_6$ |
|---|-------|-----------------|-----------------|-----------------|-----------------|
| 3 | $D_3^1$ | 0.87 (0.92) | - | - | - |
| 3 | $\rho_{nc}$ | 0.45 (0.50) | - | - | - |
| 4 | GHZ | 0.06 (0.00) | 0.95 (1.00) | - | - |
| 4 | $D_4^2$ | 0.42 (0.50) | 0.67 (0.75) | - | - |
| 4 | $L_4$ | 0.90 (1.00) | 0.09 (0.00) | - | - |
| 4 | $\Psi_4$ | 0.33 (0.42) | 0.39 (0.42) | - | - |
| 5 | $\rho_{nc}$ | 0.25 (0.17) | 0.16 (0.65) | 0.17 (0.47) | - |
| 6 | $D_6^3$ | 0.21 (0.27) | 0.13 (0.20) | 0.14 (0.27) | 0.21 (0.63) |
+ +TABLE II. Illustrative values of dependence for several experimental quantum states. In brackets we give theoretical predictions for ideal states. + +One verifies that its 3-dependence equals $D_3(\rho) = I(X_2 : X_3|X_1) = 0.06$, i.e., conditioning on $X_1$ gives the smallest conditional mutual information. The application of an amplitude-damping channel with Kraus operators + +$$K_0 = \begin{pmatrix} 0 & 1/\sqrt{2} \\ 0 & 0 \end{pmatrix}, \quad K_1 = \begin{pmatrix} 1 & 0 \\ 0 & 1/\sqrt{2} \end{pmatrix}, \quad (10)$$ + +on subsystem $X_1$ produces the state $\bar{\rho}$, for which one computes $D_3(\bar{\rho}) = I(\bar{X}_1 : X_2|\bar{X}_3) = I(\bar{X}_1 : X_3|\bar{X}_2) = 0.19$. Note the change in the conditioned system minimizing the dependence. The local operation on $X_1$ has increased the information $I(X_2 : X_3|\bar{X}_1)$ above the other two conditional mutual informations. + +*Experimental states.*—Finally, we move to multipartite dependence in quantum optics experiments. Table II gathers quantum states prepared with photonic qubits in Refs. [40, 49–53]. The dependencies were extracted from experimental density matrices obtained via state tomography using the evaluation described in Ref. [54]. We have chosen to present the states illustrating the properties discussed above. + +The experimental data is in good agreement with the theoretical calculations. Deviations for the six qubit state $D_6^3$ result from reduced fidelities due to contributions of higher order noise in the state preparation. The same applies to the five qubit state $\rho_{nc}$ derived from $D_6^3$. Indeed, the states denoted as $\rho_{nc}$, which have vanishing correlation functions between all $N$ observers [40], clearly show a non-vanishing value for $D_N$. Hence, these states are examples for “entanglement without correlations” and “dependence without correlations”. 
Similarly, the experimental data of the linear cluster state $L_4$ indicates “entanglement without dependence” and “correlations without dependence”. In the experiment, the GHZ state $\sim |0000\rangle + |1111\rangle$ achieves the highest dependence of all considered states and is close to the theoretical dependence $D_4 = 1$, which is maximal over all pure states. The small value of $D_3$ for the four-partite GHZ state reflects its property of having vanishing dependence for all tripartite classically correlated subsystems. + +## V. CONCLUSIONS + +We have introduced a quantity, the multipartite dependence, in order to determine whether and by what amount cooperation between any subsystems brings additional information about the remaining subsystems. It is expected that this tool, which can be used in classical as well as in quantum domains, will be of broad relevance as it is directly calculable and has a clear interpretation. Furthermore, it offers an alternative to the characterization of multipartite properties via multipartite correlations. + +## ACKNOWLEDGMENTS + +We thank Krzysztof Szczygielski for valuable discussions. The work is supported by DFG (Germany) and NCN (Poland) within the joint funding initiative “Beethoven2” (2016/23/G/ST2/04273, 381445721), by the Singapore Ministry of Education Academic Research Fund Tier 2 Project No. MOE2015-T2-2-034, and by Polish National Agency for Academic Exchange NAWA Project No. PPN/PPO/2018/1/00007/U/00001. W.L. and R.G. acknowledge partial support by the Foundation for Polish Science (IRAP project, ICTQT, Contract No. 2018/MAB/5, cofinanced by EU via Smart Growth Operational Programme). JD and LK acknowledge support from the PhD programs IMPRS-QST and ExQM, respectively. JDMA is funded by the Deutsche Forschungs­gemeinschaft (DFG, German Research Foundation) under Germany’s Excellence Strategy - EXC-2111 - 390814868. 
+ +## Appendix A: Proof of property (i) + +If $D_N = 0$ and one adds a party in a product state then the resulting $(N+1)$-partite state has $D_N = 0$. + +*Proof.* Per definition, we are minimizing the conditional mutual information over all $N$-partite subsystems of the total $(N+1)$-party state. If one takes the $N$-partite subsystem that excludes the added party, by assumptions $D_N = 0$. □ + +In other words, if the cooperation of $N-1$ parties within the $N$-partite system does not help in gaining additional knowledge about any other remaining party, then the cooperation with any additional independent system will not help either. + +## Appendix B: Proof of property (ii) + +If $D_N = 0$ and one subsystem is split with two of its parts placed in different laboratories then the resulting $(N+1)$-party state has $D_{N+1} = 0$. +---PAGE_BREAK--- + +*Proof.* Without loss of generality and in order to simplify notation let us consider an initially tri-partite system where the third party is in possession of two variables labeled $X_3$ and $X_4$. The splitting operation places these variables in separate laboratories producing a four-partite system. By assumption $\mathcal{D}_3 = 0$, but this does not specify which conditional mutual information in Eq. (1) vanishes. If this is the mutual information where the variables $X_3$ and $X_4$ of the third party enter in the condition, then this mutual information is also minimizing $\mathcal{D}_4$, and hence the latter vanishes. The second possibility is that the variables of the third party enter outside the condition, e.g., the vanishing conditional mutual information could be $I(X_1 : X_3X_4|X_2)$. From the chain rule for mutual information, $0 = I(X_1 : X_3X_4|X_2) \ge I(X_1 : X_4|X_2X_3)$. Finally, from strong subadditivity follows $\mathcal{D}_4 = 0$. In the N-partite case one writes more variables in the conditions and follows the same steps. 
$\square$ + +## Appendix C: Proof of property (iii) + +Consider a state $\rho$ that is processed by general local operations (CPTP maps) to a state $\bar{\rho}$. The following upper bound on the multipartite dependence after local operations holds: + +$$ \overline{\mathcal{D}}_{\mathcal{N}} \leq \mathcal{D}_{\mathcal{N}} + I(X_1 X_2 : X_3 \dots X_N) \\ -I(X_1 X_2 : \overline{X}_3 \dots \overline{X}_N), \quad (\text{C1}) $$ + +where systems $X_1$ and $X_2$ are the ones minimizing $\mathcal{D}_N$, i.e., before the operations were applied. + +Let us begin with a lemma characterizing the lack of monotonicity of conditional mutual information under local operations. + +**Lemma 1.** *The following inequality holds:* + +$$ I(\overline{X}_1 : \overline{X}_2 | \overline{X}_3 \dots \overline{X}_N) \le I(X_1 : X_2 | X_3 \dots X_N) + I(X_1 X_2 : X_3 \dots X_N) - I(X_1 X_2 : \overline{X}_3 \dots \overline{X}_N), (\text{C2}) $$ + +where bars denote subsystems transformed by arbitrary local CPTP maps. + +*Proof.* The conditional mutual information is already known to be monotonic under operations on systems not in the condition [37]: + +$$ I(\overline{X}_1 : \overline{X}_2 | \overline{X}_3 \dots \overline{X}_N) \le I(X_1 : X_2 | \overline{X}_3 \dots \overline{X}_N) (\text{C3}) $$ + +Now we continue as follows: + +$$ +\begin{align*} +& I(X_1 : X_2 | \overline{X}_3 \dots \overline{X}_N) + I(X_1 X_2 : \overline{X}_3 \dots \overline{X}_N) \\ +&= I(X_1 : X_2 \overline{X}_3 \dots \overline{X}_N) + I(X_2 : X_1 \overline{X}_3 \dots \overline{X}_N) - I(X_1 : X_2) \\ +&\le I(X_1 : X_2 X_3 \dots X_N) + I(X_2 : X_1 X_3 \dots X_N) - I(X_1 : X_2) \\ +&= I(X_1 : X_2 | X_3 \dots X_N) + I(X_1 X_2 : X_3 \dots X_N), +\end{align*} +$$ + +where the first equation is obtained by manipulating entropies such that the mutual informations containing barred subsystems come with positive sign, next we used the data processing inequality and in the last step we + +reversed the manipulations on entropies. 
This completes the proof of the lemma. $\square$ + +To complete the proof of property (iii) we write + +$$ +\begin{align*} +\mathcal{D}_N &= I(X_1 : X_2 | X_3 \dots X_N) \\ +&\geq I(\overline{X}_1 : \overline{X}_2 | \overline{X}_3 \dots \overline{X}_N) - I(X_1 X_2 : X_3 \dots X_N) \\ +&\phantom{\geq} + I(X_1 X_2 : \overline{X}_3 \dots \overline{X}_N) \\ +&\geq \overline{\mathcal{D}}_N - I(X_1 X_2 : X_3 \dots X_N) + I(X_1 X_2 : \overline{X}_3 \dots \overline{X}_N), +\end{align*} +$$ + +where in the first line we denote the subsystems such that the conditional mutual information $I(X_1 : X_2|X_3\dots X_N)$ achieves minimum in $\mathcal{D}_N$. Next, the first inequality follows from Lemma 1, and the second inequality from the fact that $I(\overline{X}_1 : \overline{X}_2|\overline{X}_3\dots\overline{X}_N)$ may not be the one minimizing $\overline{\mathcal{D}}_N$. + +## Appendix D: Quantum qudit states maximizing $\mathcal{D}_N$ + +Let us consider a quantum state of $N$ qudits, for $N$ being a multiple of $d$ and $N \ge 3$, defined as the common eigenstate of the generators + +$$ G_{1}^{(d)} = \bigotimes_{i=1}^{N} X^{(d)}, \quad G_{2}^{(d)} = \bigotimes_{i=1}^{N} Z^{(d)}, \quad (\text{D1}) $$ + +composed of $d$-dimensional Weyl-Heisenberg matrices +$$ X^{(d)} = \sum_{j=0}^{d-1} |j\rangle\langle j+1|, \quad \text{and} \quad Z^{(d)} = \sum_{j=0}^{d-1} \omega^j |j\rangle\langle j|, $$ +with $\omega = e^{i2\pi/d}$. The explicit form of the state can be calculated in the following way: + +$$ \rho_N^{(d)} = \frac{1}{d^N} \sum_{i,j=0}^{d-1} (G_1^{(d)})^i (G_2^{(d)})^j. \quad (\text{D2}) $$ + +The state (D2) belongs to the class of k-uniform mixed states defined in [24], with $k=N-1$. + +It is known that for $N$ even the state $\rho_N^{(d)}$ has $d^{N-2}$ eigenvalues equal to $\frac{1}{d^{N-2}}$, so the entropy $S(\rho_N^{(d)})$ is equal to + +$$ S(\rho_N^{(d)}) = N - 2. 
\quad (\text{D3}) $$ + +Since the state is $(N-1)$-uniform, all reduced density matrices are proportional to identity matrices giving + +$$ S(\operatorname{Tr}_i \rho_N^{(d)}) = N - 1, \quad (\text{D4}) $$ + +$$ S(\operatorname{Tr}_{i,j} \rho_N^{(d)}) = N - 2. \quad (\text{D5}) $$ + +Therefore, for $N$ even + +$$ +\begin{align} +\mathcal{D}_N(\rho_N^{(d)}) &= S(\operatorname{Tr}_i \rho_N^{(d)}) + S(\operatorname{Tr}_j \rho_N^{(d)}) && (\text{D6}) \\ +&\quad -S(\operatorname{Tr}_{i,j} \rho_N^{(d)}) - S(\rho_N^{(d)}) = 2. +\end{align} +$$ + +In the case of $N$ odd, however, the state $\rho_N^{(d)}$ has $d^{N-1}$ eigenvalues equal to $\frac{1}{d^{N-1}}$, and by analogous calculations we get + +$$ \mathcal{D}_N(\rho_N^{(d)}) = 1, \quad (\text{D7}) $$ +---PAGE_BREAK--- + +for $(N-1)$-uniform states. + +Now we show that the $(N-1)$-uniform states are the only ones that can achieve $\mathcal{D}_N = 2$. The requirement is + +$$ +\begin{aligned} +\mathcal{D}_N &= I(X_1 : X_2 | X_3 \dots X_N) \\ +&= I(X_1 : X_2 X_3 \dots X_N) - I(X_1 : X_3 \dots X_N) \\ +&= 2, +\end{aligned} +\quad (\text{D8}) +$$ + +where $X_i$ stands for individual subsystem. Since in the definition of $\mathcal{D}_N$ we minimize over all permutations, the same equation holds for all permutations of subsystems. Due to subadditivity, the only way to satisfy (D8) is + +$$ +\begin{aligned} +I(X_1 : X_3 \dots X_N) &= 0, && (\text{D9}) \\ +I(X_1 : X_2 X_3 \dots X_N) &= 2. && (\text{D10}) +\end{aligned} +$$ + +From the first equation we conclude that + +$$ \rho_{13 \dots N} = \rho_1 \otimes \rho_{3 \dots N}, \quad (\text{D11}) $$ + +which also holds for all permutation of indices. After tracing out all but the 1st and 3rd subsystem, we arrive at + +$$ \rho_{13} = \rho_1 \otimes \rho_3, \quad (\text{D12}) $$ + +which means that every pair of subsystems is described by a tensor product state. 
It follows that any $N-1$ particle subsystem is described by a simple tensor product, e.g., + +$$ \rho_{13 \dots N} = \rho_1 \otimes \rho_3 \otimes \dots \otimes \rho_N. \quad (\text{D13}) $$ + +Using (D10) we write + +$$ S(X_1) - S(X_1 | X_2 X_3 \dots X_N) = 2. \quad (\text{D14}) $$ + +Since for the quantum conditional entropy we have + +$$ -S(X_1|X_2X_3\dots X_N) \leq S(X_1), \quad (\text{D15}) $$ + +the bound is achieved if + +$$ +\begin{aligned} +&2 = S(X_1) - S(X_1 | X_2 X_3 \dots X_N) \\ +&\leq S(X_1) + S(X_1), +\end{aligned} +$$ + +i.e., for $S(X_1) = 1$. Hence, taking into account (D13), all $N-1$ particle subsystems are maximally mixed, i.e., the total state is $(N-1)$-uniform. + +## Appendix E: Quantum secret sharing + +After introducing the $(N-1)$-uniform states, which are maximizing the $N$-dependence, we now show that they naturally feature in the task of quantum secret sharing. + +Suppose Alice has a quantum state $\rho$, called the secret, that she wants to split into $n$ shares such that the secret is recoverable only when a party has all $n$ shares. A quantum secret sharing scheme [23] is a map $\mathcal{E}_n: A \to X^{\otimes n}$ such that, + +$$ C_Q(\operatorname{Tr}_k \circ \mathcal{E}_n) = 0 \quad (\text{E1}) $$ + +where $\operatorname{Tr}_k$ is the partial trace over an arbitrary set of subsystems and $C_Q(\Lambda)$ is the quantum capacity of the channel $\Lambda$. The rate of a secret sharing scheme is given by the quantum capacity of the channel $\mathcal{E}_n$. + +Consider that Alice prepares a quantum secret in the state $\rho = \frac{1}{2}(\sigma_0 + \sum_j s_j \sigma_j)$ of a single qubit, where $s_j$ are the components of the Bloch vector. 
Her encoding map has the $(N-1)$-uniform state as the Choi state [55], and one verifies that it leads to the outcome + +$$ +\begin{aligned} +\mathcal{E}_N(\rho) &= \frac{1}{2N} \left( \sigma_0^{\otimes N} \operatorname{Tr}\rho + (-1)^{N/2} \sum_{j=1}^3 \sigma_j^{\otimes N} \operatorname{Tr}(\sigma_j^T \rho) \right). +\end{aligned} +\quad (\text{E2}) +$$ + +Since for any $\rho$ we have $(\operatorname{Tr}_k \circ \mathcal{E}_N)(\rho) \propto 1$, it follows that $C_Q(\operatorname{Tr}_k \circ \mathcal{E}_N) = 0$, i.e., no subset of observers can recover the quantum secret. All of them, however, can recover it perfectly with the decoding map + +$$ +\begin{aligned} +\mathcal{D}_N(\rho_N) &= \frac{1}{2} \left( \sigma_0 + (-1)^{N/2} \sum_j \operatorname{Tr}(\sigma_j^{\otimes N} \rho_N) \sigma_j \right)^T, +\end{aligned} +\quad (\text{E3}) +$$ + +where $\rho_N = \mathcal{E}_N(\rho)$. + +We now show that any $(N+1)$-partite state $\rho_c$ with maximally mixed marginals and non-classical dependence $\mathcal{D}_{N+1}(\rho_c) > 1$ is useful for quantum secret sharing. Consider the encoding map $\mathcal{E}_c: A \to X^{\otimes N}$ with the Choi state given by $\rho_c$, i.e., $(\mathbb{1} \otimes \mathcal{E}_c)(|\Phi\rangle\langle\Phi|) = \rho_c$, where $|\Phi\rangle$ is the maximally entangled state. The rate of quantum secret sharing admits the lower bound + +$$ +\begin{align*} +R &= C_Q(\mathcal{E}_c) && (\text{E4a}) \\ + &\geq \sup_{\phi_{AN}} -S_{A|X_1\dots X_N}((\mathbb{1} \otimes \mathcal{E}_c)(\phi_{AN})) && (\text{E4b}) \\ + &\geq -S_{A|X_1\dots X_N}(\rho_c) && (\text{E4c}) \\ + &= I(A:X_1|X_2\dots X_N) - S(A|X_2\dots X_N) && (\text{E4d}) \\ + &\geq I(A:X_1|X_2\dots X_N) - 1 && (\text{E4e}) \\ + &\geq \mathcal{D}_{N+1}(\rho_c) - 1. && (\text{E4f}) +\end{align*} +$$ + +The steps are justified as follows. The first line follows from definition. Ineq. 
(E4b) is the result of computing the quantum capacity of a channel [56–61], (E4c) follows because the maximally entangled state is a particular choice of $\phi_{AN}$, and the Choi state of $\mathcal{E}_c$ is $\rho_c$. Eqs. (E4d) and (E4e) follow from the properties of entropy recalling that our logarithms are base $d$. Finally, the dependence is the worst case conditional mutual information. + +Since the marginals of $\rho_c$ are maximally mixed, the same holds for the encoded state $\rho_N = \mathcal{E}_c(\rho)$, i.e., no subset of parties can recover the quantum secret alone, yet for all of them together $R > 0$ holds for $\mathcal{D}_{N+1}(\rho_c) > 1$. + +## Appendix F: Dependence of Dicke states + +We now present an analytical formula for $\mathcal{D}_N^e$ in $N$-qubit Dicke states with $e$ excitations. For that state it is +---PAGE_BREAK--- + +given by + +$$ +\begin{equation} +\begin{aligned} +\mathcal{D}_N(D_N^e) = {}& (\begin{smallmatrix} N \\ e \end{smallmatrix})^{-1} \left[ - \frac{2(N-1)!\log\left(\frac{e}{N}\right)}{(e-1)!(N-e)!} \right. \\ +& \qquad \left. - 2\binom{N-1}{e} \log\left(1-\frac{e}{N}\right) + \binom{N-2}{e-2} \log\left(\frac{\binom{N-2}{e-1}}{\binom{N}{e}}\right) \right] \quad (\text{F1}) \\ +& + 2\binom{N-2}{e-1} \log\left(\frac{2\binom{N-2}{e-1}}{\binom{N}{e}}\right) + \binom{N-2}{e} \log\left(\frac{\binom{N-2}{e}}{\binom{N}{e}}\right). +\end{aligned} +\end{equation} +$$ + +This comes from the fact that for a general Dicke state with *e* excitations all one-partite reduced density matrices {$ρ_i$} have the two non-zero eigenvalues *e*/N and (N-*e*)/N, while all two-partite reduced states {$ρ_{ij}$} have the three non-vanishing eigenvalues *e*(e−1)/N(N−1), 2*e*(N−*e*)/N(N−1), and (N−*e*−1)(N−*e*)/N(N−1). For *e* as a function of the number of parties, *e* = N/*k*, in the limit of *N* → ∞, the N-dependence converges to a finite value, i.e., *D**N*(*D**N**e*) tends to 2(*k* − 1)/*k*2. 
The maximally achievable dependence of $1/2$ is reached for $e = N/2$. For an arbitrarily chosen constant $e$ (e.g., for the W state, $e = 1$), $\mathcal{D}_N(D_N^e)$ tends to $0$ for $N \to \infty$.
\quad (G6) +$$ + +The subadditivity of quantum entropy states that for the reduced quantum states we have + +Hence, this monogamy relation with respect to mutual information proves that there is always a bipartite subsystem with mutual information bounded by 1. + +[1] T. Gawne and B. Richmond, Journal of Neuroscience **13**, 2758 (1993). + +[2] I. Gat and N. Tishby, in Proceedings of the 1998 Conference on Advances in Neural Information Processing Systems (Ben, V. Vedral, and A. Winter, Phys. Rev. Lett. **101**, 070502 (2008). + +[3] E. Schneidman, W. Bialek, and M. J. Berry, Journal of Neuroscience **23**, 11539 (2003). + +[4] E. Schneidman, S. Still, M. J. Berry, and W. Bialek, Phys. Rev. Lett. **91**, 238701 (2003). + +[5] V. Varadan, I. Miller, David M., and D. Anastassiou, Bioinformatics **22**, e497 (2006). + +[6] D. Anastassiou, Molecular Systems Biology **3**, 83 (2007). + +[7] D. Trendafilov, D. Polani, and R. Murray-Smith, *2015 17th UKSim-AMSS International Conference on Modelling and Simulation (UKSim)*, , 361 (2015). + +[8] D. L. Zhou, B. Zeng, Z. Xu, and L. You, Phys. Rev. A **74**, 052110 (2006). + +[9] D. L. Zhou, Phys. Rev. Lett. **101**, 180505 (2008). + +[10] C. H. Bennett, A. Grudka, M. Horodecki, P. Horodecki, and R. Horodecki, Phys. Rev. A **83**, 012312 (2011). + +[11] G. L. Giorgi, B. Bellomo, F. Galve, and R. Zambrini, Phys. Rev. Lett. **107**, 190501 (2011). + +[12] D. Girolami, T. Tufarelli, and C. E. Susa, Phys. Rev. Lett. **119**, 140505 (2017). + +[13] I. Devetak and J. Yard, Phys. Rev. Lett. **100**, 230501 (2008). + +[14] F. G. S. L. Brandao, A. W. Harrow, J. Oppenheim, and S. Strelchuk, Phys. Rev. Lett. **115**, 050501 (2015). +---PAGE_BREAK--- + +[16] T. M. Cover and J. A. Thomas, *Elements of Information Theory* (Wiley-Interscience, 2006). + +[17] K. Modi, T. Paterek, W. Son, V. Vedral, and M. Williamson, Phys. Rev. Lett. **104**, 080501 (2010). + +[18] M. A. Nielsen and I. L. 
Chuang, *Quantum Computation and Quantum Information* (Cambridge University Press, 2000). + +[19] M. Horodecki, J. Oppenheim, and A. Winter, Nature **436**, 673 (2005). + +[20] A. Shamir, ACM **22**, 612 (1979). + +[21] G. R. Blakley, Proceedings of AFIPS'79 **48**, 313 (1979). + +[22] M. Hillery, V. Bužek, and A. Berthiaume, Phys. Rev. A **59**, 1829 (1999). + +[23] H. Imai, J. Müller-Quade, A. C. A. Nascimento, P. Tuyls, and A. Winter, Quantum Info. Comput. **5**, 69 (2005). + +[24] W. Klobus, A. Burchardt, A. Kolodziejski, M. Pandit, T. Vértesi, K. Życzkowski, and W. Laskowski, Phys. Rev. A **100**, 032112 (2019). + +[25] W. Helwig, W. Cui, J. I. Latorre, A. Riera, and H.-K. Lo, Phys. Rev. A **86**, 052335 (2012). + +[26] J. A. Smolin, Phys. Rev. A **63**, 032306 (2001). + +[27] R. Augusiak and P. Horodecki, Phys. Rev. A **73**, 012318 (2006). + +[28] R. Augusiak and P. Horodecki, Phys. Rev. A **74**, 010305R (2006). + +[29] E. Amselem and M. Bourennane, Nat. Phys. **5**, 748 (2009). + +[30] J. Lavoie, R. Kaltenbaek, M. Piani, and K. J. Resch, Nat. Phys. **6**, 827 (2010). + +[31] E. Amselem and M. Bourennane, Nat. Phys. **6**, 827 (2010). + +[32] J. Lavoie, R. Kaltenbaek, M. Piani, and K. J. Resch, Phys. Rev. Lett. **105**, 130501 (2010). + +[33] J. Barreiro, P. Schindler, O. Gühne, T. Monz, M. Chwalla, C. F. Roos, M. Hennrich, and R. Blatt, Nat. Phys. **6**, 943 (2010). + +[34] E. Amselem, M. Sadiq, and M. Bourennane, Sci. Rep. **3**, 1966 (2013). + +[35] L. del Rio, J. Aberg, R. Renner, O. Dahlsten, and V. Vedral, Nature **474**, 61 (2011). + +[36] T. K. Chuan, J. Maillard, K. Modi, T. Paterek, M. Paternostro, and M. Piani, Phys. Rev. Lett. **109**, 070501 (2012). + +[37] M. M. Wilde, J. Phys. A: Math. Theor. **51**, 374002 (2018). + +[38] R. Horodecki, P. Horodecki, M. Horodecki, and K. Horodecki, Rev. Mod. Phys. **81**, 865 (2009). + +[39] W. Laskowski, M. Markiewicz, T. Paterek, and M. Wieśniak, Phys. Rev. A **86**, 032105 (2012). + +[40] C. Schwemmer, L. 
Knips, M. C. Tran, A. de Rosier, W. Laskowski, T. Paterek, and H. Weinfurter, Phys. Rev. Lett. **114**, 180501 (2015). + +[41] S. Designolle, O. Giraud, and J. Martin, Phys. Rev. A **96**, 032322 (2017). + +[42] M. C. Tran, M. Zuppardo, A. de Rosier, L. Knips, W. Laskowski, T. Paterek, and H. Weinfurter, Phys. Rev. A **95**, 062331 (2017). + +[43] W. Klobus, W. Laskowski, T. Paterek, M. Wieśniak, and H. Weinfurter, Eur. Phys. J. D **73**, 29 (2019). + +[44] P. Hyllus, O. Gühne, and A. Smerzi, Phys. Rev. A **82**, 012337 (2010). + +[45] D. Markham and B. C. Sanders, Phys. Rev. A **78**, 042309 (2008). + +[46] A. Keet, B. Fortescue, D. Markham, and B. C. Sanders, Phys. Rev. A **82**, 062315 (2010). + +[47] D. Markham and B. C. Sanders, Phys. Rev. A **83**, 019901 (2010). + +[48] H. Weinfurter and M. Żukowski, Phys. Rev. A **64**, 010102 (2001). + +[49] N. Kiesel, C. Schmid, G. Toth, E. Solano, and H. Weinfurter, Phys. Rev. Lett. **98**, 063604 (2007). + +[50] G. Toth, W. Wieczorek, D. Gross, R. Krischek, C. Schwemmer, and H. Weinfurter, Phys. Rev. Lett. **105**, 250403 (2010). + +[51] R. Krischek, W. Wieczorek, A. Ozawa, N. Kiesel, P. Michelberger, T. Udem, and H. Weinfurter, Nat. Photonics **4**, 170 (2010). + +[52] R. Krischek, C. Schwemmer, W. Wieczorek, H. Weinfurter, P. Hyllus, L. Pezze, and A. Smerzi, Phys. Rev. Lett. **107**, 080504 (2011). + +[53] L. Knips, C. Schwemmer, N. Klein, M. Wieśniak, and H. Weinfurter, Phys. Rev. Lett. **117**, 210504 (2016). + +[54] L. Knips, C. Schwemmer, N. Klein, J. Reuter, G. Tóth, and H. Weinfurter, ArXiv e-prints (2015), arXiv:1512.06866 [quant-ph]. + +[55] M.-D. Choi, Linear Alg. Appl. **10**, 285 (1975). + +[56] B. Schumacher, Phys. Rev. A **54**, 2614 (1996). + +[57] B. Schumacher and M.A.Nielsen, +Phys. Rev. + +A **54**, 2629 (1996). + +[58] H. + +Barnum, + +M.A.Nielsen + +and B.Schumacher, +Phys.Rev.A**57**,4153(1998). + +[59] H. + +Barnum, + +E.Knill, + +and M.A.Nielsen, +IEEE Trans. +Info.Theor. +**46**, 1317 (2000). 
[60] S. Lloyd, Phys. Rev. A **55**, 1613 (1997).

[61] I. Devetak, IEEE Trans. Info. Theor. **51**, 44 (2005).
Our work also shows that no scattering process can create an inflating region, even by quantum tunneling, since a pure state can never evolve into a mixed state under unitary evolution. + +October 2005 + +freivogel@berkeley.edu, veronika.hubeny@durham.ac.uk, maloney@slac.stanford.edu, +rmyers@perimeterinstitute.ca, mukund.rangamani@durham.ac.uk, sshenker@stanford.edu +---PAGE_BREAK--- + +# Contents + +
1. Introduction1
2. Inflation in asymptotically AdS spacetimes4
2.1. Thin domain wall constructions5
2.2. Beyond the thin wall approximation13
2.3. A special parameter domain15
3. Properties of the boundary CFT16
3.1. The entropy puzzle17
3.2. Mixed states in asymptotically Schwarzschild-AdS geometries18
3.3. Conditions for the appearance of mixed states21
4. Probes of inflation in AdS/CFT26
4.1. Geodesics probes of domain wall spacetimes27
4.2. From AdS/CFT to dS/CFT and beyond30
5. Can inflation begin by tunneling?32
6. Discussion33
Appendix A. Details of the thin wall geometries34
A.1. Effective potential and extrinsic curvatures34
A.2. Thin wall trajectories35
Appendix B. False vacuum bubbles in scalar-gravity systems39
Appendix C. Construction allowing de Sitter $\mathcal{I}$ and $r_d < r_+$43
Appendix D. Computation of dS-SAdS Propagators46
Appendix E. A pure state description of spacetimes with causally disconnected regions?47
Appendix F. Analyticity in Coleman-de Luccia spacetimes50
+ +## 1. Introduction + +Our current understanding of the cosmological evolution of the universe relies on the existence of an early period of inflation¹. Recent data suggest that the universe is now undergoing another period of inflation. It is of central importance to understand this remarkable phenomenon as deeply as possible. + +String theory, which currently is the only viable candidate for a theory of quantum gravity, has had partial success in describing inflationary physics. In recent years there has been dramatic progress in constructing string vacua with stabilized moduli and positive and negative cosmological constants [4,5,6,7,8,9,10,11]. These lead to de Sitter and Anti-de Sitter (AdS) cosmologies, respectively. There are an enormous number of such vacua, populating what is now called the “string landscape” [12]. A small piece of the landscape, containing an AdS and a neighboring de Sitter vacuum, is sketched in Fig. 1. The richness of the landscape allows us to view the parameters of such a potential as essentially free parameters. + +¹ For reviews see [1,2,3]. +---PAGE_BREAK--- + +**Fig. 1:** A typical scalar potential appearing in string theory, with de Sitter and Anti-de Sitter minima. + +These constructions do not answer many of the deep questions raised by inflation: How can inflation begin? What measure should be used on the multiverse of eternal inflation? What is the holographic description of inflation? More generally, what degrees of freedom are appropriate for a complete description of quantum gravity in this domain? A substantial amount of work has been done on these topics [13,14,15,16,17,18,19]. We will not be able to answer these questions in this paper, but we will try to make some progress by embedding inflation in our best understood and most powerful framework for understanding quantum gravity, the AdS/CFT correspondence² [21,22,23,24]. + +Consider a stable supersymmetric AdS ground state, say the one indicated in Fig. 1. 
The bulk correlators taken to the AdS boundary define a conformal field theory (CFT), which encodes the bulk dynamics precisely. Given that small fluctuations around the AdS minimum are captured by the CFT, it seems plausible that classical configurations corresponding to the excursions to the neighboring de Sitter minimum should be encoded in the CFT somehow. For instance, correlators of the scalar field $\phi$ describing the horizontal axis of Fig. 1 should enable one to reconstruct the effective potential³. If we can construct a region of space where $\phi$ is displaced from the AdS minimum to the dS minimum⁴, the behavior of such a bubble of false vacuum might probe some inflationary physics. + +The fate of such bubbles has been investigated extensively while exploring the possibility of “creating a universe in a laboratory” [25,26,27]. We will discuss these results in more detail later, but for now we will just summarize the main points. Observed from the boundary of AdS (where the dual CFT is located), all such bubbles collapse into a black + +² The first work on this connection is [20]. + +³ Of course, a CFT that captures aspects of the landscape as a whole must be a complicated object indeed. + +⁴ The finite energy excitations described by the AdS/CFT correspondence require that $\phi$ approach the AdS minimum at the AdS boundary. +---PAGE_BREAK--- + +hole. If the bubble was large enough initially, an inflating region forms, but it is behind +the black hole horizon. At first glance this seems discouraging. The standard AdS/CFT +observables are only sensitive to physics outside the horizon. But in recent years tools +have been developed, based largely on analyticity, to examine physics behind the horizon +in AdS/CFT [28,29,30,31]. We will discuss here how these tools enable us to observe the +inflating region. + +A basic obstruction to making a universe in a laboratory classically was noted by +the authors of [26]. 
They argued using singularity theorems in general relativity that +an inflating region must classically always begin in a singularity. But AdS/CFT quite +comfortably describes geometries with both future and past singularities, like the eternal +Schwarzschild-AdS black hole [32,33]. So the observations of [26] should not prevent us +from studying inflation in AdS/CFT. + +A more general worry about representing inflation in AdS/CFT is that the boundary +CFT must encode a very large number of degrees of freedom describing the inflating +region. Specifically, the authors of [34,35] have pointed out that in certain situations the +dS entropy of the inflating region is larger than the entropy of the Schwarzschild-AdS black +hole. Notions of black hole complementarity and holography suggest that this would be +hard to accommodate. + +In our picture this puzzle is resolved in a simple way. We present arguments that the +geometries created by large bubbles of false vacuum must be represented as *mixed* states +in the boundary CFT. The large number of degrees of freedom in the region behind the +horizon are entangled with the degrees of freedom outside the horizon, as in the Hartle- +Hawking state representation of the eternal Schwarzschild-AdS black hole⁵ [36,32,33]. The +degrees of freedom behind the horizon are not explicitly represented; they are traced over. +They can be weakly entangled, though, so that even tracing over a large number of them +can yield a density matrix with entropy compatible with the black hole entropy. + +We do not expect the degrees of freedom behind the horizon to be fully represented by +a CFT, and do not know how to calculate the full density matrix beyond the supergravity +approximation. If we do know the density matrix, a large amount of information about the +degrees of freedom that have been traced over can be extracted, again by using analyticity. 
+As an example, in the eternal Schwarzschild-AdS black hole, boundary operators on the +right hand boundary can be moved to the left hand boundary by continuing in complex +time. In our situation, boundary operators on the right hand AdS boundary can be moved +by a suitable continuation in complex time to the de Sitter boundary at future (or past) +infinity. The resulting correlators living on the boundary of de Sitter have the form that + +⁵ As we review later, the eternal Schwarzschild-AdS black hole is described as a pure entangled state in the Hilbert space of two copies of the boundary CFT, each living on a separate AdS boundary. Tracing over one of these Hilbert spaces leads to a thermal density matrix in the other. +---PAGE_BREAK--- + +one would expect from the dS/CFT correspondence [19]. This conclusion holds classically. +Quantum mechanically the de Sitter regions decay and the behavior is richer, and more +mysterious. But if these correlators can be defined nonperturbatively it should be possible +to extract nonperturbative information about inflation from them. + +The paper is organized as follows. In Section 2 we review the construction of false vacuum bubble spacetimes in the thin wall approximation and discuss various aspects of these geometries. In Section 3 we describe the realization of these spacetimes in AdS/CFT. We consider the entropy puzzle and resolve it by presenting arguments showing that the CFT describing inflation must be in a mixed state. We also discuss the general question of which geometries are represented by mixed states. Section 4 deals with the signatures of inflating bubbles in the CFT correlation functions. We demonstrate that geodesic probes can sample the inflationary universe behind the horizon and describe the signatures that can be gleaned from this analysis. We discuss analytic continuation from the AdS to the de Sitter boundary. 
In Section 5 we revisit the idea of “creating” a universe in the laboratory from our perspective. We argue that because the CFT dual of an inflating region is a mixed state it cannot be produced in any scattering process, including quantum tunnelling, which is described by pure state evolution. This agrees with some previous work [37,38]. We end with a discussion in Section 6. Some calculations are collected in appendices. + +## 2. Inflation in asymptotically AdS spacetimes + +Recently, the landscape of string theory compactifications has been shown to include both Anti-de Sitter and de Sitter minima [4,5,6,7,8,9,10,11]. The theory includes domain walls interpolating between these states, so one might expect that there are asymptotically AdS spacetimes containing an inflating de Sitter region. For many classes of compactifications, the low energy theory is effectively described by gravity coupled to a scalar field in a potential, as in Fig. 1. This effective potential contains both positive and negative energy minima, with a domain wall given by field configurations interpolating between two vacua. Hence, by choosing carefully the initial profile of the scalar field and solving the equations of motion, one can obtain asymptotically AdS spaces with inflating regions in the low energy effective gravitational theory. + +Although this effective model is much simpler than the full string theory, it is nevertheless quite complicated: even solving for the exact spacetime metric requires messy numerical computations. So for much of this section we will work in the thin wall approximation, where we can write down the metric exactly. In this approximation we simply match two pieces of known spacetimes together across an infinitesimally thin ‘domain wall’, which obeys the appropriate junction conditions [39]. 
The simplest requirement for this approximation to be valid is that the width of the domain wall be less than the curvature +---PAGE_BREAK--- + +length scales in the geometry. This is easy to arrange. More refined requirements will be discussed below. For spherically symmetric spacetimes joined across a spherical shell, the full configuration is likewise spherically symmetric, and may be viewed as a bubble of one spacetime inside the other spacetime. Although the metric is continuous across the bubble wall, the extrinsic curvature is discontinuous because the shell carries some energy. Einstein's equations then reduce to an effective equation of motion for this shell. We merely have to solve this equation to determine the shell's trajectory, and patch together the spacetimes across the shell. + +Spherical symmetry enables us to draw two-dimensional Penrose diagrams which encode the full causal structure of the entire spacetime. In addition, knowing the metric exactly will allow us to study the behavior of the geodesics, Green's functions, etc., in these spacetimes, which will ultimately be of use in extracting information about this spacetime from its holographic dual. + +We will first consider thin domain wall constructions, before broadening our discussion to include the more realistic (scalar field) set-up towards the end. We begin by discussing what types of geometries are possible, specializing mainly to a bubble of de Sitter inside Schwarzschild-AdS. After explaining the construction and categorizing the various possible cases, we focus on time-symmetric situations. As we will see, having a piece of de Sitter infinity (denoted $I$) guarantees the existence of a de Sitter horizon; time symmetry then guarantees that its area is necessarily greater than that of the black hole horizon. As will be discussed in Section 3, this would seem to lead to an entropy paradox. 
There are also time asymmetric solutions with de Sitter $I$ – in this case the de Sitter horizon may be larger or smaller than the black hole horizon. + +We will then illustrate explicitly that one can achieve essentially the same desirable ingredients (namely de Sitter $I$ hidden behind a horizon) for a scalar field in a suitably chosen potential⁶. The latter is chosen by hand, but motivated by the string landscape; we discuss what are reasonable landscape parameters to expect. + +## 2.1. Thin domain wall constructions + +We start by reviewing the procedure of patching together geometries across a thin junction in general relativity [39]. This will allow us to construct classical solutions with both AdS and de Sitter regions (including $I$). For simplicity, we consider only spherically symmetric geometries in four dimensions – more general solutions are considered in Appendix A. + +⁶ However, as we will see, the nature of some parts of singularities and boundaries may be altered by instabilities. +---PAGE_BREAK--- + +We have a spherical shell, inside of which the metric is + +$$ds_i^2 = -f_i(r) dt_i^2 + \frac{dr^2}{f_i(r)} + r^2 d\Omega^2, \quad (2.1)$$ + +and outside of which the metric is + +$$ds_o^2 = -f_o(r) dt_o^2 + \frac{dr^2}{f_o(r)} + r^2 d\Omega^2 . \quad (2.2)$$ + +Note that having written the metrics in a static, spherically symmetric form, we must allow for the ‘time’ coordinates $t_\alpha$ ($\alpha = i, o$) to be different in each region, since this coordinate need not match across the shell. On the other hand, $r$ is a physically meaningful coordinate – it measures the proper size of the spheres of a spherically symmetric spacetime – and therefore has to vary continuously across the shell. Hence we can use the same coordinate $r$ both inside and outside the shell. + +The inside and outside geometries are patched together along a domain wall, with world-volume metric + +$$ds_{bubble}^2 = -d\tau^2 + R(\tau)^2 d\Omega^2 . 
\quad (2.3)$$ + +Here $R(\tau)$ denotes the proper size of the shell as a function of its proper time $\tau$; in each part of the spacetime its trajectory is given by $r = R(\tau)$. In the thin wall approximation we take the domain wall stress tensor to be delta function localized on the wall surface. The equation of motion of the shell, which determines $R(\tau)$, then follows from two matching conditions (for a review, see e.g. [25]). First, the metric must be continuous across the domain wall. Second, the jump in extrinsic curvature across the wall is related to the stress tensor of the bubble. This implies that + +$$\sqrt{\dot{R}^2 + f_i(R)} - \sqrt{\dot{R}^2 + f_o(R)} = \kappa R, \quad (2.4)$$ + +(with the sign of the radical determined by the extrinsic curvature – see below), where $\dot{R} \equiv \frac{dR}{d\tau}$. The parameter $\kappa = 4\pi G_N\sigma$ is related to the domain wall tension $\sigma$. By squaring (2.4) twice, we obtain the radial equation of motion of the shell, + +$$\dot{R}^2 + V_{\text{eff}}(R) = 0, \quad (2.5)$$ + +with the effective potential + +$$V_{\text{eff}}(r) = f_o(r) - \frac{(f_i(r) - f_o(r) - \kappa^2 r^2)^2}{4 \kappa^2 r^2}. \quad (2.6)$$ + +Equation (2.5) describes the one-dimensional motion of a point particle of zero energy in an effective potential (2.6). Many properties of the geometry can be read off directly from +---PAGE_BREAK--- + +the form of $V_{\text{eff}}(r)$. For example, if $V_{\text{eff}}(r) \to +\infty$ (or $V_{\text{eff}}(r) \to C > 0$) as $r \to \infty$, the shell cannot reach the boundary. + +We should note that the equation of motion (2.5) actually does not completely determine spacetime when, as in Schwarzschild-AdS or de Sitter, $r$ is not a global coordinate⁷. This is because the effective potential (2.6) was obtained by squaring the equation for junction conditions twice, so we have lost some sign information. In particular, (2.5) does not distinguish between different points with the same value of $r$. 
To fix this, we must take into account the extrinsic curvatures: + +$$ \beta_i = \frac{f_i(R) - f_o(R) + \kappa^2 R^2}{2 \kappa R}, \qquad \beta_o = \frac{f_i(R) - f_o(R) - \kappa^2 R^2}{2 \kappa R} . \tag{2.7} $$ + +Note that $\beta_\alpha = \pm\sqrt{\dot{R}^2 + f_\alpha(R)}$ automatically satisfy + +$$ \beta_i - \beta_o = \kappa R. \tag{2.8} $$ + +Physically, the extrinsic curvature is positive (negative) if the outward pointed normal points toward larger (smaller) $r$. Hence, for a given trajectory of the domain wall (as given by (2.5) and (2.6)), one can find the extrinsic curvatures $\beta_\alpha$, and thereby determine which types of bubble trajectories are compatible and which are inconsistent. This allows us to construct the appropriate Penrose diagram. + +We are interested in geometries describing a bubble of de Sitter in Schwarzschild-AdS. So the inner and outer metrics can be written in static coordinates as (2.1), (2.2) with + +$$ f_i(r) = 1 - \lambda r^2, \qquad f_o(r) = 1 + r^2 - \frac{\mu}{r}. \tag{2.9} $$ + +The three independent parameters $\lambda > 0$, $\mu > 0$ and $\kappa > 0$ are related to the de Sitter cosmological constant, the mass of the black hole and the tension of the shell⁸. We will work in units where the AdS radius is one. Then the relevant length scales are the AdS radius, the de Sitter radius $r_d = 1/\sqrt{\lambda}$, and the black hole horizon radius $r_+$. The horizon radius $r_+$ is defined by $f_o(r_+) = 0$, so for large $\mu$ we have $r_+ \sim \mu^{1/3}$. We will now discuss the domain wall solutions found in this case – a more general family of solutions is described in Appendix A. + +⁷ In spacetimes with horizons, such as de Sitter or Schwarzschild-AdS, the static coordinates of the type used in (2.1), (2.2) are not globally well defined. So two distinct points in the spacetime can have the same value of $r$, $t$ and $\Omega$. 
Typically one can distinguish such points either by passing to a good global coordinate chart, such as the Kruskal coordinates for Schwarzschild-AdS, or by a further specification of the imaginary part of the time coordinate, $\Im(t)$. + +⁸ More precisely, in terms of the actual cosmological constant $\Lambda$ and the ADM mass of the black hole $M$, $\lambda = \Lambda/3$ and $\mu = 2G_N M$. +---PAGE_BREAK--- + +**Fig. 2:** Possible types of trajectories in an effective potential (2.10), with $V_{\text{eff}}(r) \to -\infty$ both as $r \to 0$ and as $r \to \infty$. (a): $V_{\text{max}} > 0$, (b): $V_{\text{max}} < 0$, and (c): $V_{\text{max}} = 0$. Trajectories A, B, and D are time-symmetric (case D describes a static shell), while the others are not time-symmetric. + +Evaluating (2.6) for the specific case (2.9) gives the following effective potential: + +$$V_{\text{eff}}(r) = - \left[ \frac{(\lambda + \kappa^2 - 1)^2 + 4\lambda}{4\kappa^2} \right] r^2 + 1 + \mu \frac{(1 + \lambda - \kappa^2)}{2\kappa^2} \frac{1}{r} - \frac{\mu^2}{4\kappa^2} \frac{1}{r^4}. \quad (2.10)$$ + +The behavior of the shell can be read off from this effective potential. Both the $r^2$ and the $1/r^4$ coefficients are negative, so $V_{\text{eff}} \to -\infty$ at $r \to 0$ and $r \to \infty$ and the potential has a maximum $V_{\text{max}}$ at some value of $r$, say $r = r_0$. The possible domain wall trajectories $R(\tau)$ depend on the sign of $V_{\text{max}}$ (recall that the effective ‘energy’ is zero), as indicated in Fig. 2. If $V_{\text{max}} > 0$, as sketched in Fig. 2a, there are two possible types of time symmetric situations: the shell can expand from zero size and recollapse (case A), or it can contract from infinite size and re-expand (case B). On the other hand, if $V_{\text{max}} < 0$ as in Fig. 2b, then no time symmetric situation exists: the shell either expands (case C) or contracts (case C') on its semi-infinite trajectory. Finally, if $V_{\text{max}} = 0$ as in Fig. 
2c, then we can consider a static shell sitting at $R(\tau) = r_0$ (case D). Such a case requires a certain fine-tuning of the parameters to obtain $V_{\text{max}} = 0$, as well as of the initial conditions: $R(\tau_0) = r_0$, $\frac{d}{d\tau}R(\tau_0) = 0$. If the latter is relaxed, the bubble may expand forever (case E) or collapse (case E'). Of course, the time reverse where the bubble slowly settles to $R(\tau \to \infty) = r_0$ is also possible. + +We can now write down the Penrose diagrams for these various cases. Note that to do so we must take into account the sign of the extrinsic curvature as mentioned above. The details of this extrinsic curvature analysis are contained in Appendix A – we will simply quote the answers here. We will take the spacetime inside the bubble to be on the left of the wall trajectory in the Penrose diagram, and the outside spacetime on the right. +---PAGE_BREAK--- + +**Fig. 3:** Sketches of Penrose diagrams for (a) de Sitter, (b) Schwarzschild-AdS, and (c) de Sitter/Schwarzschild-AdS domain wall spacetimes, with constant-$r$ surfaces indicated. The dashed vertical lines correspond to the points $r = 0$ in de Sitter, the dashed diagonal lines are horizons, the horizontal squiggly lines are the singularities, and the bold lines indicate the boundaries. The thick dotted lines indicate a possible trajectory of a shell across which the two spacetimes (a) and (b) may be patched together to obtain (c). + +The Penrose diagrams⁹ for de Sitter and Schwarzschild-AdS are given in Fig. 3a and Fig. 3b, along with constant-$r$ surfaces. In the de Sitter geometry, $r$ increases from 0 at the ‘origin’ (indicated by the dashed vertical lines in Fig. 3a), through the cosmological horizon $r = r_d$ (diagonal dashed lines), to $r = \infty$ at de Sitter $\mathcal{I}$ (bold horizontal lines). In Schwarzschild-AdS, on the other hand, $r = 0$ at the singularities (indicated by the horizontal squiggly lines in Fig. 
3b), increases through the black hole horizon $r = r_+$ (diagonal dashed lines), and becomes infinite at the AdS boundary (bold vertical lines). A possible trajectory of the shell is further sketched on both spacetimes as a thick dotted curve. The corresponding junction spacetime is found by patching the two Penrose diagrams together along the shell, as shown in Fig. 3c. Recall that the shell’s trajectory must of course pass through the same values of $r$ on both sides, given by $r = R(\tau)$. This means that if the shell starts out from zero size, expands, and recontracts, its trajectory must correspondingly start and end on an origin of de Sitter, and on a singularity in Schwarzschild-AdS, as sketched. Note that in the resulting diagram (Fig. 3c), $r = 0$ on the left, top, and bottom of the diagram, and $r = \infty$ only on the right vertical line. + +As is apparent from the Penrose diagrams, in a time symmetric set-up the shell reaches its maximum/minimum size $R_t$ at the $t=0$ slice (which passes horizontally through the + +⁹ These Penrose diagrams, as well as the constant-$r$ surfaces, are merely sketches; in actuality, the singularity would be curved in, *etc.*, as in [31] for Schwarzschild-AdS. Since they nevertheless capture many features of the causal structure, they are presented here and in the subsequent figures as sketches for ease of visualization. +---PAGE_BREAK--- + +middle of the diagrams and forms a symmetry axis). In de Sitter, this size $R_t$ is necessarily bounded from above by the de Sitter radius $r_d$ ($i.e., R_t \le r_d$), whereas in Schwarzschild-AdS, $R_t$ is bounded from below by the black hole horizon $r_+$ (so that $r_+ \le R_t$). 
We conclude that + +$$r_+ \le r_d \quad \text{for all time symmetric configurations.} \tag{2.11}$$ + +It immediately follows that the black hole entropy is smaller than the de Sitter entropy for time-symmetric domain wall configurations; the implications of this surprising fact are discussed in the next section. However, we will see that there also exist time asymmetric solutions where the black hole entropy is larger than the de Sitter entropy. + +**Fig. 4:** Shell trajectory corresponding to the time-symmetric cases (A) and (B) of Fig. 2, sketched on the de Sitter and Schwarzschild-AdS Penrose diagrams. + +We will now examine in greater detail what types of trajectories (and corresponding Penrose diagrams) are admissible. The effective potentials for the different cases indicated in Fig. 2, along with distinct possibilities for the extrinsic curvatures, are plotted in Fig. 12 in Appendix A. The time symmetric trajectories, corresponding to the top two cases (A and B) drawn in Fig. 12, are depicted on the spacetime diagrams in Fig. 4. The distinguishing feature between A1 (B1) and A2 (B2) is the sign of $\beta_o (\beta_i)$ at the turning point; the former include fewer bifurcation points. Similarly, the time asymmetric trajectories, described by the cases (C) and (E) of Fig. 12, as well as the static case (D), are shown on the respective spacetimes in Fig. 5. Along with (C1) and (C2), which start from $r=0$ and expand forever, we could of course also have their time reverse, as indicated by case (C') of Fig. 2; +---PAGE_BREAK--- + +**Fig. 5:** Shell trajectory corresponding to cases (C), (D) and (E) of Fig. 2, sketched on the de Sitter and Schwarzschild-AdS Penrose diagrams. In all cases, we glue together the left spacetime and the right spacetime across the domain wall, discarding part of each diagram. + +and similarly for case (E). As previously, the distinction between (C1) and (C2) comes from the sign of $\beta_i$ at large $r$. 
Case (D) is somewhat special: it corresponds to a static shell. Here we can have a global Killing field which is timelike everywhere outside the horizons. However, this geometry does not contain a piece of de Sitter $I^+$. + +We can now combine Penrose diagrams for the full junction de Sitter/Schwarzschild-AdS spacetime, as in Fig. 3c. The result is sketched in Fig. 6. + +Of the geometries described above, case A is an example of a false vacuum bubble that is excited in the true vacuum which re-collapses. These geometries are very similar to time symmetric spacetimes representing black hole collapse, except that the interior geometry is one with a different value of the cosmological constant. Cases B, C, E are the most interesting ones from our perspective, since here we see the presence of an inflating region of spacetime with de Sitter $I^+$. These are the geometries we will be interested in describing holographically from the boundary field theory living on the AdS boundary on the right. A crucial feature in these geometries is that the inflating region is hidden behind a black hole horizon from the AdS boundary. We shall later show that this situation is generic as long as the matter fields making up the domain wall satisfy the null energy condition. + +Note that in case C, the de Sitter horizon is not necessarily larger than the +---PAGE_BREAK--- + +**Fig. 6:** Sketches of the full Penrose diagrams combined from the corresponding cases of Fig. 4 and Fig. 5. Metrically, the space on the left of the shell (thick dotted curve) is de Sitter, while the space on the right of the shell in Schwarzschild-AdS. + +Schwarzschild-AdS horizon. In particular, which area is bigger depends on whether the de Sitter horizon (as drawn by the left diagonal dashed line in Fig. 6C) crosses the shell earlier or later than the black hole horizon. 
Since the radial coordinate increases monotonically along the shell, if the de Sitter horizon intersects the shell before the black hole horizon (at smaller $r$ and lower on the Penrose diagram), then $r_d < r_+$; conversely, if it intersects later, the de Sitter is bigger. Which of these is the case depends on the specific values of the parameters, but both possibilities are allowed¹⁰. + +It is possible to start with pure AdS space and smoothly deform parameters to obtain a geometry with an inflating region. A sequence of spacetimes which illustrates this is shown in Fig. 7. We can deform from one spacetime to the next in Fig. 7 by smoothly adjusting the bulk initial data – in particular, the size of the false vacuum bubble – on the $t=0$ spacelike slice. Note that although the local geometry does not vary much in this progression, the global properties vary substantially from case to case. Namely, (a) is causally trivial; in (b) the spacetime has an event horizon; (c) acquires regions which are causally disconnected from the boundary; in (d) the shell itself passes through such a region; in (e) the entire shell is causally disconnected from the boundary; and finally + +¹⁰ As explained in Appendix C, if in addition we require an initial Cauchy slice whose area increases monotonically and whose de Sitter part has domain of dependence which contains a piece of the de Sitter $\mathcal{I}$, then we necessarily obtain $r_d < r_+$. +---PAGE_BREAK--- + +**Fig. 7:** Sketch of Penrose diagrams obtained by continuous deformations of the initial data and parameters, starting with pure AdS and ending with de Sitter (including the future and past boundary $I^{\pm}$) in Schwarzschild-AdS. Note that (b) is possible only for small black holes $r_{+} < r_{A}$, and in (f) the de Sitter $I$ is generically joined to the Schwarzschild-AdS singularities by some metric which depends sensitively on the evolution (and hence drawn by a dotted line) but is unimportant for our discussion. 
+ +(f) acquires additional (de Sitter) asymptotic regions¹¹. Naively, one might expect that if (a) is described by a holographic dual, then so will (f), since it seems unnatural that the holographic encoding would cease abruptly in this progression. However, as we argue later, the nature of the state may change. + +## 2.2. Beyond the thin wall approximation + +We have seen that in the thin domain approximation we can find geometries that contain both de Sitter $\mathcal{I}$ and AdS $\mathcal{I}$. Of course, we are really interested in the case of gravity coupled to a scalar field in a potential of the form sketched in Fig. 1. As described above, such potentials describe low energy dynamics on the string theory landscape. Moreover, the resulting spacetimes will be smooth. In Appendix B we will give a detailed argument that the basic features of the geometry do not change in this more general setup. + +¹¹ For later discussions, we also note that running across the $t=0$ Cauchy slice of (d), (e) or (f) the size of spheres does not vary monotonically. In fact, for (e) and (f), the same is true for any Cauchy surface. +---PAGE_BREAK--- + +particular, one can argue based on causality that there are solutions of scalar-gravity that +contain both the de Sitter and AdS $\mathcal{I}$. In this section we will discuss a few characteristic +features of these solutions. + +Perhaps the most important feature is the fact that the de Sitter $\mathcal{I}$ is causally disconnected from the AdS $\mathcal{I}$ – there is no null geodesic connecting the two. This is a very general property of any spacetime satisfying the null energy condition, including the scalar-gravity system under consideration. This can be seen from Raychaudhuri’s equation for a congruence of null geodesics. Physically, because gravity is attractive, once a set of null geodesics start converging, they cannot diverge (unless they pass through an origin $r=0$). 
This immediately rules out null geodesics connecting the de Sitter and AdS $\mathcal{I}$: a null congruence must converge to go into the bulk from an AdS $\mathcal{I}$, and diverge to reach the de Sitter $\mathcal{I}$ from the bulk. + +Another curious property of some of the thin wall constructions is the presence of +a part of AdS boundary on the left which disappears and appears as the shell attains +infinite size. This second AdS boundary is an artifact of the thin wall approximation, +and does not appear in the full solutions of scalar-gravity [35]. Physically, any radiation +emitted by the shell near the boundary would suffer a large blue-shift as it propagates into +the bulk, and its backreaction would lead to a curvature singularity. This is somewhat +analogous to the Cauchy horizon instability at the inner horizon of a charged or rotating +black hole. While the exact nature of this singularity is of course difficult to determine +due to its sensitivity to the initial domain wall profile, we expect that it will meet up with +the Schwarzschild-AdS black hole singularity in a(n almost) null fashion, as sketched in +Fig. 7f. This can also be thought of as due to cosmic censorship¹², because of an obstacle +to Cauchy evolution (though by a boundary rather than a naked singularity). As is well +known, asymptotically AdS spacetimes are not globally hyperbolic without specification +of additional boundary conditions. Hence the appearance of an AdS boundary implies the +formation of a Cauchy horizon for the evolution of the initial data as we cannot evolve +the spacetime in the domain of influence of this boundary¹³. Now similarly, given generic +perturbations on a Cauchy surface at finite time, we expect that a big bang singularity +will remove any such AdS boundary in the far past. Hence it appears that these AdS +boundaries are also artifacts. + +So far we have limited our discussion to spherically symmetric geometries. 
However, +there is a possibility that some of these geometries are dynamically unstable to aspherical +fluctuations¹⁴. In particular, while the Schwarzschild-AdS and the de Sitter geometries are + +¹² We thank Gary Horowitz for pointing this out to us. + +¹³ This is not an issue on the right AdS boundary, since there the CFT tells us what boundary conditions to impose. + +¹⁴ We thank John McGreevy for alerting us to this possibility. +---PAGE_BREAK--- + +individually dynamically stable to fluctuations, the shell itself might be unstable. If one considers the positions of individual pieces of the shell as determined by the effective potential $V_{\text{eff}}$, the shell's deformations will grow with time. This is because in the mechanical motion of particles away from the extremum of the effective potential (2.10) two particles tend to accelerate away from each other. A set-up similar to ours has been recently considered in [40], where it was shown that certain de Sitter/Schwarzschild-de Sitter domain walls are indeed unstable. Further it appears that changing the outside geometry there from Schwarzschild-de Sitter to Schwarzschild-AdS does not remove this instability [41]. It would be interesting to analyze this potential instability in detail for our set-up. We do not expect the instabilities to radically alter our story – for example, in the solutions with de Sitter $\mathcal{I}$, we expect the aspherical perturbations to remain small compared to the size of the bubble, as in [42,43]. Finally, we should emphasize that many of the cases considered above are fine-tuned, in the sense that they have been chosen to be time symmetric. + +## 2.3. A special parameter domain + +The arguments of the following sections will be sharpest for a certain range of parameters. 
This regime is given by $r_d \ge R_t \gg r_+ \gg r_A = 1 \gg \ell_s$ where, as a reminder, $\ell_s$ is the string length, $r_d = 1/\sqrt{\lambda}$ is the de Sitter radius of curvature, $r_+$ is the black hole horizon radius, which for large $\mu$ is given by $r_+ = \mu^{1/3}$, $R_t$ is the domain wall position of the time symmetric solutions (cases A, B) at the turning point, and $r_A$ is the AdS curvature radius (which we have set to one). In this range of parameters the domain wall is very far away from the black hole horizon at all times, and causally disconnected from the right AdS boundary. Solving for the turning point of the effective potential (2.10), we find + +$$R_t \approx r_+ / (1 - \kappa^2)^{1/3}. \qquad (2.12)$$ + +We see that to achieve the condition $R_t \gg r_+$, $\kappa$ must be close to one. Recall that $\kappa \sim \sigma r_A/m_p^2$ where $\sigma$ is the tension of the domain wall. A brief survey of known parts of the landscape reveals regions with $\kappa \gg 1$ and regions with $\kappa \ll 1$. There is no reason not to expect many vacua with $\kappa \sim 1$. Typically, if $\kappa \ll 1$ then $R_t \to r_+$. If $\kappa \gg 1$ then $R_t \to r_+$ and $r_+ \to 1/\kappa$. Physically $\kappa \sim 1$ in AdS units is special because then the domain wall tension balances against the pressure which the true vacuum exerts on the domain wall. In flat space, such a balance would only be possible for one size of the bubble because the energy due to tension is proportional to the surface area of the domain wall, while the energy due to the pressure is proportional to the volume. In AdS, the two forces can almost cancel for a large range of bubble sizes because at scales big compared to the AdS radius, volume is proportional to surface area. For the special case of the static domain wall, we must also set the first derivative of the effective potential to zero, giving $\mu \sim 1/(1-\kappa^2)^2$ and $R_t \sim 1/(1-\kappa^2)$. 
+---PAGE_BREAK--- + +The curvature at the maximum of the effective potential (2.10) goes to zero as $\kappa \to 1$ and $\lambda \to 0$. The instability growth rate of the static domain wall is governed by this curvature and hence goes to zero in this limit. As $\kappa \to 1$ the static domain wall becomes arbitrarily far away from the black hole horizon and its instability becomes arbitrarily small. Note that the relevant time scale for the instability is the Schwarzschild time of the asymptotic observer $t_o$ and not the proper time of the shell $\tau$. The conversion factor at large $R_t$ is $\tau \sim t_o(R_t/r_A)$. In a typical regime in parameter space, we have $t_{\text{instability}} \sim r_d r_A/R_t$, which can be taken to be large. + +### 3. Properties of the boundary CFT + +In the previous section we constructed a family of de Sitter/Schwarzschild-AdS domain wall spacetimes, shown in Fig. 6, some of which have inflating (de Sitter $\mathcal{I}$) regions. These solutions all have asymptotically AdS regions. Consider first the pure AdS geometry in Fig. 6 describing the stable ground state¹⁵ of Fig. 1. If this is a vacuum of a consistent theory of quantum gravity (as we are assuming) then this theory defines a boundary CFT which represents the bulk via the AdS/CFT correspondence¹⁶. The scalar field representing the horizontal axis in Fig. 1 will be represented in the CFT and it seems plausible that one could excite a large number of its quanta to create the initial data for the de Sitter/Schwarzschild-AdS spacetimes¹⁷. However there is a puzzle about representing de Sitter degrees of freedom that at first glance casts doubt on this simple logic. This has to do with the fact that it naively appears that the boundary CFT must encode the dynamics of an enormous inflating region with far fewer active degrees of freedom accessible to it. + +As we will see below, the resolution to this puzzle is that the boundary CFT is in a mixed rather than a pure state. 
Some mixed states arise by simply integrating out certain degrees of freedom in the CFT [44]. Conversely, certain pure states are expected to mimic mixed states to a high degree of accuracy [45,46]. We should emphasize that here we are claiming that the relevant mixed states arise because the boundary CFT is entangled with new degrees of freedom associated with de Sitter region behind the horizon. This is analogous to the appearance of the thermal density matrix in the standard eternal black hole [32,33]. We will argue that the entangled degrees of freedom can be described as a + +¹⁵ To ensure stability, we may take this to be a supersymmetric minimum. + +¹⁶ This CFT should in principle contain information about all accessible vacua in the landscape and hence will be an extraordinarily complicated object. We expect that the wide separation of tunneling time scales will enable us to focus on the truncated landscape of Fig. 1. + +¹⁷ All geometrical scales can be taken much longer than string length and the coupling can be taken weak, so these gravity solutions should approximate the behavior of the full theory. +---PAGE_BREAK--- + +cutoff CFT coupled to additional non-CFT modes. The effect of the non-CFT modes can +be made parametrically small, demonstrating that the state is mixed. + +3.1. *The entropy puzzle* + +The arguments presented above seem to imply that the field theory dual to the geometries which incorporate an inflating region is constructed by acting on the vacuum state with an appropriate set of local operators. This would lead us to conclude that the spacetime geometry is dual to the field theory in a particular pure state. + +This picture however cannot be right, as it leads to a paradox regarding the entropy [34,35]. Let us consider a de Sitter bubble with $I^+$. As explained at (2.11), in the time-symmetric set-up, the size of the bubble on the $t=0$ slice, $R_t$, is smaller than the de Sitter radius and also larger than the black hole radius $r_+$. 
Hence we know that the black hole size $r_+$ is necessarily smaller than the size of the de Sitter cosmological horizon $r_d$, implying that the black hole entropy is smaller than the entropy associated with the de Sitter false vacuum bubble. + +The black hole entropy is associated with the number of active degrees of freedom in the boundary conformal field theory. This is the picture that is naturally suggested by black hole microstate counting using D-brane constructions. The de Sitter entropy, on the other hand, is a measure of the degrees of freedom necessary to define a quantum gravitational theory in de Sitter space [47]. Given this, it is hard to imagine how a pure state that is built out of the fewer black hole degrees of freedom can encapsulate the information required to describe the de Sitter space. This mismatch is what we term the *entropy puzzle*. + +In fact, it is easy to see that this entropy mismatch can be made arbitrarily large — after all, there is no restriction on the allowed de Sitter size at the level of classical geometries, since we could consider arbitrarily small positive values of cosmological constant. In a sense, we would have to use the vastly fewer degrees of freedom accessible to the boundary observer to describe the physics of arbitrarily many degrees of freedom. There exist (time asymmetric) geometries where the de Sitter entropy is less than the black hole entropy. We present the details of these solutions in Appendix C. Although the entropy puzzle is not present for these special cases, for time symmetric solutions the resolution lies in a different direction¹⁸. + +We argued above that the cosmological solutions with asymptotically AdS regions should be described by a boundary CFT. However, the assumption that the geometry with an inflating region is described by a pure state of the CFT leads to an entropy puzzle. 
Since all of the solutions constructed in Section 2 were found by matching onto + +¹⁸ For a different viewpoint on the entropy issue see [48]. +---PAGE_BREAK--- + +Schwarzschild-AdS solutions, the AdS boundaries are separated from the inflating region by a black-hole horizon. Furthermore, as discussed earlier, this feature is guaranteed in any construction in classical general relativity with matter obeying the null energy condition. It is therefore natural to expect that the boundary CFT is very similar to the thermal field theory dual to a standard Schwarzschild-AdS black hole. A crucial feature of these spacetimes is that the conformal theory living on a boundary is in a mixed state, with density matrix + +$$ \rho_{\beta} = e^{-\beta H} \tag{3.1} $$ + +rather than a pure state¹⁹ [49,32,33]. Here $H$ is the Hamiltonian of the CFT and $\beta$ the inverse temperature. In this section we will argue that the boundary CFTs dual to inflating geometries are also in a mixed state, whose density matrix differs from (3.1) only by small corrections. + +In section 3.2 we will start with a discussion of mixed states in various extensions of Schwarzschild-AdS, before moving on to a more general discussion of mixed states and causal structure in section 3.3. + +## 3.2. Mixed states in asymptotically Schwarzschild-AdS geometries + +We will start with the most symmetric example of domain wall spacetimes, the static domain wall, shown in Fig. 7e. In this geometry the domain wall is at a fixed radial position $R(\tau) = r_0 = R_t$. Despite the fine tuning necessary to attain this geometry, it serves as a simple example to illustrate the general principle we wish to propose. In fact, as discussed in section 2.3, by taking $\kappa \to 1$ we can dial $R_t \gg r_+ \gg r_A$ and make the size of the instability vanishingly small. In the thin wall approximation, the spacetime to the right of the domain wall is identically Schwarzschild-AdS. 
If the location of the domain wall is far removed from the black hole horizon, $R_t \gg r_+$, we have a large region of spacetime where the usual picture of a Schwarzschild-AdS black hole should hold. + +We will first consider the region of the spacetime in the Schwarzschild-AdS part of the geometry with $r \le R_t$, i.e., imposing a cutoff at a radial scale $r_c \sim R_t$. To describe the physics of just this cutoff spacetime in the dual field theory, recall that for the usual eternal Schwarzschild-AdS black hole (see Fig. 3b) with $r_+ > r_A$ the field theory dual is best described in the thermofield formulation. One associates a complete CFT Hilbert space to each AdS boundary of the black hole, labeled $\mathcal{H}_L$ and $\mathcal{H}_R$ respectively. These Hilbert spaces are non-interacting and the geometry is dual to a particular entangled pure state, the Hartle-Hawking state, in the tensor product Hilbert space $\mathcal{H}_L \otimes \mathcal{H}_R$ [36,32,33]. Tracing over one of the Hilbert spaces, say $\mathcal{H}_L$, leads to a self-contained description in $\mathcal{H}_R$, + +¹⁹ Strictly speaking, this is true only for large black holes, with $r_+ > r_A$. We will focus on this region of parameter space. +---PAGE_BREAK--- + +but in a mixed state. The density matrix is the thermal density matrix (3.1) at the black hole temperature. + +Physics in a cutoff Schwarzschild-AdS background is very similar to that of the non-cutoff geometry. Now, however, the dual CFT is replaced by a conformal field theory cutoff at energy $E_c \sim r_c/r_A^2$. We denote the corresponding Hilbert spaces as $\mathcal{H}_{L,R}^c$. Concentrating on energy scales below $E_c$, we see that the entangled state description in $\mathcal{H}_L^c \otimes \mathcal{H}_R^c$ is still valid. So at low energies the right hand field theory will remain in a mixed state, which is now found by entangling $\mathcal{H}_R^c$ with the cutoff theory coupled to gravity. 
Of course, this procedure is ambiguous at scales near or above $E_c$, but at low energies the density matrix is given approximately by (3.1), with corrections that vanish as powers of $E/E_c$. Further, it is clear that the mixed state description is the correct one in $\mathcal{H}_R \supset \mathcal{H}_R^c$ so long as energy locality holds. This is a consequence of bulk locality (in $r$). + +These arguments are best controlled when $R_t \gg r_+$ obtained by taking $\kappa \to 1$. But as long as the domain wall is some macroscopic distance from the horizon, a macroscopic fraction of the excited degrees of freedom on the left should be entangled with those on the right, yielding a mixed state with macroscopic entropy of entanglement. The above arguments have been made in the $\mu > 1$ “large” black hole regime. But it seems likely that even small black holes are described by entangled states and so these considerations should also apply to the $\mu < 1$ regime as well. + +Even at scales below the cutoff, the form of the density matrix is not unambiguously defined. In particular, the effective action of the cutoff CFT will include some number of irrelevant operators whose presence becomes important only at energies approaching $E_c$. Some of these effects can be calculated using bulk supergravity techniques. + +We have not yet discussed the effect of the de Sitter region to the left of the domain wall. For all time symmetric collapse and static geometries the de Sitter radius $r_d$ must be larger than $R_t$. So we can treat the de Sitter region as a piece of essentially flat space. This results in significant modifications to the cutoff CFT because bulk massless propagators in flat space decay like powers of the proper distance between points, while in AdS space they decay exponentially. So bulk massless fields can (and do) induce nonlocal terms in the effective CFT. 
But we will show that these nonlocal effects can be made arbitrarily small by taking the cutoff surface defining the CFT much smaller than the domain wall. + +To be specific take the horizon, cutoff and domain wall radii to have the following relative sizes: $R_t \gg r_c \gg r_+$. Take the most extreme case, a massless bulk field dual to a marginal operator $\mathcal{O}$ in the CFT. Ignoring the de Sitter contribution we have + +$$ \langle \mathcal{O} \mathcal{O}' \rangle \sim \frac{1}{r_c^6 L^6}. \qquad (3.2) $$ + +Here $L$ is the geodesic distance between $\mathcal{O}$ and $\mathcal{O}'$ on the cutoff surface. This formula follows from conformal invariance for marginal operators in a $D=3$ CFT or equivalently from +---PAGE_BREAK--- + +summing over bulk particle paths in the AdS region to build up the massless propagator. + +The de Sitter contribution behaves differently. Particle paths contributing to this behavior traverse a region of AdS space to the domain wall, then propagate in the de Sitter region before re-entering the AdS region and returning to the cutoff surface. We can calculate this contribution as the product of three propagators. Two account for the AdS propagation, each behaving like $(r_c/R_t)^3$. One accounts for the propagation through de Sitter space. This goes like $1/x^2$, the standard massless particle propagator in four dimensions, where $x$ is the distance between points. One can then fold these propagators together and integrate over joining points on the domain wall, as described in Appendix D. We find that the massless propagation in the nearly flat de Sitter region induces a correction to the correlator of order $(R_t L)^{-2}$, which is long range compared to (3.2). Hence the de Sitter bubble introduces “non-local” modifications which become appreciable in the infrared on scales longer than $L^2 > R_t/r_c^3$. 
These corrections appear because, essentially, modes which are non-normalizable in the full Schwarzschild-AdS geometry become normalizable when the AdS boundary is “cut off” and replaced by the de Sitter bubble. This would have produced a massless graviton coupled to the cut-off CFT [50,51] had we been considering the analogous constructions in higher dimensions. These modes are part of the non-CFT degrees of freedom necessary to describe the region beyond the cut-off surface. When $r_c \sim R_t$ the de Sitter region makes a large nonlocal modification to the CFT. But if we choose $R_t \gg r_c^3$ this correction is small compared to (3.2). So in this regime we can continue to make a controlled argument that the theory is in an entangled state. + +While we have focused the discussion above on the case of the static domain wall, a similar situation can occur for any of the cases where the de Sitter bubble wall passes through the region to the left of the black hole, e.g., the cases b, c, d and e in Fig. 6. In particular consider Fig. 6b, where the bubble expands in the far past and future. If the minimum bubble radius is much bigger than the radius of the black hole horizon, $R_t \gg r_+$, there is once again a large region to the right of the domain wall (and to the left of the horizon) where the geometry is Schwarzschild-AdS, and physics may be described in terms of a cutoff CFT. The discussion must be refined for the cases where the bubble shrinks towards either the future or past. However, it is clear that with some tuning, a large portion of Schwarzschild-AdS is relevant and a cutoff CFT can describe the physics for some large interval of time. + +The arguments discussed above are our strongest evidence for the mixed state nature of the CFT description of inflation. + +To summarize, the CFT dual to geometries with an inflating false vacuum bubble is necessarily in a mixed state. 
In particular, this means that the active degrees of freedom in the boundary field theory, whose number is given by $\exp(S_{bh})$, are entangled in a non-trivial +---PAGE_BREAK--- + +way with the degrees of freedom in the inflating region (which, as we have argued before, could be much larger). In this picture, the black hole entropy $S_{bh}$ is simply a measure of this entanglement. The boundary observer who evaluates correlation functions in the state dual to this geometry will conclude that the theory is in a mixed state with density matrix $\rho_{bdy}$ and an entanglement entropy $S_{bh} = S_{ent} = -\text{Tr}(\rho_{bdy} \log \rho_{bdy})$. In our picture, the large number of de Sitter degrees of freedom are entangled with the black hole degrees of freedom. However, bulk locality suggests that they are entangled very weakly. So when these degrees of freedom are traced over, the resulting entanglement entropy $S_{ent}$ is much smaller than $S_{dS}$. Thus the mixed state picture avoids the entropy puzzle described above. + +One striking aspect of this picture is the absence, say in the static domain wall case of Fig. 7e, of a second asymptotic boundary where the traced over degrees of freedom can be localized. This is in contrast to the eternal Schwarzschild-AdS black hole, where one traces over the degrees of freedom associated to one of the conformal boundaries. So it is natural to ask how one should describe the degrees of freedom that are entangled with the boundary CFT. One clue comes from the eternal Schwarzschild-AdS black hole, where bulk fields $\phi(r,t)$ in the right hand quadrant (see Fig. 9b) can be moved to the left hand quadrant by shifting $t$ by half a Euclidean period, $-i\beta/2$. Usually this transformation is used in the $r \to \infty$ limit where $\phi$ becomes an operator in the boundary CFT. This shift then relates the two boundary CFTs. But we can consider finite $r$ bulk fields as well. 
These can be constructed from the CFT fields by suitable coarse graining$^{20}$. For values of $r < R_t$, bulk fields in the left region, which can be described by the cutoff CFT, are related by this imaginary shift in $t$ to fields in the right region. So these degrees of freedom are accessible via analytic continuation. We will extend these considerations in Section 4. + +The fact that a large number of degrees of freedom in the inflating region are entangled, albeit weakly, with CFT degrees of freedom allows us to use the latter to infer some properties of the former. We clearly cannot reconstruct all the information pertaining to inflation, but by virtue of the entangled state construction we have access to some of the information. Before proceeding to discuss how this information may be encoded in the boundary field theory, we turn to an interesting question: what are the situations in which the boundary theory is in a mixed state? + +### 3.3. *Conditions for the appearance of mixed states* + +We have argued that a broad class of de Sitter bubble solutions must correspond to mixed states in the CFT. However, we now wish to consider to what extent these arguments can be applied to other solutions, like the rapidly collapsing shells. *A priori* it is not clear whether the boundary CFT is in a mixed or a pure state. We will now proceed + +$^{20}$ See [52,53,54] for examples of such coarse graining. +---PAGE_BREAK--- + +to discuss conditions which may delineate when a boundary CFT will be in a mixed state. +We will consider more general situations than the cosmological solutions described above, +and describe several possible scenarios under which mixed states arise. We will try to +formulate certain criteria for the appearance of mixed states; while some of these appear +to be sufficient to guarantee a mixed state description, we are unable to determine which +of these is necessary. 
+ +We will start by reviewing the bulk²¹ explanation for the appearance of mixed states. +According to the AdS/CFT correspondence, correlators of local operators in boundary +field theories are found by taking bulk correlation functions to the boundary and stripping +off the appropriate powers of radial coordinate. When there are regions in the spacetime +that are causally disconnected from the boundary – i.e., regions that are outside both +the past and future light cones of the boundary – then typically these bulk correlation +functions are evaluated in a mixed state. + +To see this, consider a quantum field $\phi$ in an asymptotically AdS spacetime with a +moment of time symmetry $t \rightarrow -t$. The Hilbert space of this scalar field can be written +in a position space basis at time $t = 0$. When the spacetime contains a region causally +disconnected from the boundary, the bulk Hilbert space can be factorized into two pieces +$\mathcal{H}^b = \mathcal{H}_R^b \otimes \mathcal{H}_L^b$, where $\mathcal{H}_R^b$ is spanned by operators located inside the causal wedge of +the boundary and $\mathcal{H}_L^b$ is spanned by operators in the causally disconnected region. To +calculate the expectation value of local boundary operators, we only need to calculate +bulk operators inside the light cone of the boundary. These operators act trivially on $\mathcal{H}_L^b$, +so they are evaluated in the mixed state found by tracing over $\mathcal{H}_L^b$: + +$$ \rho_R^b = \mathrm{Tr}_{\mathcal{H}_L^b} |\psi^b\rangle\langle\psi^b|. \qquad (3.3) $$ + +Here $|\psi^b\rangle$ denotes the (pure) state of the quantum field $\phi$. A priori, $|\psi^b\rangle$ might be of the form $|\psi_L^b\rangle \otimes |\psi_R^b\rangle \in \mathcal{H}_L^b \otimes \mathcal{H}_R^b$, in which case $\rho_R^b$ has zero entropy and describes a pure state. However, one can show that if this is the case then quantum backreaction will be large near the horizon, destroying the spacetime. 
This is a familiar fact for black hole or Rindler horizons (see e.g., [55]). In black hole geometries the Boulware vacuum factorizes, and leads to a divergent stress tensor at the horizon. The same is true for the Rindler vacuum of an accelerating observer. More generally, if the state $|\psi^b\rangle$ factorizes then the expectation value of a set of local operators jumps discontinuously as one of the operators moves across the bifurcation point. In particular, such a correlation function vanishes unless all of the operators are located on the same side of the bifurcation point. So the value of the stress tensor, which can be found by differentiating a two point function, will typically diverge. We conclude that the density matrix (3.3) describes a genuine mixed state with non-zero + +$^{21}$ We will distinguish bulk quantum field theory data by an explicit *b* superscript. +---PAGE_BREAK--- + +entropy. In general for any CFT observable which is supported only in some region B of +the boundary, one can show that it is fully determined by the part of the bulk spacetime +which corresponds to the causal wedge of the region B [56]. + +To summarize, if the standard AdS/CFT bulk to boundary dictionary is assumed in +spacetimes with causally disconnected regions, we arrive at the following criterion$^{22}$ + +*Criterion 1: Correlators of local operators in a boundary CFT are evaluated in a mixed state if there exist regions of spacetime that are causally disconnected from the boundary.* + +We should note that although the description in terms of a single boundary CFT is as a +mixed state, there may be additional descriptions of the geometry in terms of a pure state. +For example, in the eternal Schwarzschild-AdS geometry discussed above the spacetime +is described by two boundary CFTs in a particular pure entangled state. It is only by +tracing out degrees of freedom on one side that one obtains the mixed state description +of correlators on the other boundary$^{23}$. 
We should emphasize that the description of +the Schwarzschild-AdS geometry as a thermal state with density matrix $\rho_\beta$ on the right +boundary only determines bulk correlators in the causal wedge of the right boundary. It +does not, for example, unambiguously fix correlators of operators near the left boundary +or correlators between operators in the left and right causal regions. This is because there +are many choices of pure state $|\psi\rangle$ in the boundary Hilbert space $\mathcal{H} = \mathcal{H}_L \otimes \mathcal{H}_R$ which lead +to the same density matrix $\rho_R$ upon tracing over $\mathcal{H}_L$. To describe the entire geometry, +one needs to specify the pure state $|\psi\rangle$; typically it is specified by the Euclidean path +integral with appropriate boundary conditions. It is only once one specifies $|\psi\rangle$ that one +can, using, e.g., analytic properties, relate correlators in the entire spacetime to those in a +single boundary CFT. + +In more general spacetimes, such as the inflating geometries described above, it is +not clear how to describe the mixed boundary state as a pure entangled state of a larger +theory (or indeed whether such a pure state description exists). Given the fact that mixed +state correlators unambiguously fix correlators only in the causal region, it is necessary to +make an additional assumption in order to extract behind the horizon physics. As we will +describe in the next section, we will typically assume analyticity in the gravity description, +which in many cases amounts to defining a bulk state $|\psi^b\rangle$ on a complete Cauchy surface + +22 See Appendix E for a critique of this criterion. + +23 There is reason to expect that this behavior is a general feature of spacetimes with multiple asymptotic AdS boundaries. In general, solutions to Einstein's equations contain multiple asymptotic AdS boundaries only under very specific circumstances [57,58,59]. 
Typically, such solutions have singularities in both the far past and the far future and the conformal boundaries are causally disconnected. Thus the boundary CFTs do not interact, but are evaluated in an entangled state of the form described here. +---PAGE_BREAK--- + +by Euclidean continuation. This leaves as implicit the construction of a pure entangled +state in the dual holographic theory. + +One may wish to conjecture a stronger criterion where “if” is replaced by “if and only if”. However, [44] considers a mixed state described by pure AdS space but where the foliation lends itself to tracing over the CFT degrees of freedom on half of the boundary. Certainly here there are no regions causally disconnected from the full boundary. Rather it is only that degrees of freedom on different components of the same boundary are entangled. Hence a stronger conjecture might be made as + +*Criterion 1': Correlators of local operators in a boundary CFT are evaluated in a mixed state if and only if there exist regions of spacetime that are causally disconnected from the corresponding boundary components.* + +One drawback of either of the criteria outlined above is that the presence of a causally +disconnected region is a global property of the spacetime. In AdS/CFT, one expects bulk +Cauchy evolution to correspond to Hamiltonian evolution in the boundary. One is therefore +tempted to conclude that the prescription of Cauchy data on a spacelike slice suffices to +determine the state of the boundary theory. This criterion depends only on the behavior +in the neighborhood of a spacelike slice, and only very indirectly on global properties of +the spacetime. It should not be necessary to evolve the data, and then infer from this +bulk evolution that the spacetime has a causally disconnected region, to conclude that +dual CFT state is mixed. 
Furthermore, there appear to be explicit examples with causally +disconnected regions which are nevertheless described by pure boundary states – these are +discussed in Appendix E. + +We have been careful to focus the above discussion on local operators, whose correla- +tion functions are easily extracted from bulk correlators. However, there are other possible +criteria for the existence of mixed states which do not rely on bulk field theory arguments. +For example one can motivate a criterion based on the scales the boundary observer can +probe, which we expect by ideas of holographic renormalization to be related to the proper +size of the spheres in geometries with spherical symmetry. Consider a spacelike slice (say +along $t=0$) with metric + +$$ds_{t=0}^2 = dr^2 + R(r)^2 d\Omega^2 . \qquad (3.4)$$ + +Geodesics with angular momentum *L* in the geometry (3.4) achieve a minimum radial scale, *Rmin* = *L*, before turning back. If we consider correlation functions of high dimension operators at fixed large *r*, the two point function is given by the geodesic length24. Since + +²⁴ Strictly speaking, these geodesics appear as local extrema of a path integral, and an additional calculation is needed to determine where they are the dominant contribution. We’ll ignore this subtlety for now. +---PAGE_BREAK--- + +the geodesic turns around it only samples part of the geometry (3.4). Further, however, the variation of the corresponding correlator with $L$ is a sharp probe sampling the geometry at $R_{min}$ and implicitly the CFT at the corresponding energy scale. If $R(r)$ were monotonic, in the limit $L \to 0$ we can probe any radial interval down to $R(r) = 0$. In contrast, if $R(r)$ is not monotonic, there will be intervals which this probe cannot access. 
This restriction on the scales that can be probed by the boundary correlators is evocative of the c-theorem, if we assume that the size of the spheres is a sensible measure of the effective number of degrees of freedom. This is suggestive then that whenever we have a Cauchy slice with a non-monotonic proper size for the spheres (as for example in the eternal Schwarzschild-AdS black hole) there are degrees of freedom that are not accessible to the boundary observer. One can therefore conjecture an alternate criterion for a mixed state description in the boundary field theory: + +*Criterion 2: When the spacetime has spherical symmetry, boundary correlators are evaluated in a mixed state only when the radial sizes of the spheres are non-monotonic along the spacelike slice at $t=0$.* + +We should emphasize that this differs from the first criterion described above. In particular, there are spacetimes where $R(r)$ is monotonic, but nevertheless the regions of small $R$ are causally disconnected. An example is a collapse spacetime, where a shell of matter is sent in from asymptotic infinity to create a black hole in the interior, as discussed in Appendix E. Another distinction is that this criterion is local in time – Cauchy data alone suffices to determine whether or not a state is pure. Of course, implicitly here we are only considering time symmetric configurations. As discussed in Appendix C, there are more general solutions where Cauchy slices may be chosen to have the proper sizes of spheres varying either monotonically or non-monotonically. It is far from clear how to extend this criterion to such cases. + +This criterion was derived from holographic considerations and relies strongly on the choice of spacelike slices. One might therefore attempt to formulate a more covariant criterion, in terms of null slices rather than spatial slices. 
Demanding that the sizes of spheres be monotonic along lightlike rather than spacelike directions is equivalent to the condition that there be no additional holographic screens, as defined by Bousso [60]. It is straightforward to construct asymptotically AdS spacetimes for which this criterion differs from the other two, so for completeness we summarize this as + +*Criterion 3: The correlators of operators in a boundary CFT are evaluated in a mixed state only if the spacetime has additional holographic screens.* + +The criterion conjectured here should probably be refined as one finds additional holographic screens (beyond the AdS boundary) for any collapsing bubbles (even Fig. 7b). +---PAGE_BREAK--- + +Naively, at least, very light bubbles such as this would be in a pure state. Of course, +this just underscores our problem that holography in general spacetimes remains poorly +understood. + +We have not undertaken an extensive classification of the differences between these three criteria. It is however easy to construct examples where we obtain differing results depending on the criterion chosen. For example, in the progression sketched in Fig. 7, Criterion 3 applies to cases (b) - (f), Criterion 1 to cases (c) - (f) and Criterion 2 to cases (d) - (f). As should be apparent, we are unable to offer a single criterion that is both necessary and sufficient for the CFT to be in a mixed state. This remains an interesting open problem. + +4. Probes of inflation in AdS/CFT + +In the preceding sections we have shown that it is possible to construct, within the +classical approximation, domain wall spacetimes that interpolate between de Sitter and +AdS. In some cases, one can obtain a large inflating region, which is separated from an +asymptotic AdS boundary by a black hole horizon. We have argued that in these cases +the dual boundary theory is in a mixed state. 
In this section we will discuss the extent +to which boundary CFT observables, calculated in this mixed state, contain information +about the inflating region$^{25}$. + +At first glance, we might worry that the mixed state results from tracing over the +degrees of freedom behind the horizon, including the ones describing inflation, and so no +vestige of these degrees of freedom will be visible. This is not the case$^{26}$. In fact, the +traced-over degrees of freedom leave a substantial imprint on the mixed state which can + +$^{25}$ Throughout this section, we consider only the effects that arise due to field propagation in the background de Sitter-Schwarzschild-AdS spacetime. Since the vev of the scalar field is shifted inside the de Sitter bubble, the masses of particles may change in this region because of couplings to the scalar — for large mass particles this should be a minor effect. There may also be mixings between different species. So in general the physics will be more complicated than we have considered. + +$^{26}$ Heuristically, this can be seen by considering an entangled state in a tensor product of two identical Hilbert spaces, each of dimension $d$, $d \gg 1$. While an entangled pure state in the product space is described by $d^2$ complex parameters, the density matrix obtained by tracing over one Hilbert space is described by $d^2/2$ complex parameters. Roughly speaking, the density matrix can pin down “half” of the entangled state. Unitary transformations of the traced over space leave the density matrix unchanged and account for hidden information. There are only $d$ independent pure states in each individual Hilbert space. The density matrix carries far more information than the selection of a pure state. +---PAGE_BREAK--- + +be analyzed. One well known example of this is the eternal Schwarzschild-AdS black hole where correlators in the thermal density matrix describe the degrees of freedom on the right AdS boundary. 
Analytically continuing the arguments of operators in imaginary time gives correlators describing degrees of freedom on the left boundary$^{27}$. In the following section we will use analyticity to probe degrees of freedom in the inflating region. We expect correlators to be analytic for a wide variety of physically interesting states, including those under discussion here. + +In section 4.1 we will follow the strategy of [29,30,31] to probe physics behind the horizon, and examine correlation functions of high dimension operators in the boundary theory. For such operators, a boundary two point function is given by the length of the bulk spacelike geodesic which connects the two points. We will show that the presence of de Sitter $I^+$ leads to a singularity in the correlation functions of the boundary theory, after appropriate analytic continuation. In section 4.2 we will describe an even more dramatic probe of the inflating region. In the classical approximation, the boundary CFT can be analytically continued to construct dS/CFT correlators living on the asymptotic de Sitter $I^\pm$. Once quantum effects are included this provides a powerful probe of the non-perturbative physics of the inflating region. + +### 4.1. Geodesics probes of domain wall spacetimes + +We will start by considering correlation functions of boundary operators $O(x)$ with large dimension $\Delta$ describing bulk particles with mass $m \sim \Delta$. In the limit where $m$ is large the two point function can be evaluated in semiclassical approximation and is given by + +$$ \langle O(x) O(y) \rangle \sim e^{-m \mathcal{L}(x,y)} \quad (4.1) $$ + +where $\mathcal{L}(x,y)$ is the proper length of the spacelike geodesic connecting the two points on the boundary$^{28}$. This length is formally infinite, so $\mathcal{L}$ is regularized by taking $x$ and $y$ slightly away from the boundary. 
In this section we will focus on radial geodesics, whose form can be found by patching together geodesics on either side of the domain wall along with an appropriate junction condition across the wall. + +$^{27}$ For reviews of these analyticity concepts see, for example, [28,30,31]. + +$^{28}$ There are a few subtleties in this argument, which we will mostly neglect in the following. In particular, one must prove that the geodesics under consideration lie on the path of steepest descent in order to contribute to the correlator. Even if this is not the case, however, the effect described below will still be visible upon analytic continuation of correlation functions. To track the “metastable” geodesic reliably requires $m \to \infty$. The heaviest particles available as probes are wrapped D-branes with masses of order $1/g_s$. So this technique requires the ability to take $g_s \to 0$. There are examples in the landscape [11] where this is possible. +---PAGE_BREAK--- + +For radial geodesics in spacetimes of the form (2.1), the geodesic equations are + +$$ \frac{dt_{\alpha}}{d\tau_g} = \frac{E_{\alpha}}{f_{\alpha}(r)}, \quad \left(\frac{dr}{d\tau_g}\right)^2 = f_{\alpha}(r) + E_{\alpha}^2, \qquad (4.2) $$ + +where $\alpha = i, o$ and $\tau_g$ is the affine parameter along the geodesic. $E_\alpha$ is a conserved quantity associated with the Killing vectors $\left(\frac{\partial}{\partial t_\alpha}\right)^a$ on either side of the domain wall. To trace geodesics through the junction, we assume that the domain wall is transparent, i.e., an observer on the wall measures the same energy and momentum for a geodesic on both sides. We also use that $r$ is continuous across the junction. On the other hand, $t$ jumps discontinuously across the domain wall. We can determine the jump in $t$ from the normalization of the 4-velocity $u^a$ of the shell: + +$$ u^a u_a = -1 = -f_\alpha(r) \dot{t}_\alpha^2 + \frac{\dot{r}^2}{f_\alpha(r)}. 
\qquad (4.3) $$ + +Using $\dot{r}^2 = -V_{\text{eff}}(r)$ gives + +$$ t_{\alpha}(r) = - \int \frac{\beta_{\alpha}(r)}{f_{\alpha}(r) \sqrt{-V_{\text{eff}}(r)}} dr . \qquad (4.4) $$ + +To find the explicit relation between $t_i$ and $t_o$ we would have to invert (4.4). Moreover, to determine where a particular spacelike geodesic intersects the bubble trajectory, we would have to solve the implicit equations $R(\tau) = r(\tau_g)$ and $t_{\text{bubble}}(\tau) = t_{\alpha}(\tau_g)$. Finally, to obtain $t_{\alpha}(\tau_g)$ and $r(\tau_g)$, we must invert the expressions for $\tau_g(t)$ and $\tau_g(r)$ from (4.2). Therefore, except in very special cases, one can not write a closed form expression for the geodesic. + +**Fig. 8:** A few null geodesics in thin domain wall spacetimes. (a) Collapsing false vacuum bubble geometry (b) Inflating false vacuum bubble geometry. +---PAGE_BREAK--- + +For certain special geodesics, $\mathcal{L}$ vanishes – i.e., the geodesic becomes null – and the two point function $\langle \mathcal{O}(x) \mathcal{O}(y) \rangle$ has a singularity. Such null geodesics are found by taking the $E \to \infty$ limit of the spacelike geodesics described above. In this case it is simple to solve for the geodesics explicitly. The Penrose diagrams for two domain wall geometries, along with a few null geodesics, are sketched in Fig. 8. While the black hole singularities are drawn curved [31], on a true Penrose diagram, de Sitter origin and $\mathcal{I}^\pm$ would also bend. + +The null geodesics Fig. 8 are drawn as bouncing off the black hole singularity and de Sitter $\mathcal{I}$. This is because our null geodesics arise as a limit of spacelike geodesics, which are repelled from both the singularity and de Sitter $\mathcal{I}$. 
To see this, note that near the $r=0$ singularity of Schwarzschild-AdS, the geodesic equations (4.2) become + +$$ \left(\frac{dr}{d\tau_g}\right)^2 = E^2 - \frac{\mu}{r} \implies \frac{dr}{d\tau_g} = 0 \quad \text{and} \quad \frac{d^2r}{d\tau_g^2} > 0 \text{ at } r = \frac{\mu}{E^2}. \qquad (4.5) $$ + +We conclude that a spacelike geodesic is repelled by the singularity at a distance $r_{\min} = \frac{\mu}{E^2}$. Likewise, near the de Sitter $\mathcal{I}^+$ the geodesic equation becomes + +$$ \left(\frac{dr}{d\tau_g}\right)^2 = E^2 - \frac{r^2}{r_d^2} \implies \frac{dr}{d\tau_g} = 0 \quad \text{and} \quad \frac{d^2r}{d\tau_g^2} < 0 \text{ at } r = E r_d . \qquad (4.6) $$ + +So a spacelike geodesic turns around at maximum radius $r_{\max} = E r_d$. This property, that spacelike geodesics are repelled by de Sitter $\mathcal{I}^\pm$, is analogous to the fact that timelike geodesics are repelled by the timelike boundary of AdS. In the limit where the geodesic becomes null, $E \to \infty$, the geodesics simply bounce off the singularity and de Sitter $\mathcal{I}$. + +We conclude that CFT correlators will have additional singularities due to null geodesics behind the horizon, of the form + +$$ \langle \mathcal{O}(t, \Omega) \mathcal{O}(s, -\Omega) \rangle \sim \frac{1}{(s - t'(t))^{2m}}, \qquad (4.7) $$ + +where $t'(t)$ indicates the point where the null geodesic starting at $t$ re-emerges on the boundary and depends on the particulars of the geometry. The operators are at antipodal points on the sphere because any geodesic that returns back to the AdS boundary has to pass through the origin of de Sitter, where it will move to the opposite side of the sphere. + +The singularities (4.7) seen in the analytically continued correlators are not time translation invariant, rather variations in the separation $t'(t)$ and overall coefficient reveal the interesting dynamics of the de Sitter bubble in the left causal region. One particularly strong signal is as follows. 
The inflating geometry in Fig. 8b gives rise to two classes of null geodesics: those associated with the singularity at $t'_2(t_2)$, which reflect off the de Sitter $\mathcal{I}^+$, and those associated with $t'_1(t_1)$, which miss the de Sitter $\mathcal{I}^+$ and bounce off the black hole singularity. So one will find that the singular behavior associated with +---PAGE_BREAK--- + +$t'_1(t_1)$ is extinguished for a certain range of $t$ as the corresponding geodesic passes the junction of the de Sitter and AdS boundaries. Note that while we argued earlier that once back-reaction is included this junction is replaced by a big crunch singularity, the latter singularity does not repel geodesics. Hence the extinction of the singularity should be even more pronounced. This behavior will not be found in the analytically continued correlators corresponding to non-inflating geometries like Fig. 8a, so provides a distinct signature of inflation in the boundary CFT. + +## 4.2. From AdS/CFT to dS/CFT and beyond + +There are even more powerful probes of the inflating region available in this system, which build on analyticity. + +**Fig. 9:** Conventions for imaginary part of the time coordinate. (a) de Sitter spacetime (b) Schwarzschild-AdS geometry. + +We start by recalling a basic manipulation in the eternal Schwarzschild-AdS black hole mentioned above. Schwarzschild-AdS can be described by four static coordinate patches, whose time coordinates have different imaginary parts. In our conventions, the imaginary part of the time coordinate is shown in Fig. 9b. For example, in the Schwarzschild-AdS geometry each time we cross a horizon going counter-clockwise on the Penrose diagram, we pick up an imaginary part $-\frac{i}{4}\beta$, where $\beta$ is the inverse Hawking temperature. So if we continue time by half a Euclidean period $\beta$, $t \rightarrow t - i\beta/2$, we take a point on the right boundary of Schwarzschild-AdS to one on the left boundary. 
+ +In the domain wall spacetimes, however, there is no left boundary. So, instead we can consider a point at large but finite $r$. Now continue time by half a period. In the thin wall approximation the metric is just Schwarzschild-AdS (assuming $r$ is not too large). So this continuation yields a point in the left quadrant at the same $r$ and $\Re(t)$. Now one +---PAGE_BREAK--- + +can move into the de Sitter region by continuing in (real) $r$. We here go beyond the thin +wall approximation and assume, as is plausible, that the domain wall is smooth. There +will be a sharp signal in a correlator as one of its points crosses the domain wall. (For +concreteness imagine a two point function where both points have been continued to near +the domain wall.) This will be a first sheet effect, visible at finite $g_s$, unlike the signal due +to subdominant geodesics discussed above. It is not impossible for a two point function +that is smooth on the AdS boundary to display a sharp signal under analytic continuation. +As an example imagine eternal Schwarzschild-AdS with an operator inserted on the left +boundary and the two correlated points on the right boundary. On continuation by half a +period the correlated points can collide with the left hand operator giving a large signal. +In fact, at finite $r$ the signal will be nonsingular, but as we move to the boundary $r \to \infty$ +a domain wall of fixed bulk width becomes sharper and sharper in boundary variables. +The boundary correlator will be singular, and possibly non-analytic. This is somewhat +analogous to the Janus solution of [61,62]. + +Once we have changed $r$ enough to enter the de Sitter region, we may cross the de +Sitter horizon by shifting the imaginary part of $t$ once more. Again, this is because in +static coordinates de Sitter is covered by four coordinate patches, whose time coordinates +have different imaginary parts – see Fig. 9a. 
So we may move through the de Sitter horizon +by taking $t \rightarrow t - i\beta/2 - i\beta_d/4$, where $\beta_d$ is the inverse de Sitter temperature. We should +mention that smoothing out the domain wall to a small but finite thickness will change +these Euclidean shifts by a small amount. Because the metric has bounded first derivative, +these changes are uniformly small. Thus going beyond the thin wall approximation will +not alter the basic picture we are describing. + +Having moved points past the de Sitter horizon we can now study behavior near de +Sitter future infinity by taking $r \to \infty$. If we consider a bulk scalar field on this classical +geometry we know that correlators near de Sitter future infinity give the conformally +invariant results of dS/CFT [19]. But correlators near the AdS boundary give AdS/CFT +results. We thus see that analytic continuation takes AdS/CFT correlators into dS/CFT +correlators. We will work out an explicit example of this in Appendix F, for the spacetimes +described by Coleman and de Luccia [63]. In this case one can write down explicit formulas +for boundary correlators that continue from AdS/CFT to dS/CFT. In this particular +example, however, the AdS boundary will typically be destroyed by backreaction. So the +explicit calculation in Appendix F may be thought of as a toy model for the full analytic +continuation required to go from a Schwarzschild-AdS boundary to the de Sitter boundary. + +We should be able to extend these ideas beyond the classical supergravity approx- +imation. Assuming we have a non-perturbative definition of the boundary correlators, +continuing them gives a precise description of certain aspects of quantum gravity near +what was de Sitter future infinity. In principle, this should include effects like bubble +---PAGE_BREAK--- + +nucleation of other “pocket universes” in the far future [64]. The correlators taken to the boundary are natural diffeomorphism-invariant observables. 
Working at finite but large $r$ introduces some scheme dependence probably related to the ambiguities in defining measures in eternal inflation. It is an interesting question to investigate which quantities have a sensible, unambiguous large $r$ limit. + +**5. Can inflation begin by tunneling?** + +After Farhi and Guth [26] established that beginning inflation classically required a past singularity, Farhi, Guth and Guven [27] (FGG) made the interesting proposal that inflation could be initiated by quantum tunneling. They computed a nonzero rate for this process using a Euclidean instanton. This rate was also derived using Hamiltonian techniques [65,66] (see also [67]). + +Roughly speaking, FGG envisioned a process where an initial state (a “buildable” state) was constructed by classical field evolution. This state would then undergo tunneling. The initial configuration would look like the bound trajectory on the left hand side of the effective potential²⁹ (Fig. 2a). It would tunnel “through the effective potential” to the unbound inflating trajectory with the same energy on the right hand side. + +A number of authors have argued that this process is not physically allowed. In particular, Banks [37] argued that since the de Sitter entropy of the inflating region is characteristically greater than the entropy of the black hole surrounding it, ideas of black hole complementarity and holography prohibit the process. Susskind [38] has given a somewhat different entropic argument that conflicts with the instanton rate. + +The picture developed in this paper allows us to give a sharp argument against FGG tunneling, at least in the AdS context. The initial buildable state is clearly obtainable by unitary quantum time evolution, and so is a pure state. The final state has an inflating region, and so by the arguments in Section 3 is a mixed state. But unitary quantum evolution cannot take a pure state to a mixed state. So this process cannot occur. 
In fact, no state corresponding to inflation can ever result from any pure state process. This argument is close in spirit to that of [37], since the large entropy of the de Sitter region requires that the state be mixed. But the argument presented here is more general, since there exist time asymmetric situations where the de Sitter entropy is less than the black hole entropy but where the state is nonetheless mixed, and hence creation by any process is ruled out.

Such a simple argument demonstrates the power of embedding a physical phenomenon in a well defined non-perturbative formalism. But it is still important to understand the loophole in the FGG argument.

²⁹ In order to avoid singularity theorem constraints and hence be buildable, the initial geometry must be (a small deformation) of the lower half of Fig. 7b.
---PAGE_BREAK---

## 6. Discussion

We started our discussion by assuming the existence of the string landscape, with many de Sitter and AdS vacua. We restricted our attention to a stable (supersymmetric) AdS vacuum and a neighboring de Sitter minimum. Focusing on the low energy gravity dynamics (and choosing points on the landscape, and hence parameters, appropriately) we solved for the geometry of the system using the thin wall approximation. As expected from previous work [26,27], we found parameter domains with inflating behavior behind a black hole horizon. The stable AdS minimum should, by general arguments, be described by a boundary CFT. Excitations of the CFT should probe inflationary physics. One of our basic conclusions is that these inflating regions must be described by a mixed state, i.e. a density matrix in the CFT. Our strongest argument interpreted the static domain wall as a cutoff version of the eternal Schwarzschild-AdS black hole, a system known to be described by a mixed state. The inflating geometries will certainly be mixed if this one is.
Additional degrees of freedom besides the cutoff CFT are necessary to describe the region beyond the domain wall. This mixed state description resolves an entropy puzzle because the large number of inflating degrees of freedom need not be explicitly represented in the CFT. This description raises several important questions. First, as we smoothly increase the size of the initial bubble, moving through the progression of geometries illustrated in Fig. 7, when does a mixed state become necessary for a CFT description? We have discussed several possible answers to this question but it still remains open. More generally, given a rather arbitrary mixed state in the CFT, what is its geometric interpretation? Finally, a striking aspect of this description is the necessity of using a mixed state to describe a geometry with one asymptotic region (as in the static domain wall example). Most previous examples requiring mixed state descriptions had other asymptotic (AdS) boundaries. They could be given a pure state description if all boundary degrees of freedom were kept. Mixed states resulted when some boundaries were traced over. Here we do not have an explicit representation of these extra degrees of freedom, although we know many of their properties. This provides a rather well controlled system in which to search for new, non-boundary, descriptions of non-perturbative quantum gravity. + +We described techniques for probing the inflating region, even though the degrees of freedom there were not given explicitly. These techniques relied on analyticity. The first used geodesics. These geometries have nearly null geodesics that bounce off de Sitter $I$, so they give subdominant singular contributions to certain correlators. To study them one must take $g_s$ very small, which suppresses bubble nucleation. Chaotic eternal inflation should still be visible with these probes. More generally one can continue correlators to complex time. 
We argued that AdS boundary correlators should continue to de Sitter boundary correlators in the classical limit. At finite $g_s$ the complex pattern of bubble nucleation and other non-perturbative processes should be visible in these continued +---PAGE_BREAK--- + +correlators, assuming we had a way of precisely computing them. + +Even without the ability to calculate the full density matrix beyond the supergravity approximation we were able to draw some general conclusions, relying only on general features of the picture we have developed. In particular we were able to argue that inflating regions could not be produced, even by quantum mechanical tunneling, in a scattering process because a pure state cannot evolve into a mixed state under Hamiltonian time evolution. More results of this general type would certainly be welcome. + +**Acknowledgments:** We would like to thank Ofer Aharony, Tom Banks, Raphael Bousso, Petr Hořava, Gary Horowitz, Shamit Kachru, Matt Lippert, Juan Maldacena, Don Marolf, Michael Peskin, Simon Ross, Yasuhiro Sekino, Eva Silverstein, Lenny Susskind, and Erik Verlinde for stimulating discussions. VH and MR would also like to thank Gary Horowitz and Sandip Trivedi for initial collaboration on related issues. VH, AM, and MR would like to acknowledge KITP and the Fields Institute for hospitality during this work. VH and MR would also like to thank the organisers of the 2005 Amsterdam String theory workshop for hospitality. This work was supported in part by the Stanford Institute for Theoretical Physics, NSF grant PHY-9870115, the Department of Energy under contract DE-AC02-76SF00515, the funds from the Berkeley Center for Theoretical Physics, DoE grant DE-AC02-05CH11231, and the NSF grant PHY-0098840. In addition, research at the Perimeter Institute is supported in part by funds from NSERC of Canada and MEDT of Ontario. RCM is further supported by an NSERC Discovery grant. + +## Appendix A. 
Details of the thin wall geometries

In Section 2 we briefly outlined the construction of thin domain wall spacetimes and stated the main results for the specific case of interest, the de Sitter/Schwarzschild-AdS junction. In this appendix we will go into more detail and derive these results. As many of these results apply in a broader set-up than presented above, we first present the effective potential and extrinsic curvatures for a more general junction between two spacetimes, with two free parameters (the black hole mass and the cosmological constant) each.

### A.1. *Effective potential and extrinsic curvatures*

We consider $D$-dimensional metrics of the form (2.1), (2.2), with

$$f_{\alpha}(r) = 1 - \lambda_{\alpha} r^2 - \frac{\mu_{\alpha}}{r^{D-3}}, \quad (A.1)$$

where $\alpha$ stands for *i* or *o*. Here $\lambda$ is related to the cosmological constant, which can have either sign, and $\mu$ to the black hole mass. (Thus for example, $\lambda_i > 0$ and $\lambda_o < 0$
---PAGE_BREAK---

with generic $\mu_{\alpha} > 0$ would correspond to the Schwarzschild-de Sitter/Schwarzschild-AdS junction.) The effective potential is

$$
\begin{equation}
\begin{aligned}
V_{\text{eff}}(r) = & - \left[ \lambda_o + \frac{(\lambda_o - \lambda_i - \kappa^2)^2}{4 \kappa^2} \right] r^2 + 1 - \left[ \mu_o + \frac{(\lambda_o - \lambda_i - \kappa^2)(\mu_o - \mu_i)}{2 \kappa^2} \right] \frac{1}{r^{D-3}} \\
& - \frac{(\mu_o - \mu_i)^2}{4 \kappa^2} \frac{1}{r^{2D-4}},
\end{aligned}
\tag{A.2}
\end{equation}
$$

and the extrinsic curvatures are

$$
\begin{align*}
\beta_i(r) &= \left( \frac{\lambda_o - \lambda_i + \kappa^2}{2\kappa} \right) r + \left( \frac{\mu_o - \mu_i}{2\kappa} \right) \frac{1}{r^{D-2}} \\
\beta_o(r) &= \left( \frac{\lambda_o - \lambda_i - \kappa^2}{2\kappa} \right) r + \left( \frac{\mu_o - \mu_i}{2\kappa} \right) \frac{1}{r^{D-2}},
\end{align*}
\tag{A.3}
$$

for this choice of geometries.
+ +Let us consider some generic features of the effective potential (A.2). First, at small $r$, the last term dominates (provided $\mu_o \neq \mu_i$ as will be the case whenever the shell carries energy), implying $V_{\text{eff}}(r) \to -\infty$ as $r \to 0$. Hence it is always possible to have a shell which implodes to zero size. Such a shell crashes into the black hole singularity at $r=0$ in a finite proper time. + +Secondly, at large $r$, the first term in (A.2) dominates (again, provided its coefficient does not vanish). The $r^2$ coefficient may have either sign, but noting that we can re-express it as + +$$ +-\left[\lambda_o + \frac{(\lambda_o - \lambda_i - \kappa^2)^2}{4\kappa^2}\right] = -\frac{1}{4\kappa^2}[(\lambda_o + \lambda_i + \kappa^2)^2 - 4\lambda_o\lambda_i], \quad (\text{A.4}) +$$ + +we see that whenever the inside and outside cosmological constants have opposite sign, this +coefficient is necessarily negative. This means that in such cases $V_{\text{eff}}(r) \rightarrow -\infty$ as $r \rightarrow \infty$, +so that it is possible to have a shell which expands forever and (after infinite proper time) +hits the boundary of both spacetimes. For $\lambda_i > 0$ this describes inflation. + +On the other hand, if the $r^2$ coefficient (A.4) of the effective potential is positive, then +the boundary $r = \infty$ is not attainable by any shell, and a time symmetric situation is always +possible. We can similarly read off the more detailed behavior of the shell by considering +the extrinsic curvatures, as we do next in the 4-dimensional de Sitter/Schwarzschild-AdS +context. This will justify the results presented in Section 2. + +A.2. Thin wall trajectories + +Focussing now on the specific case of 4-dimensional de Sitter/Schwarzschild-AdS junc- +tion given by (2.9), the effective potential (A.2) simplifies to (2.10). 
To see where the shell is allowed to appear on the appropriate Penrose diagrams, we will first consider the behavior in the vicinity of small and large $r$. As mentioned in Section 2, this is determined
---PAGE_BREAK---

by the sign of the extrinsic curvatures on the two sides of the shell. These are given by (A.3), which for the de Sitter/Schwarzschild-AdS junction simplify to

$$
\beta_i(r) = \frac{\kappa^2 - \lambda - 1}{2\kappa} r + \frac{\mu}{2\kappa} \frac{1}{r^2}, \quad \beta_o(r) = -\frac{\kappa^2 + \lambda + 1}{2\kappa} r + \frac{\mu}{2\kappa} \frac{1}{r^2}. \qquad (\text{A.5})
$$

Fig. 10: Allowed and disallowed types of behavior of the de Sitter/Schwarzschild-AdS domain wall at small $r$. The dotted curves depict the shell's trajectory (on (a) de Sitter and (b) Schwarzschild-AdS Penrose diagrams) and the arrows correspond to the outward-pointing normal. The checks indicate allowed scenarios while the crosses label disallowed scenarios. (a) In de Sitter, the shell starting from or ending at the left origin is allowed, whereas starting/ending on the right origin is not allowed. (b) In Schwarzschild-AdS, the shell starting towards the right from the past singularity or moving towards the left before hitting the future singularity is allowed, whereas the opposite behavior is not allowed.

Clearly, as $r \to 0$, the second term dominates, and is always positive. Hence both $\beta_i(r) \to +\infty$ and $\beta_o(r) \to +\infty$ as $r \to 0$. This means that when the shell is sufficiently small, the outward-pointed normal has to point toward larger $r$. Fig. 10 summarizes the allowed and disallowed scenarios³⁰. On the de Sitter side (where the ‘origins’ $r=0$ are the north and south poles described by the vertical dashed lines in Fig. 10a), the normals point

³⁰ Although a complete shell trajectory is sketched in Fig.
10, only the $r \to 0$ part is of relevance – i.e., the full trajectory may or may not be allowed, based on additional constraints to be discussed later. Also, Fig. 10 is not intended to indicate the time of impact $t_\alpha(r \to 0)$ of the shell, but only the sign of $\frac{dt_\alpha}{dr}(r \to 0)$. +---PAGE_BREAK--- + +toward increasing $r$ only on the left side of the Penrose diagram. Hence the shell is allowed to hit only the left origin but not the right origin, as indicated in Fig. 10a. Similarly, for the Schwarzschild-AdS part of the spacetime, the shell can start out from the past singularity towards the right on the Penrose diagram, and hit the future singularity moving towards the left, but not vice-versa, as indicated in Fig. 10b. + +**Fig. 11:** Allowed and disallowed types of behavior of the de Sitter/Schwarzschild-AdS domain wall at large $r$. (a) In de Sitter, the shell starting from de Sitter $I^-$ towards the right or ending at $I^+$ veering left is allowed for small tension (namely $\kappa^2 < \lambda+1$); whereas the opposite behavior is allowed for large tension ($\kappa^2 > \lambda+1$). (b) In Schwarzschild-AdS, the shell starting from or ending at the left boundary is allowed, whereas the shell starting from or ending at the right boundary is not allowed. + +At large $r$, the first terms in (A.5) dominate; here the sign of $\beta_i$ depends on the shell's tension and the de Sitter cosmological constant. For $\kappa^2 < \lambda + 1$, the first term is negative, so that $\beta_i(r) \to -\infty$ as $r \to \infty$, i.e., the outward normal from the de Sitter side points towards decreasing $r$. Conversely, for large tension $\kappa^2 > \lambda + 1$, the extrinsic curvature remains positive, so the outward normal points toward larger $r$. This is indicated in Fig. 11a, by the right and left trajectories, respectively. The corresponding behavior on Schwarzschild-AdS side, sketched in Fig. 
11b, is more universal: Here $\beta_o \to -\infty$ as $r \to \infty$ for all $\kappa$ and $\lambda > 0$, so that the shell can hit the left boundary but not the right boundary. + +We can now determine the full trajectory of the shell on the Penrose diagram. This requires us to solve for $R(\tau)$ using the expression for $V_{\text{eff}}$, while keeping in mind the sign of the extrinsic curvatures $\beta_\alpha$. These extrinsic curvatures are plotted along with the effective potential, for the various cases (A — E of Fig. 2) in Fig. 12. The parameters $\lambda, \mu$, and $\kappa$ +---PAGE_BREAK--- + +**Fig. 12:** Various possible effective potentials (thick, concave down curve) and extrinsic curvatures (thin, concave up curves, where $\beta_i(r) > \beta_o(r)$) describing the de Sitter/Schwarzschild-AdS junction. The specific parameters $(\lambda, \mu, \kappa)$ used were: (A1,B2) $\lambda = 0.5, \mu = 0.75, \kappa = 2$; (A2,B1) $\lambda = 1, \mu = 0.5, \kappa = 1$; (C1) $\lambda = 1, \mu = 2, \kappa = 1$; (C2) $\lambda = 1, \mu = 1, \kappa = 2$; and (D,E) $\lambda = 2, \mu = 0.89, \kappa = 1$. + +are of order unity in all cases³¹. The top two plots (labeled as cases A and B, consistently with the notation employed in Fig. 2) exemplify the situations with $V_{max} > 0$. For each potential there are two possible trajectories: (A) the shell expands from zero size and re-contracts and (B) the shell contracts from infinite size and re-expands. The middle two plots in Fig. 12 (cases C) describe a time asymmetric situation since $V_{max} < 0$. The two columns are distinguished by the sign of $\beta_i(r \to \infty)$. Finally, the bottom plot depicts a fine-tuned situation with $V_{max} = 0$. + +Let us now consider the corresponding extrinsic curvatures, which will enable us to construct the full Penrose diagrams. As we can see from Fig. 12(A1), $\beta_o$ remains positive everywhere along the A trajectory, so that the shell's turning point must lie in region I. 
³¹ The parameters used to plot the effective potential and extrinsic curvatures in Fig. 12 are chosen to clarify the geometrical aspects and are not necessarily in the physically interesting regime.
---PAGE_BREAK---

(right side) of the Schwarzschild-AdS Penrose diagram. Conversely, in Fig. 12(A2), $\beta_o$ becomes negative before $V_{\text{eff}}$ becomes positive, which implies that the shell must pass through region III rather than region I on the Schwarzschild-AdS Penrose diagram$^{32}$. Similarly, the sign of $\beta_i$ along the B trajectory distinguishes the cases (B1) and (B2). In the former case $\beta_i$ is positive, whereas in the latter it is negative. Finally, as a consistency check, one can verify that we cannot have $\beta_i$ becoming negative at smaller $r$ than where $V_{\text{eff}}$ becomes positive, or $\beta_o$ becoming negative at larger $r$ than where $V_{\text{eff}}$ becomes negative again$^{33}$.

A few remarks are in order. First, where exactly the shell passes with respect to the bifurcation point of the horizons depends on the details of the set-up, which distinguishes the cases A1 from A2, or B1 from B2, but which are not drawn separately in Fig. 6; i.e., (A) of Fig. 6 would strictly speaking correspond only to (A1) of Fig. 4, etc.. Second, note that the Penrose diagrams corresponding to the time reverse cases C' and E' (in the notation of Fig. 2) would be obtained by vertically flipping the diagrams (C) and (E), respectively.

## Appendix B. False vacuum bubbles in scalar-gravity systems

First, we will briefly review the general formalism for obtaining a full spacetime by using the initial-value (3 + 1) formulation of GR. Given an initial slice, the induced 3-metric, the corresponding extrinsic curvature, and the initial values and velocities of the matter fields, Einstein's equations split into two equations describing the evolution and imposing constraints on the initial data.
We are interested in gravity coupled to a scalar field $\phi$ with a given potential $V(\phi)$. The action for the system will be taken to be: + +$$ S = \int \sqrt{-g} \left[ \frac{1}{2} R - \frac{1}{2} (\nabla \phi)^2 - V(\phi) \right] . \qquad (B.1) $$ + +$^{32}$ Note that for (A2), since the trajectory on the Schwarzschild-AdS diagram must still bend towards the left while passing through the left region, we would expect that this case can occur for much smaller region in the parameter space than the more typical case (A1). Indeed, we confirm that this is the case by plotting the potentials as in Fig. 12: we find that it is much harder to obtain $V_{\text{max}} > 0$ with the $\beta_o$ intercept occurring at smaller $r$ than the $V_{\text{eff}}$ intercept. + +$^{33}$ In particular, denoting the zero-intercepts of the extrinsic curvatures by $r_{\beta_i}$ and $r_{\beta_o}$, we find that $V'_{\text{eff}}(r_{\beta_i}) = -2\lambda (\frac{\mu}{1+\lambda-\kappa^2})^{1/3} < 0$, while $V'_{\text{eff}}(r_{\beta_o}) = (3+\lambda+\kappa^2)(\frac{\mu}{1+\lambda+\kappa^2})^{1/3} > 0$. This automatically implies that if $V_{\text{max}} > 0$, then $V_{\text{eff}}(r_{\beta_i}) > 0$ and $V_{\text{eff}}(r_{\beta_o}) < 0$. Finally, by monotonicity of the extrinsic curvatures, this implies that the $\beta_i$ intercept occurs at larger $r$ than where $V_{\text{eff}}$ first becomes positive, and similarly that the $\beta_o$ intercept occurs at smaller $r$ than where $V_{\text{eff}}$ becomes negative again. +---PAGE_BREAK--- + +Consider for concreteness a 4-dimensional, spherically symmetric spacetime, with a +3-dimensional spacelike initial slice $\Sigma_0$. Further assume the metric on $\Sigma_0$ + +$$ds^2 = \frac{dr^2}{1 - \frac{2m(r)}{r}} + r^2 (d\theta^2 + \sin^2\theta d\phi^2). \qquad (\text{B.2})$$ + +The unit normal to the surface $\Sigma_0$ will be taken to be $(\frac{\partial}{\partial t})^a$. 
To describe how this hy- +persurface fits into the full spacetime, we have to prescribe the extrinsic curvature. For +simplicity, we will choose the extrinsic curvature to be proportional to the induced metric +on the Cauchy slice, + +$$K_{ab} = h(r) g_{ab} . \qquad (B.3)$$ + +Furthermore, we have a scalar field $\phi$ with a potential $V(\phi)$. We denote the radial variation of $\phi$ by $\phi'$, and the time evolution by $\dot{\phi}$. Specification of initial data will involve picking appropriate functions for $\phi(r)$, $\dot{\phi}(r)$ and $h(r)$. The constraint equations are + +$$\dot{\phi}\phi' = -2h' \qquad (B.4)$$ + +and + +$$2 m' + m r \phi'^2 = \frac{r^2}{2} \left( \dot{\phi}^2 + \phi'^2 + 2V - 6h^2 \right) . \qquad (\text{B.5})$$ + +Substituting (B.4) into (B.5) we find + +$$2 m(r) = \int_{0}^{r} \hat{r}^{2} \left( V - 3h^{2} + \frac{2h'^{2}}{\phi'^{2}} + \frac{1}{2}\phi'^{2} \right) e^{-\frac{1}{2} \int_{\hat{r}}^{r} \bar{r} \phi'^{2} d\bar{r}} d\hat{r} . \quad (\text{B.6})$$ + +Thus specifying the initial field profile $\phi(r)$ in a given potential $V(\phi)$ determines the mass function $m(r)$ on $\Sigma_0$ and thus the metric. Note that we can then find the total ADM mass in the usual way, by considering $m$ at large $r$ and possibly adding an appropriate counter-term. + +We will now motivate the procedure for picking the correct initial data in a scalar-gravity system, which is guaranteed to have the desired features, de Sitter $\mathcal{I}$ and AdS boundary. + +We first make a trivial remark about causality. If we take any spacetime $(M, g_{ab})$ +and consider some spacelike a-chronal (and possibly compact) surface $\Sigma$ and impose the +corresponding induced metric $h_{ab}$ and extrinsic curvature $K_{ab}$ on $\Sigma$ as our initial conditions, +then within the domain of dependence of $\Sigma$, we are guaranteed to evolve to the spacetime +$g_{ab}$. +---PAGE_BREAK--- + +**Fig. 
13:** Initial slices on a de Sitter Penrose diagram which are guaranteed to contain a piece of de Sitter $\mathcal{I}^+$ in their evolution. (The vertical lines are the poles $r=0$ of de Sitter, the dashed diagonal lines are the cosmological horizons $r=r_d$. Horizontal slices represent 3-spheres, with their equator along the vertical midpoint of the diagram.) + +This implies that if we want to consider (a piece of) initial data which is guaranteed to evolve to a de Sitter $\mathcal{I}^+$, we must take compatible initial conditions on a large enough initial surface (which spans the entire past domain of influence of the desired piece of de Sitter $\mathcal{I}$). This is illustrated on the de Sitter Penrose diagram in Fig. 13. If we wish to take the initial surface in a time-symmetric fashion (i.e. $\Sigma_0$ at $t=0$), then we must take $\Sigma_0$ to cover more than half of the sphere, in which case the radial coordinate is not monotonically increasing along the entire slice. On the other hand, if we consider a slice at some later time $t$, say $\Sigma_t$, we can easily contain the same piece of $\mathcal{I}^+$ under evolution, while $\Sigma_t$ covers only a small part of the sphere and therefore has increasing radial coordinate. This makes the formalism sketched above applicable; in particular, we can use (B.6) to find the mass. Moreover, in a domain wall construction, if we imagine setting up a (non-static) domain wall to pass at the right end of $\Sigma_t$ in Fig. 13, we no longer have the requirement that the de Sitter size has to be bigger than that of the black hole: all we need to satisfy is that $R(t) > r_d$ and $R(t) > r_+$ at the time $t$. Such constructions are discussed in detail in Appendix C. + +Let us now see more explicitly how to construct a spacetime containing both de Sitter and AdS using a scalar field in a potential. We use the potential $V(\phi)$ as sketched in Fig. 14a. 
Note that if the field $\phi(r)$ were homogeneous and sat in the positive local minimum of the potential, namely $\phi(r)=0$, the spacetime would correspond to a static, spherically symmetric spacetime with a positive cosmological constant, namely de Sitter. Likewise, if the field sat in the negative local minimum of the potential, $\phi(r) = \phi_1$, the spacetime would correspond to a static, spherically symmetric spacetime with a negative
---PAGE_BREAK---

**Fig. 14:** (a) The potential $V(\phi)$ and (b) an initial profile of the field $\phi(r)$.

cosmological constant, namely AdS or Schwarzschild-AdS, depending on the mass.

To interpolate between de Sitter and Schwarzschild-AdS, consider the field profile as sketched in Fig. 14b, with $\dot{\phi} = 0$ in the regions $r < R_0$ and $r > R_1$. For $r < R_0$, the field sits in the positive local minimum of the potential, so that in the domain of dependence of $r < R_0$, the spacetime corresponds to a part of the de Sitter spacetime. By arranging the corresponding initial surface $\Sigma_t$ to be late enough, this will include a part of the de Sitter $\mathcal{I}^+$, as in Fig. 13. Similarly, for $r > R_1$, the field sits in³⁴ the negative local minimum of the potential, corresponding to a part of Schwarzschild-AdS spacetime.

On a spacetime diagram, the full evolution might look like the one sketched in Fig. 15. Inside the domain of dependence of $r < R_0$ (left wedge), we obtain de Sitter $\mathcal{I}^+$. Similarly, inside the domain of dependence of $r > R_1$ (right wedge), we have Schwarzschild-AdS spacetime. Depending on the mass (in part determined by the size of the de Sitter region), the evolution may or may not reach a singularity. We note in passing that if there is a singularity in this region, as assumed in Fig. 15, then it will be of the Schwarzschild-AdS type, i.e., spacelike geodesics will bounce off the singularity.
We also note that in order to have $r$ increasing monotonically on $\Sigma_t$, we cannot have $\Sigma_t$ go through region 3 of Schwarzschild-AdS; this implies that in Fig. 15 there can be future or past Schwarzschild-AdS singularity, + +³⁴ There is a slight subtlety: if we require the profile $\phi(r)$ to be an analytic function of $r$, there has to be a slight deviation from $\phi$ being exactly constant in the regions $r < R_0$ and $r > R_1$. For de Sitter, this deviation does not change our arguments, since de Sitter is stable. On the other hand, the AdS minimum is more sensitive: if a homogeneous field is slightly off the minimum, it will big crunch. However, here we don't have homogeneous fields, and we can tune the deviation from the minimum to be infinitesimal, which implies by the well-posedness of the initial value formulation in general relativity that the resultant spacetime should be arbitrarily close to Schwarzschild-AdS. +---PAGE_BREAK--- + +**Fig. 15:** Sketch of possible causal diagram for spacetime evolved from the initial data of Fig. 14. + +but not both. The regions marked by ‘?’ in Fig. 15 refers to the domain of dependence of the interpolating region $r \in [R_0, R_1]$ in the scalar profile Fig. 14b, for which the evolution needs to be determined numerically. + +The above arguments illustrate that, at least within the realm of classical general relativity coupled to a scalar field, we can obtain de Sitter $\mathcal{I}$ within an asymptotically AdS spacetime. + +## Appendix C. Construction allowing de Sitter $\mathcal{I}$ and $r_d < r_+$ + +In this Appendix, we build on the set-up introduced in Appendix B to give a rigorous construction of spacetimes with de Sitter entropy smaller than the black hole entropy. We have indicated in Section 2 that for time asymmetric configurations it is possible to obtain $r_d$ both larger and smaller than $r_+$, depending on the parameters. 
In this Appendix we will demonstrate that in fact, geometries with $r_d < r_+$ are not only possible, but indeed required, if the initial slice $\Sigma$ has monotonically increasing $r$ and the de Sitter $\mathcal{I}$ is guaranteed by evolution from $\Sigma$. + +Consider the thin domain wall spacetimes separating de Sitter and Schwarzschild-AdS, compatible with an initial surface $\Sigma$ on which $r$ increases monotonically. Recall from (A.5) that both the de Sitter and the Schwarzschild-AdS extrinsic curvatures are positive for small $r$ and negative for large $r$ (though $\beta_i(r \to \infty)$ depends on $\kappa$; but this will not affect our discussion). Moreover, since $\beta_i(r) > \beta_o(r)$ for all $r$ (which follows from that requirement that the bubble have positive energy), the respective radii $r_{\beta_i}$ and $r_{\beta_o}$ where the de Sitter and Schwarzschild-AdS extrinsic curvatures vanish must be related as $r_{\beta_i} > r_{\beta_o}$. We will now spell out the sequence of steps to determine the Penrose diagram: + +* To have an initial slice $\Sigma$ with monotonic $r$, the shell cannot be confined entirely to the right wedge of de Sitter. Likewise, it cannot be confined entirely to the left wedge of Schwarzschild-AdS. More specifically, $\Sigma$ cannot enter into these regions. This is because +---PAGE_BREAK--- + +we want $r$ to increase towards the right on the initial slice, whereas it would necessarily +increase towards the left in the above regions. + +* To ensure de Sitter $I^+$ by causality arguments, the shell must end up at $r = \infty$. Evidently, the shell's trajectory must intersect the boundary of the past domain of influence of the desired piece of $I^+$; this implies that it must pass through the upper and/or right wedge of de Sitter. But since $\Sigma$ cannot enter into the right wedge of de Sitter, the shell must pass through the upper wedge. This ensures that the shell must end up at $I^+$ since it follows a timelike trajectory. 
+ +* In order for the shell to end up at $r = \infty$, it must end up in the left wedge of Schwarzschild-AdS (and in the upper wedge of de Sitter, as explained in the previous point). Since $r$ is continuous across the shell, the shell must end on one of the AdS boundaries. The negativity of the AdS extrinsic curvature at large $r$ implies that the shell cannot end up at the right boundary, since this would require the extrinsic curvature to be positive (the outward normal would necessarily have to point to larger $r$). + +* In order for the shell to end up in the left wedge of Schwarzschild-AdS, it cannot pass through the right wedge of Schwarzschild-AdS. This is implied simply by the fact that the shell follows a timelike trajectory. + +* The first and the last points imply that the shell must pass through the lower wedge of Schwarzschild-AdS. In fact, since the shell follows a timelike trajectory, this further implies that it must start in the lower wedge of Schwarzschild-AdS, i.e., at the singularity, $r=0$. Note that this requires that the effective potential for the domain wall motion be negative semi-definite, $V_{\text{eff}}(r) < 0$ for all $r$, which is easy to achieve for suitable choices of parameters. + +* Finally, in order for the shell to start at $r=0$ and therefore the de Sitter extrinsic curvature be positive, it must start in the left wedge of de Sitter. This is again implied by the incompatibility of extrinsic curvature with starting at the origin in the right wedge of de Sitter: for any such scenario, the de Sitter extrinsic curvature would have to be negative, since the outward normal would point toward decreasing $r$. + +Thus far, we have established that from the de Sitter point of view, the shell must start on the left origin and end on the upper $I^+$; whereas from the Schwarzschild-AdS point of view, it must start at the past singularity and end up on the left boundary. 
To refine this picture, we can use the facts about the extrinsic curvatures further to say that the shell must start out going to the right and end up going to the left on the Penrose diagram. The complete trajectory is illustrated in Fig. 16. + +Finally, if we combine these on a single Penrose diagram and draw in the initial surface +\Sigma, the resulting diagram would look like Fig. 17. Note that the de Sitter horizon crosses +the shell's trajectory lower (i.e., at smaller r) than the black hole horizon, which implies +that r_d < r_+. Also note that on Fig. 16, the initial slice Σ would cut the de Sitter Penrose +---PAGE_BREAK--- + +**Fig. 16:** The domain wall trajectory on de Sitter and Schwarzschild-AdS Penrose diagrams which is necessitated by the type of set-up specified above: and initial slice $\Sigma$ with monotonically increasing $r$, and a guarantee of $dS I^+$ under evolution. + +**Fig. 17:** Initial slice $\Sigma$ with monotonically increasing $r$ on a combined Penrose diagram (obtained by taking the left part of $dS$ and right part of S-AdS in Fig. 16). The shaded region is uncertain under a reasonable evolution. + +diagram in the upper wedge whereas it would cut the Schwarzschild-AdS Penrose diagram +in the lower wedge. While $\Sigma$ in Fig. 17 has $r$ monotonically increasing, we this property +does not continue to hold for all later spacelike slices which would cover the full Penrose +diagram. In this sense, Fig. 15 is somewhat misleading: $r$ is not a good coordinate globally. + +¿From the above arguments we see that Fig. 16 represents the only self-consistent set- +up with thin domain wall, and initial slice with monotonically increasing $r$ and a guaranteed +piece of $dS$ scri under evolution. Explicitly, in order for $r$ to increase monotonically on +our initial data slice $\Sigma$, we must have $R_1 < r_+$. 
On the other hand, to guarantee that +the domain of dependence of the part of $\Sigma$ with $r < R_0$ includes a part of $dS$ scri, we +must have $R_0 > r_d$. Since according to our setup, $R_0 < R_1$, the two conditions together +guarantee that the de Sitter radius (and therefore its entropy) is smaller than that of the +---PAGE_BREAK--- + +black hole. + +Although the above argument has been phrased in the thin wall context, it actually applies in general: assuming $r$ is monotonic on the Cauchy slice, in order to guarantee de Sitter $I^+$ within the domain of dependence of our Cauchy slice, we require $r_d < r_+.$ + +## Appendix D. Computation of dS-SAdS Propagators + +This appendix contains the computation of correlators on the cutoff surface $r_c$, with a dS region behind a domain wall at $R_t$. For simplicity, we will take $r_c$ and $R_t$ to be much larger than $r_+$ and $r_A = 1$ and much smaller than the de Sitter radius $r_d$. We will also consider just a massless field, although similar computations apply to massive fields. We will restrict our attention to Euclidean space propagators. + +In this geometry, the geodesic length between two points $(r_1, \Omega_1)$, $(r_2, \Omega_2)$ can be evaluated in the large $r$ limit + +$$L \sim \ln r_1 r_2 + \ln \left( \frac{\sin^2(\Omega_1 - \Omega_2)}{2} + \frac{1}{r_1^2} + \frac{1}{r_2^2} \right) + \dots \qquad (\text{D.1})$$ + +We are neglecting here terms that vanish as powers of $r_+/r$, as well as higher powers of $1/r$. So the propagator of a massless field ($\Delta = 3$) in AdS$_4$ between a point $(r_c, \Omega)$ and $(R_t, \Omega')$ becomes, since $R_t \gg r_c$, + +$$e^{-\Delta L} \sim \frac{1}{(r_c R_t)^3 (\sin^2(\Omega - \Omega')/2 + r_c^{-2})^3}. \qquad (\text{D.2})$$ + +We also need the propagator of a massless field in dS. In the limit where the dS radius is large, this reduces to the flat space formula. 
For two points on the domain wall $(R_t, \Omega'_1)$, $(R_t, \Omega'_2)$, this is + +$$\frac{1}{L^2} \sim \frac{1}{R_t^2 \sin^2(\Omega'_1 - \Omega'_2)/2}. \qquad (\text{D.3})$$ + +So the contribution to a two point function between points $(r_c, \Omega_1)$ and $(r_c, \Omega_2)$ is found by multiplying two AdS and one dS propagator together, and integrating over points $\Omega'_1$ and $\Omega'_2$ on the $R_t$ surface. This gives + +$$A(\Omega_1, \Omega_2) = \frac{1}{r_c^6 R_t^2} \int \frac{d^3 \Omega'_1 d^3 \Omega'_2}{\sin^2(\Omega'_1 - \Omega'_2)/2 (\sin^2(\Omega_1 - \Omega'_1)/2 + r_c^{-2})^3 (\sin^2(\Omega_2 - \Omega'_2)/2 + r_c^{-2})^3} \quad (\text{D.4})$$ + +Note that we've pulled a power of $R_t^6$ out from the volume element on the two 3-spheres, so here $\Omega$ denotes a point on the unit size 3-sphere. + +It is straightforward to estimate this integral. The term in the denominator, $(\sin^2(\Omega - \Omega')/2 + r_c^{-2})^{-3}$, is roughly $r_c^6$ near $\Omega = \Omega'$ and order one for points where $\Omega$ is far from +---PAGE_BREAK--- + +Ω'. The section of the Ω' 3-sphere where this term goes like $r_c^6$ has area $r_c^{-3}$. The full six dimensional integral over this domain gives a contribution of order $r_c^6$. So the order one piece will be subleading when $r_c \gg 1$, and the integral can be approximated as + +$$A(\Omega_1, \Omega_2) \sim \frac{1}{r_c^6 R_t^2} \left( \frac{r_c^6}{\sin^2(\Omega_1 - \Omega_2)/2} + \dots \right) \sim \frac{1}{R_t^2 \sin^2(\Omega_1 - \Omega_2)/2}. \quad (D.5)$$ + +We wish to compare this to the standard AdS massless propagator + +$$A_{\text{AdS}}(\Omega_1, \Omega_2) \sim \frac{1}{r_c^6 \sin^6(\Omega - \Omega')/2}. \qquad (\text{D.6})$$ + +We conclude that $A$ can be safely ignored when + +$$R_t \gg r_c^3. 
\qquad (D.7)$$ + +When $R_t$ is less than $r_c^3$ the new contribution to the propagator represents a non-local contribution to the two point functions, which scales as $L^{-2}$ rather than $L^{-6}$, where $L \sim \sin(\Omega_1 - \Omega_2)/2$ is the proper length on the sphere. This term becomes important for length scales longer than $L > \sqrt{R_t r_a^4/r_c^3}$, i.e. energy scales below $\sqrt{r_c^2/R_t r_a^4}$. + +## Appendix E. A pure state description of spacetimes with causally disconnected regions? + +In section 4, one of the criteria we proposed for the boundary field theory to be in a mixed state was that the spacetime geometry have regions that are causally disconnected from the boundary. However, in some special cases we can construct spacetimes containing a causally inaccessible region which nevertheless appear to have a pure state description in the CFT. In this section we will describe two examples of this. In both cases, a subset of CFT correlators are insensitive to the causally disconnected region and act as though they are in a mixed state. + +**Fig. 18:** Sketch of possible causal diagram for collapse spacetime created by an imploding null shell in AdS. Here the spacetime inside the shell is pure AdS. +---PAGE_BREAK--- + +The simplest example is a spherically symmetric null shell of energy which is launched +from the boundary at an instant of time, say $t = 0$. For simplicity we construct a time- +symmetric geometry, so the shell is launched towards the future and the past, as shown in +Fig. 18. The overall picture, then, is of a null shell expanding out to the boundary, reflecting +off, and collapsing back in. Since the usual AdS boundary conditions are reflecting, such +a configuration can be constructed as a solution of the source-free equations of motion. + +The overall structure of the spacetime depends on the mass of the shell. 
If the collapsing shell is massive enough to make a stable black hole in AdS (one with $r_+ > r_A$ so as
It is easy to check that for pure AdS, $r_{ray}(t) = r_A \tan(t/r_A)$ and $r_{shell}(t) = r_A \cot(t/r_A)$, which intersect at $r=r_A$.
Consider the bulk correlation functions +(see Fig. 18): + +$$ \langle 0 | S^\dagger O_1 O_2 \cdots O_n S | 0 \rangle \quad \text{and} \quad \langle 0 | S^\dagger U^\dagger O_1 O_2 \cdots O_n U S | 0 \rangle \quad (\text{E.2}) $$ + +If $\mathcal{U}$ is unitary and spacelike separated from all the $O_n$, bulk locality guarantees that $[\mathcal{U}, O_n] = 0$; hence the two correlation functions in (E.2) are identical (using $\mathcal{U}^\dagger \mathcal{U} = 1$). Taking the operators $O_n$ out to the boundary we can recover boundary correlation functions using the usual AdS/CFT rules. So boundary correlators which are obtained as limits of bulk correlators are not sensitive to excitations in the causally inaccessible region. (Note that this argument is specific to insertion of unitary operators in the causally inaccessible region; we can not reach the same conclusion with Hermitian operators.) Thus correlation functions of local operators can be calculated by explicitly tracing over the inaccessible region, implying that they are evaluated in a mixed state in the boundary description. + +Nonlocal operators, such as Wilson loops, appear to be able to detect the physics of +the inaccessible region. In fact, since the $t=0$ slice is identical to pure AdS except for +excitations at infinity, one expects that the holographic mapping is unchanged and so large +Wilson loops should be able probe the “inaccessible” region³⁵. The CFT would have to +have the following strange property. In the state $S|0\rangle$, the bulk excitations corresponding +to $\mathcal{U}$ should presumably be captured by excitations of nonlocal operators. The strange +feature of these excitations is that they cannot affect any correlation function of local +gauge-invariant operators at any time. It is unclear whether the field theory needs to have + +³⁵ While the choice of time slicing is not unique, one expects to be able to map bulk operators to CFT operators on each time slice. 
+---PAGE_BREAK--- + +this property exactly, since our argument relies on bulk locality and thus may only be +approximately true. If we were to focus only on correlation functions of local operators in +the CFT, we would see a mixed state which describes physics outside the horizon. + +A second example of a spacetime with causally disconnected regions that appears to be described by a pure state involves the “Swedish Geons” of [68,69,70,28,71,72]. These spacetimes are orbifolds of AdS$_3$, which have multiple boundaries and future and past singularities, much like the BTZ black hole. However, unlike the BTZ black hole, these geometries have regions in the interior that are causally disconnected from the all of the boundaries. In fact, the casual wedge associated to each boundary is metrically identical to the casual wedge of the BTZ black hole. There is a reasonably straightforward construction of a pure state in these spacetimes, following the Hartle-Hawking construction of [28,71]. These spacetimes have a simple Euclidean section, whose boundary is a higher genus Riemann surface. The Hartle-Hawking state, which is a pure state entangling degrees of freedom living on the various boundaries, is found by performing a path integral over this Riemann surface. The arguments above involving unitary operators will apply to these spacetimes as well. + +**Appendix F. Analyticity in Coleman-de Luccia spacetimes** + +As an example of continuing correlators from the AdS to de Sitter boundaries, it is +useful to consider the relatively simple example of Coleman and de Luccia [63]. Although +once backreaction is included the AdS boundaries are removed, if one ignores backreaction +this simple geometry is an instructive toy model where many calculations can be done +explicitly. We will focus on the three dimensional case, but most of our formulas are easily +be extended to higher dimensional cases. 
We start with the Euclidean Coleman-de Luccia geometry
In the limit where the mass of the scalar field is large, these Euclidean two point functions may be evaluated in position space as + +$$ +\langle \phi(x_1) \phi(x_2) \rangle \sim e^{-m \mathcal{L}(x_1, x_2)}, \qquad (\text{F.4}) +$$ + +where $\mathcal{L}(x_1, x_2)$ is the proper length of the geodesic between $x_1$ and $x_2$. In the AdS and de Sitter limits given by (F.2) this geodesic length is + +$$ +\mathcal{L} = \begin{cases} +\cosh^{-1} \left( r_a^2 \left( \cosh \frac{\rho_1}{r_a} \cosh \frac{\rho_2}{r_a} - \sinh \frac{\rho_1}{r_a} \sinh \frac{\rho_2}{r_a} \cos \ell_2 \right) \right), & \text{as } \rho_1, \rho_2 \to 0 \\ +\cos^{-1} \left( r_d^2 \left( \cos \frac{\rho_1}{r_d} \cos \frac{\rho_2}{r_d} + \sin \frac{\rho_1}{r_d} \sin \frac{\rho_2}{r_d} \cos \ell_2 \right) \right), & \text{as } \rho_1, \rho_2 \to \pi r_d +\end{cases} +\tag{F.5} +$$ + +where $\ell_2$ is the angular separation on the 2 sphere. In the interior, + +$$ +\mathcal{L} = \int_{\rho_1}^{\rho_2} \frac{d\rho}{\sqrt{1 - L^2/f^2}}, \qquad (\text{F.6}) +$$ + +where *L* is the conserved angular momentum of a geodesic, + +$$ +L^2 = f^4 \left( \dot{t}^2 + \cos^2 t \dot{\theta}^2 \right), \qquad (\text{F.7}) +$$ + +which is related to the $\ell_2$ by + +$$ +\ell_2 = \int \frac{d\rho}{f\sqrt{f^2/L^2 - 1}} . \qquad (\text{F.8}) +$$ + +The expression (F.6) is an analytic function as one moves from the AdS to the de Sitter regions, since the matching function $f(\rho)$ is analytic. So it allows us analytically continue correlators through the domain wall. In fact, for particular forms of $f(\rho)$, we can even continue correlators from the AdS boundary to the de Sitter $\mathcal{I}$. +---PAGE_BREAK--- + +This requires an extra step, since the Wick rotation (F.3) of the metric (F.1) gives us +only certain patches of AdS$_3$ and dS$_3$, respectively, whose constant $\rho$ slices are copies of +dS$_2$. 
Thus to go from the patch of AdS$_3$ containing the domain wall to another patch that +contains the asymptotic AdS boundary we must analytically continue in both $t$ and $\rho$. To +see this, recall that AdS$_3$ may be written in terms of de Sitter slices as + +$$ds^2 = d\rho^2 + r_a^2 \sinh^2 \frac{\rho}{r_a} (-dt^2 + \cosh^2 t \, d\theta^2). \qquad (\text{F.9})$$ + +The size of the de Sitter slices, $r_a^2 \sinh^2 \rho/r_a$, goes to zero as $\rho \to 0$, indicating that the +dS$_2$ slices are becoming null. One may continue across this horizon to a patch foliated by +hyperbolic slices + +$$ds^2 = -d\rho'^2 + r_a^2 \sin^2 \frac{\rho'}{r_a} (dt'^2 + \sinh^2 t' \, d\theta'^2) \qquad (\text{F.10})$$ + +by taking $\rho \to \rho' = i\rho$ and $t \to t' = t + i\pi/2$; note that the shift in $t$ is necessary to +keep $d\theta^2$ spacelike$^{36}$. The dS$_2$ slice that becomes null as $\rho \to 0$ matches onto an $H_2$ slice +that becomes null as $\rho' \to 0$. Then one can evolve forward in $\rho'$ until the hyperbolic slice +shrinks again to zero size as $\rho' \to \pi r_a$. One then crosses this horizon by a similar analytic +continuation to find a second coordinate system of the form (F.9). This second patch is +related to the original coordinate patch by $t \to t'' = t + i\pi$ and $\rho \to \rho'' = \rho + i\pi r_a$. The +$(\rho'', t'')$ patch contains an asymptotic AdS boundary at $\rho'' \to \infty$. + +Since constant $\rho''$ slices are copies of dS$_2$, the AdS/CFT correspondence in these coor- +dinates yields a dual boundary CFT living on dS$_2$; this is in contrast to the more familiar +examples of the sphere or the plane in global and Poincaré coordinates, respectively. + +Of course, as mentioned above, when we go beyond the thin wall approximation (as is +necessary to obtain analytic correlators) the geometry will typically develop a big crunch +singularity before one can reach the AdS boundary. 
This is because the patch (F.10) +has a surface of infinite blueshift at $\rho' \to \pi r_a$, so any matter present will cause a strong +backreaction. However, there certainly exist choices of analytic function $f(\rho)$ where this is +not the case. In these cases the metric is completely smooth in all of the patches described +above. Although such an $f(\rho)$ will not typically solve the appropriate equations of motion, +this case still presents an interesting toy model where analytic continuation can be studied +explicitly. We should emphasize that in the full de Sitter-Schwarzschild-AdS spacetimes +the AdS boundary to the right of the black hole horizon is not removed by backreaction. +So an analytic continuation similar to the one described here should apply. + +To go from the $\rho \to \pi r_d$ region of the de Sitter patch of dS$_3$ out to $\mathcal{I}^+$ we must +analytically continue in both $t$ and $\rho$. In particular, the patch of dS$_3$ foliated by copies of + +36 In this section we will be careful to label the various $(\rho, t)$ coordinates in different patches of the spacetime by primes to emphasize that they describe different coordinate systems. +---PAGE_BREAK--- + +$dS_2$, with + +$$ds^2 = d\rho^2 + r_d^2 \sin^2 \frac{\rho}{r_d} (-dt^2 + \cosh^2 t \, d\theta^2) , \quad (F.11)$$ + +has a horizon at $\rho \to \pi r_d$. By taking $t \to t''' = t + i\pi/2$ and $\rho \to \rho''' = i\rho - \pi R_d$ this horizon may be patched on to a region of $dS_3$ with hyperbolic slices + +$$ds^2 = -d\rho'''^2 + r_d^2 \sinh^2 \frac{\rho'''}{r_d} (dt'''^2 + \sinh^2 t'''\, d\theta^2) . \quad (F.12)$$ + +The de Sitter boundary is found by taking $\rho'''\to\infty$. Note that in this coordinate system the boundary is a copy of $H_2$; this $H_2$ covers only part of the full de Sitter boundary $S^2$ that one finds in global coordinates. 
Correlators of bulk fields in this coordinate system define boundary dS/CFT correlators on $H_2$, as opposed to boundary correlators on the sphere or the plane described by [19]. Putting this together with (F.6), we have an explicit analytic continuation of two point functions from points near an AdS boundary to points near a de Sitter boundary. + +The considerations described above imply that boundary CFT correlators on the AdS boundary have very interesting analytic behavior. Expression (F.5) tells us that near the AdS boundary correlators are found by continuing $t \to t + \pi$, $\rho \to i\pi r_a$, and taking the large $\rho_1, \rho_2$ limit. Stripping off the factors of $e^{\rho_1}$ and $e^{\rho_2}$, this gives us the usual form for conformal two point functions on $dS_2$, + +$$\langle O_{\phi}(x_1) O_{\phi}(x_2) \rangle \sim \left( \frac{1}{\sin^2 \ell_2 / 2} \right)^m + \text{subleading}, \quad (F.13)$$ + +where $\ell_2$ is now the geodesic length on $dS_2$. In the small $\ell$ limit this gives the usual short distance behavior$^{37}$ $\ell_2^{-2m}$. The more complicated form at finite distance is the standard formula for conformal two point functions on $dS_2$; it can be thought of as arising from the Weyl anomaly for conformal field theories on de Sitter backgrounds. Near the de Sitter boundary, a similar prescription can be used to define correlators of a Euclidean conformal field theory on $H_2$ + +$$\langle O_\phi(x_1) O_\phi(x_2) \rangle \sim \left( \frac{1}{\sin^2 \ell_2 / 2} \right)^{im} + \text{subleading} , \quad (F.14)$$ + +$^{37}$ We should emphasize that the expression (F.5) has branch cuts in the complex $\ell_2$ plane, so in deriving (F.13) we have made a specific branch choice. This appearance of branch ambiguities is rather common when analytically continuing correlators in curved spacetime, and can be thought of as arising from the ambiguity of vacuum choice in cosmological spacetimes. 
In this case, there is a clear choice of branch prescription; we simply choose the branch which matches the usual short distance behavior of the standard AdS vacuum. +---PAGE_BREAK--- + +where now $\ell_2$ is the geodesic length on $H_2$. At short distances this gives the short distance behavior $\ell_2^{2im}$ associated to a field of imaginary weight, as one usually obtains³⁸ in dS/CFT. + +³⁸ We should note that in terms of the boundary field theories, the analytic continuation from the CFT on $dS_2$ in (F.13) to the ECFT on $H_2$ given by (F.14) is $t \to t + i\pi/2$. +---PAGE_BREAK--- + +References + +[1] E. W. Kolb and M. S. Turner, *The Early Universe*, Addison-Wesley, Redwood City (1990). + +[2] A.D. Linde, *Particle Physics and Inflationary Cosmology*, Harwood, Chur, Switzerland (1990). + +[3] A.R. Liddle and D.H. Lyth, *Cosmological Inflation and Large-Scale Structure*, Cambridge University Press, Cambridge, England (2000). + +[4] R. Bousso and J. Polchinski, *Quantization of four-form fluxes and dynamical neutralization of the cosmological constant*, JHEP 0006, 006 (2000) [arXiv:hep-th/0004134]. + +[5] S. Kachru, R. Kallosh, A. Linde and S. P. Trivedi, *De Sitter vacua in string theory*, Phys. Rev. D 68, 046005 (2003) [arXiv:hep-th/0301240]. + +[6] F. Denef, M. Douglas, B. Florea, A. Grassi and S. Kachru, *Fixing all moduli in a simple F-theory compactification*, [arXiv:hep-th/0503124]; F. Denef, M. Douglas and B. Florea, *Building a better racetrack*, JHEP 0406 (2004) 034 [arXiv:hep-th/0404257]; P. S. Aspinwall and R. Kallosh, *Fixing all moduli for M-theory on $K_3 \times K_3$*, [arXiv:hep-th/0506014]. + +[7] J. Conlon, F. Quevedo and K. Suruliz, *Large-volume flux compactifications: Moduli spectrum and $D3/D7$ soft supersymmetry breaking*, [arXiv:hep-th/0505076]; P. Berglund and P. Mayr, *Non-perturbative superpotentials in F-theory and string duality*, [arXiv:hep-th/0504058]; V. Balasubramanian, P. Berglund, J. Conlon and F. 
Quevedo, *Systematics of moduli stabilisation in Calabi-Yau flux compactifications*, JHEP 0503 (2005) 007 [arXiv:hep-th/0502058]. + +[8] A. Saltman and E. Silverstein, *A new handle on de Sitter compactifications*, [arXiv:hep-th/0411271]; A. Maloney, E. Silverstein and A. Strominger, *de Sitter space in non-critical string theory*, [arXiv:hep-th/0205316]. B. Acharya, *A moduli fixing mechanism in M-theory*, [arXiv:hep-th/0212294]. B. de Carlos, A. Lukas and S. Morris, *Non-perturbative vacua for M-theory on $G2$ manifolds*, JHEP 0412 (2004) 018 [arXiv:hep-th/0409255.] + +[9] G. Curio, A. Krause and D. Lüst, *Moduli stabilization in the heterotic/IIB discretuum*, [arXiv:hep-th/0502168]; S. Gurrieri, A. Lukas and A. Micu, *Heterotic on Half-flat*, Phys.Rev. D70 (2004) 126009 [arXiv:hep-th/0408121]; K. Becker, M. Becker, K. Dasgupta, P. Green and E. Sharpe, *Compactifications of Heterotic Strings on Non-Kahler complex manifolds II*, Nucl.Phys. B678 (2004) 19 [arXiv:hep-th/0310058]; M. Becker, G. Curio and A. Krause, *De Sitter Vacua from Heterotic M Theory*, Nucl.Phys. B693 (2004) 223 [arXiv:hep-th/0403027]; R. Brustein and S.P. de Alwis, *Moduli Potentials in String Compactifications with Fluxes: Mapping the Discretuum*, Phys.Rev. D69 (2004) 126006 [arXiv:hep-th/0402088]; S. Gukov, S. Kachru, X. Liu and L. McAllister, *Heterotic Moduli Stabilization with Fractional Chern-Simons Invariants*, Phys.Rev. D69 +---PAGE_BREAK--- + +(2004) 086008 [arXiv:hep-th/0310159]; E. Buchbinder and B. Ovrut, *Vacuum Stability in Heterotic M-theory*, Phys. Rev. **D69** (2004) 086010 [arXiv:hep-th/0310112]; G. Cardoso, G. Curio, G. Dall'Agata and D. Lüst, *Heterotic string theory on non-Kahler manifolds with H-flux and gaugino condensate*, Fortsch. Phys. **52** (2004) 483 [arXiv:hep-th/0310021]; G. Cardoso, G. Curio, G. Dall'Agata and D. Lüst, *BPS action and superpotential for heterotic string compactifications with fluxes*, JHEP **0310** (2003) 004 [arXiv:hep-th/0306088] G. 
Curio and A. Krause, *G-fluxes and non-perturbative stabilization of heterotic M-theory*, Nucl.Phys. **B643** (2002) 131 [hep-th/0108220]. + +[10] I. Antoniadis and T. Maillard, *Moduli stabilization from magnetic fluxes in type I string theory*, Nucl. Phys. B **716**, 3 (2005) [arXiv:hep-th/0412008]; I. Antoniadis, A. Kumar and T. Maillard, *Moduli stabilization with open and closed string fluxes*, [arXiv:hep-th/0505260]. + +[11] O. DeWolfe, A. Giryavets, S. Kachru and W. Taylor, *Type IIA moduli stabilization*, JHEP **0507**, 066 (2005) [arXiv:hep-th/0505160]. + +[12] L. Susskind, *The anthropic landscape of string theory*, arXiv:hep-th/0302219. + +[13] T. Banks, *Cosmological breaking of supersymmetry or little Lambda goes back to the future. II*, arXiv:hep-th/0007146. + +[14] T. Banks and W. Fischler, *M-theory observables for cosmological space-times*, arXiv:hep-th/0102077. + +[15] E. Witten, *Quantum gravity in de Sitter space*, arXiv:hep-th/0106109. + +[16] W. Fischler, A. Kashani-Poor, R. McNees and S. Paban, *The acceleration of the universe, a challenge for string theory*, JHEP **0107**, 003 (2001) [arXiv:hep-th/0104181]. + +[17] S. Hellerman, N. Kaloper and L. Susskind, *String theory and quintessence*, JHEP **0106**, 003 (2001) [arXiv:hep-th/0104180]. + +[18] R. Bousso, *Positive vacuum energy and the N-bound*, JHEP **0011**, 038 (2000) [arXiv:hep-th/0010252]. + +[19] A. Strominger, *The dS/CFT correspondence*, JHEP **0110**, 034 (2001) [arXiv:hep-th/0106113]. + +[20] G. L. Alberghi, D. A. Lowe and M. Trodden, *Charged false vacuum bubbles and the AdS/CFT correspondence*, JHEP **9907**, 020 (1999) [arXiv:hep-th/9906047]. + +[21] J. M. Maldacena, *The large N limit of superconformal field theories and supergravity*, Adv. Theor. Math. Phys. **2**, 231 (1998) [Int. J. Theor. Phys. **38**, 1113 (1999)] [arXiv:hep-th/9711200]. + +[22] E. Witten, *Anti-de Sitter space and holography*, Adv. Theor. Math. Phys. **2**, 253 (1998) [arXiv:hep-th/9802150]. 
+ +[23] S. S. Gubser, I. R. Klebanov and A. M. Polyakov, *Gauge theory correlators from non-critical string theory*, Phys. Lett. B **428**, 105 (1998) [arXiv:hep-th/9802109]. + +[24] O. Aharony, S. S. Gubser, J. M. Maldacena, H. Ooguri and Y. Oz, *Large N field theories, string theory and gravity*, Phys. Rept. **323**, 183 (2000) [arXiv:hep-th/9905111]. +---PAGE_BREAK--- + +[25] S. K. Blau, E. I. Guendelman and A. H. Guth, *The Dynamics Of False Vacuum Bubbles*, Phys. Rev. D **35**, 1747 (1987). + +[26] E. Farhi and A. H. Guth, *An Obstacle To Creating A Universe In The Laboratory*, Phys. Lett. B **183**, 149 (1987). + +[27] E. Farhi, A. H. Guth and J. Guven, *Is It Possible To Create A Universe In The Laboratory By Quantum Tunneling?*, Nucl. Phys. B **339**, 417 (1990). + +[28] J. Maldacena and L. Maoz, *Wormholes in AdS*, JHEP **0402**, 053 (2004) [arXiv:hep-th/0401024]. + +[29] J. Louko, D. Marolf and S. F. Ross, *On geodesic propagators and black hole holography*, Phys. Rev. D **62**, 044041 (2000) [arXiv:hep-th/0002111]. + +[30] P. Kraus, H. Ooguri and S. Shenker, *Inside the horizon with AdS/CFT*, Phys. Rev. D **67**, 124022 (2003) [arXiv:hep-th/0212277]. + +[31] L. Fidkowski, V. Hubeny, M. Kleban and S. Shenker, *The black hole singularity in AdS/CFT*, JHEP **0402**, 014 (2004) [arXiv:hep-th/0306170]. + +[32] J. M. Maldacena, *Eternal black holes in Anti-de-Sitter*, JHEP **0304**, 021 (2003) [arXiv:hep-th/0106112]. + +[33] V. Balasubramanian, P. Kraus, A. E. Lawrence and S. P. Trivedi, *Holographic probes of anti-de Sitter space-times*, Phys. Rev. D **59**, 104021 (1999) [arXiv:hep-th/9808017]. + +[34] T. Banks, *Landskepticism or why effective potentials don't count string models*, arXiv:hep-th/0412129. + +[35] R. Bousso, *Cosmology and the S-matrix*, Phys. Rev. D **71**, 064024 (2005) [arXiv:hep-th/0412197]. + +[36] W. Israel, *Thermo Field Dynamics Of Black Holes*, Phys. Lett. A **57**, 107 (1976). + +[37] T. 
[39] W. Israel, *Singular Hypersurfaces And Thin Shells In General Relativity*, Nuovo Cim. B **44S10**, 1 (1966) [Erratum-ibid. B **48**, 463 (1967)].
[62] A. B. Clark, D. Z. Freedman, A. Karch and M. Schnabl, *The dual of Janus ((<:) ↔ (>:)) an interface CFT*, Phys. Rev. D **71**, 066003 (2005) [arXiv:hep-th/0407073].
[67] A. D. Linde, *Hard art of the universe creation (stochastic approach to tunneling and baby universe formation)*, Nucl. Phys. B **372**, 421 (1992) [arXiv:hep-th/9110037].
- **Multiplicative Identity:** There exists $1 \in \mathbb{F}$ where $1 \cdot \vec{v} = \vec{v}$ for any $\vec{v} \in \mathbf{V}$. We call $1$ the multiplicative identity.
Extra: Read and Understand Polynomial Spaces, Spaces of Arbitrary "Field." +---PAGE_BREAK--- + +### 7.1.1 Bases + +We can use a series of vectors to define a vector space. We call this set of vectors a **basis**, which we define formally below: + +**Definition 7.1 (Basis):** + +Given a vector space $(V, \mathbb{F})$, a set of vectors $\{\vec{v}_1, \vec{v}_2, \dots, \vec{v}_n\}$ is a **basis** of the vector space if it satisfies the following two properties: + +* $\vec{v}_1, \vec{v}_2, \dots, \vec{v}_n$ are linearly independent vectors + +* For any vector $\vec{v} \in V$, there exist scalars $\alpha_1, \alpha_2, \dots, \alpha_n \in \mathbb{F}$ such that $\vec{v} = \alpha_1\vec{v}_1 + \alpha_2\vec{v}_2 + \dots + \alpha_n\vec{v}_n$. + +Intuitively, a basis of a vector space is the *minimum* set of vectors needed to represent all vectors in the vector space. If a set of vectors is linearly dependent and “spans” the vector space, it is still not a basis because we can remove at least one vector from the set and the resulting set will still span the vector space. + +The next natural question to ask is: Given a vector space, is the basis unique? Intuitively, it is not because multiplying one of the vectors in a given basis by a nonzero scalar will not affect the linear independence or span of the vectors. We could alternatively construct another basis by replacing one of the vectors with the sum of itself and any other vector in the set. + +To illustrate this mathematically, suppose $\{\vec{v}_1, \vec{v}_2, \dots, \vec{v}_n\}$ is a basis for the vector space we are considering. +Then + +$$ \{\alpha \vec{v}_1, \vec{v}_2, \dots, \vec{v}_n\} \qquad (1) $$ + +where $\alpha \neq 0$ is also a basis because, just as we've seen in Gaussian elimination row operations, multiplying a row by a nonzero constant does not change the linear independence or dependence of the rows. 
The set of vectors is linearly independent and we can represent any vector $\begin{bmatrix} a \\ b \\ c \end{bmatrix}$ in the vector space using the three vectors:
Since each basis vector can be scaled by one coefficient, the dimension of a space can be thought of as the fewest number of parameters needed to describe an element or member of that space.
+ +We introduced quite a few terms in this lecture note, and we'll see how we can connect these with our understanding of matrices in the next lecture note! +---PAGE_BREAK--- + +**Additional Resources** For more on bases, read *Strang* pages 167 - 171 and try Problem Set 3.4. +*Extra: Read Sections on Matrix and Function Space.* + +In Schaum's, read pages 124-126 and pages 127-129. Try Problems 4.24 to 4.28, 4.97 to 4.103, and 4.33 to 4.40. + +## 7.2 Practice Problems + +These practice problems are also available in an interactive form on the course website. + +1. True or False: $\{\begin{bmatrix} -3 \\ 1 \end{bmatrix}, \begin{bmatrix} -1 \\ 0 \end{bmatrix}, \begin{bmatrix} 5 \\ 2 \end{bmatrix}\}$ spans $\mathbb{R}^2$. + +2. True or False: $\{\begin{bmatrix} 1 \\ 2 \\ 3 \end{bmatrix}, \begin{bmatrix} 5 \\ -2 \\ 1 \end{bmatrix}, \begin{bmatrix} -3 \\ 6 \\ 5 \end{bmatrix}\}$ is a basis for $\mathbb{R}^3$. + +3. The following vectors span $\mathbb{R}^3$: + +$$ \vec{x}_1 = \begin{bmatrix} 1 \\ 2 \\ 2 \end{bmatrix}, \vec{x}_2 = \begin{bmatrix} 2 \\ 5 \\ 4 \end{bmatrix}, \vec{x}_3 = \begin{bmatrix} 1 \\ 3 \\ 2 \end{bmatrix}, \vec{x}_4 = \begin{bmatrix} 2 \\ 7 \\ 4 \end{bmatrix}, \vec{x}_5 = \begin{bmatrix} 1 \\ 1 \\ 0 \end{bmatrix} $$ + +Which vectors of this set form a basis for $\mathbb{R}^3$? + +(a) $\vec{x}_1, \vec{x}_2, \vec{x}_3, \vec{x}_4, \vec{x}_5$ + +(b) $\vec{x}_1, \vec{x}_3, \vec{x}_5$ + +(c) $\vec{x}_1, \vec{x}_2, \vec{x}_4$ + +(d) $\vec{x}_1, \vec{x}_3, \vec{x}_4, \vec{x}_5$ \ No newline at end of file diff --git a/samples_new/texts_merged/3495399.md b/samples_new/texts_merged/3495399.md new file mode 100644 index 0000000000000000000000000000000000000000..1c033011ebe32fd3b63ce2ce3ca04cb3bda3a8b2 --- /dev/null +++ b/samples_new/texts_merged/3495399.md @@ -0,0 +1,382 @@ + +---PAGE_BREAK--- + +# The Paramagnetic Ground State of Ruby—Revisited + +J. 
Shell¹ + +A more accurate formula for the ruby spin Hamiltonian (than used in earlier JPL programs) is presented for calculating the ground-state paramagnetic spectrum of ruby and transition probability matrix elements between quantum states induced by radio-frequency magnetic fields. A coordinate system is chosen that simplifies the expressions for the radio-frequency magnetic field. Applications of the computer program to several past and current Deep Space Network maser designs are presented. The program is included in an appendix along with a sample output. + +## I. Introduction + +The low-noise maser amplifiers in the Deep Space Network (DSN) use ruby as the active material. The quantum states of the paramagnetic chromium ion in the ruby crystal are used in the amplification process. An external static magnetic field, $\vec{H}_{dc}$, is applied to the ruby to generate the quantum states. The nature of these states depends on the strength and orientation of this field relative to the ruby crystal c-axis. Transitions between these quantum states are induced by radio frequency (rf) magnetic fields. These transitions are used in two distinct ways. In the first instance, microwave energy from a pump source is used to alter the distribution of spins amongst the energy levels. This creates the population inversion necessary for the ruby to amplify an incoming signal. In the second instance, the process of stimulated emission amplifies the transitions resulting from an incoming “signal.” This incoming signal may be from a distant spacecraft, for example. + +A good model and understanding of the ruby's paramagnetic behavior are necessary for maser design. In particular, the low-lying energy levels, which are used in cryogenic low-noise amplifiers, are of interest. The ability to calculate the transitions between levels induced by an rf field is also necessary for good maser design. This article contains a computer program that models these effects. 
The program can be used to select static magnetic-field strengths and orientations and microwave magnetic-field orientations and polarization. This program can aid in the understanding of current and past DSN ruby masers. + +In 1970, a Fortran program was written to calculate these same quantities using a different coordinate system and different numerical values for the parameters used to describe the ruby [1]. This program was used to generate many sets of tables for maser design. Some tables exist today, but the program is no longer readily available. In 1978, the National Bureau of Standards (NBS) published a report describing the use of ruby as a standard reference material in electron paramagnetic resonance experiments [2]. It published precise values of the spectroscopic splitting factors and the zero-field splitting for ruby. + +¹ Communications Ground Systems Section. + +The research described in this publication was carried out by the Jet Propulsion Laboratory, California Institute of Technology, under a contract with the National Aeronautics and Space Administration. +---PAGE_BREAK--- + +The program described in this article uses these more recent values. The program also uses a different coordinate system that simplifies the task of calculating transition probabilities due to an rf field. Rather than aligning the ruby crystal c-axis in the z-direction, the applied static magnetic field is chosen along the z-direction [3]. In addition, the advent of new commercial software specifically designed to work with matrices allows for a much simpler program [4]. The program listing and a sample output are included in Appendix A. + +## II. Spin Hamiltonian for Ruby + +A very concise description of the low-lying states, often referred to as the ground state, is made possible through the concept of an effective spin Hamiltonian. 
This approach includes such effects as the Zeeman splitting of the states due to applied magnetic fields, including anisotropy of this splitting. It also describes the splitting of energy levels due to the electrostatic field of surrounding atoms. In the case of ruby, this appears as a quadrupole interaction. Excellent discussions of this concept can be found in several books [5,6]. + +The presence of the crystal field makes the form of the Hamiltonian dependent on the orientation of the coordinate system. For example, if the ruby crystal c-axis is chosen along the z-direction, then the spin Hamiltonian, $H_s$, is given by + +$$H_s = g_1\beta H_z S_z + g_2\beta(H_x S_x + H_y S_y) + D \left[ S_z^2 - \frac{1}{3}S(S+1) \right] \quad (1)$$ + +Here, $g_1$ and $g_2$ are spectroscopic splitting factors, $\beta$ is the Bohr magneton, and $\vec{H}_{dc} = (H_x, H_y, H_z)$ is the applied static magnetic field. The spin vector is denoted by $\vec{S}' = (S_x, S_y, S_z)$. Here, $S_x, S_y, S_z$ are spin matrices, given below. The variable $D$ represents one half of the zero-field splitting between the $S_z = \pm 1/2$ spin states and the $S_z = \pm 3/2$ spin states. The quantity $S(S+1)$ is the eigenvalue of the operator $S^2 = S_x^2 + S_y^2 + S_z^2$. Equation (1) is very similar to the expression used in [1]. The coordinate system appropriate to this form is shown in Fig. 1(a). + +Personnel at Bell Telephone Laboratories used a Hamiltonian wherein the z-axis is along the applied static magnetic field [3]. The ruby crystal c-axis is specified by the polar angle, $\theta$, with respect to the dc magnetic field and an azimuthal angle, $\varphi$, with respect to the x-axis. Their result is + +Fig. 1. The coordinate system used in (a) Eq. (1) and (b) Eq. (2). 
+---PAGE_BREAK--- + +$$ +\begin{align} +H_s = {}& (g_1 \cos^2 \theta + g_2 \sin^2 \theta) \beta H_z S_z \nonumber \\ +& + D \left( \cos^2 \theta - \frac{1}{2} \sin^2 \theta \right) \left[ S_z^2 - \frac{1}{3} S(S+1) \right] \nonumber \\ +& + D \left( \frac{1}{2} \right) \left( \cos \theta \sin \theta \right) \left[ e^{-j\varphi} (S_z S_+ + S_+ S_z) + e^{j\varphi} (S_z S_- + S_- S_z) \right] \nonumber \\ +& + D \left( \frac{1}{4} \right) \sin^2 \theta \left( e^{-2j\varphi} S_+^2 + e^{2j\varphi} S_-^2 \right) \tag{2} +\end{align} +$$ + +Here, $S_+ = S_x + jS_y$, $S_- = S_x - jS_y$, and $j = \sqrt{-1}$. We use the values for the spectroscopic splitting factors $g_1 = 1.9817$ and $g_2 = 1.9819$, and the zero-field splitting $D = -3.8076 \times 10^{-17}$ ergs, published by the National Bureau of Standards. This is the form that will be used for the results presented in this article. + +The coordinate system appropriate to the Hamiltonian of Eq. (2) is shown in Fig. 1(b). From the point of view of the crystal, it's a more natural choice to choose the z-axis along the c-axis direction. From the point of view of the rf magnetic fields, it makes more sense to let the direction of the c-axis be unrestricted. The result is a more complex expression for the spin Hamiltonian. However, since a digital computer performs the calculation, the additional complexity is not a concern. Equation (2) can be shown to be almost exactly equal to Eq. (1). We have neglected terms involving the difference between $g_1$ and $g_2$ because they are nearly equal. Demonstration of the equivalence is discussed in Appendix B. + +The values predicted by this program are different from the values published by Berwin [1] or Siegman [6]. This is due to the slightly different values of the spectroscopic splitting factor and zero-field splitting used by the two programs. 
For example, with a 2600-gauss magnetic field oriented 90 degrees to the ruby c-axis, Berwin calculates the 1–2 transition frequency to be 2.6083 GHz. The current program predicts 2.5677 GHz, a difference of 40.6 MHz, or about 1.5 percent. + +In addition to choosing a coordinate system, we must choose a representation for the spin operators. This means choosing a set of base states in terms of which the spin quantum states can be expressed. The usual choice for a spin system is the set of states that are simultaneous eigenstates of the total angular momentum squared and the projection of the angular momentum along some axis, usually the z-axis. In this representation, the matrices representing $S^2$ and $S_z$ are diagonal. We also adopt this convention. For a spin $S = 3/2$ system, such as the Cr$^{+3}$ ion in ruby, $S^2$ and $S_z$ are given by $(2S+1)$-by-$(2S+1)$ matrices. In particular, + +$$ +\begin{align} +S^2 &= \frac{15}{4} \cdot \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} \notag \\ +S_z &= \frac{1}{2} \cdot \begin{bmatrix} 3 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & -1 & 0 \\ 0 & 0 & 0 & -3 \end{bmatrix} \tag{3a} +\end{align} +$$ + +In this representation, the matrices representing the spin operators $S_x$ and $S_y$ are given by +---PAGE_BREAK--- + +$$ S_x = \frac{1}{2} \cdot \begin{bmatrix} 0 & \sqrt{3} & 0 & 0 \\ \sqrt{3} & 0 & 2 & 0 \\ 0 & 2 & 0 & \sqrt{3} \\ 0 & 0 & \sqrt{3} & 0 \end{bmatrix} \qquad (3b) $$ + +$$ S_y = \frac{1}{2} \cdot \begin{bmatrix} 0 & -\sqrt{3}j & 0 & 0 \\ \sqrt{3}j & 0 & -2j & 0 \\ 0 & 2j & 0 & -\sqrt{3}j \\ 0 & 0 & \sqrt{3}j & 0 \end{bmatrix} $$ + +From Eqs. (1) or (2) and (3), it can be seen that the spin Hamiltonian is a 4-by-4 matrix. The eigenvalues of the matrix are the energies of the discrete quantum states available to the spins. The difference in energies divided by Planck's constant determines the resonant transition frequencies. 
The eigenvector associated with an eigenvalue is a representation of the quantum state having that energy. The transition frequencies are calculated and displayed by the program. The eigenvectors are used to calculate the spin vectors discussed in the next section. The eigenvectors are not normally displayed, although it is a simple matter to do so. + +### III. Transition Probability Matrix Elements and Spin Vectors + +The ability of the rf magnetic field to induce transitions between the quantum states of ruby is fundamental to maser design. If the rf field is the signal from a spacecraft, this ability is related to the gain of the maser. If the rf field is from a microwave pump source, this ability is related to the amount of pump energy needed to saturate the transition. A measure of the ability of a given rf field to induce a transition is given by a matrix element. + +The transition probability between quantum states $i$ and $j$ induced by an rf magnetic field is + +$$ W_{i \to j} = \frac{1}{4} \gamma^2 g(f) |\langle j | \vec{H}_{rf}^* \cdot \vec{S} | i \rangle|^2 \quad (4) $$ + +where $\gamma = g\beta\mu_o/\hbar$ and $g(f)$ is the line-shape function. The matrix element mentioned above is given by $\langle j | \vec{H}_{rf}^* \cdot \vec{S} | i \rangle$. The quantum states, $\langle j | , | i \rangle$, are represented by the eigenvectors of the spin Hamiltonian. The spin vector is shorthand for $\vec{S} = (S_x, S_y, S_z)$, where the spin matrices are given above. + +As seen in Eq. (4), the operator describing the interaction between the spin and the rf magnetic field has much the same form as the operator describing a spin in a static magnetic field. It takes the form of a dot product between the conjugate of the rf magnetic field vector and the spin vector. 
The magnetic field vector can be pulled outside the brackets, leading to the expression + +$$ +\begin{aligned} +\vec{H}_{rf}^* \cdot (\langle j | \vec{S} | i \rangle) &= \vec{H}_{rf}^* \cdot \{\langle j | S_x | i \rangle \hat{x} + \langle j | S_y | i \rangle \hat{y} + \langle j | S_z | i \rangle \hat{z}\} \\ +&= H_x^* S_x^{ij} + H_y^* S_y^{ij} + H_z^* S_z^{ij} = \vec{H}_{rf}^* \cdot \vec{S}^{ij} +\end{aligned} +$$ + +In general, $H_x^*, S_x^{ij}, H_y^*, S_y^{ij}, H_z^*, S_z^{ij}$ are complex numbers. Thus, the transition probability between two states depends on the magnitude, orientation, and polarization of the rf magnetic field. The spin vectors, $\vec{S}^{ij} = \langle j | \vec{S} | i \rangle$, as well as the quantities $T_{ij} = |\vec{H}_{rf}^* \cdot \vec{S}^{ij}|^2$, for a user-specified rf field, are calculated by the program. +---PAGE_BREAK--- + +# IV. Program Description and Examples Using the Program + +The program is written in the high-level language MATLAB. This is commercial software specifically designed to handle matrices. MATLAB has intrinsic eigenvalue and eigenvector routines. This greatly reduces the program length. After the Hamiltonian is entered into the program, the eigenvalues and eigenvectors are calculated by executing one statement. The eigenvectors are ordered with the one corresponding to the lowest energy, $e_1$, labeled $v_1$, and the next one labeled $v_2$, and so on. The eigenvectors calculated by MATLAB are also orthogonal and normalized. For a general choice of the azimuth angle, $\varphi$, the eigenvectors are complex. If the c-axis is chosen in the x–z plane, that is, $\varphi = 0$ or 180 degrees, the eigenvectors are real. + +The program input consists of the static magnetic-field strength, the angles $\theta$ and $\varphi$ specifying the c-axis orientation and the rf magnetic field in phasor form. 
The program calculates and displays the transition frequencies (in GHz), the associated spin vectors, and the quantity $T_{ij}$ for all the transitions. A sample output follows the program listing. + +The user can check the transition frequencies for selected field strength and orientation against the NBS tables. The NBS tables include values for $T_{x'}^{\alpha\beta} = |\langle\alpha|S_{x'}|\beta\rangle|^2$ and $T_{y'}^{\alpha\beta} = |\langle\alpha|S_{y'}|\beta\rangle|^2$. These can be compared against the $T_{ij}$ calculated by the program by entering $H_{rf} = (1,0,0)$ and $H_{rf} = (0,1,0)$, respectively, as program input. Note that the levels in the NBS tables are labeled in the opposite order, with level 1 being the highest and level 4 being the lowest. + +In the following subsections, the program is used to analyze or describe past and current DSN masers. + +## A. Example 1: S-Band Coaxial Cavity Masers + +Our first example of the use of the program will be a comparison of two early 2.36-GHz (S-band) coaxial cavity masers. The first such cavity had the ruby oriented in the coaxial line, as shown in Figs. 2(a) and 2(b).² The static magnetic field was oriented perpendicular to the coaxial line. Its strength was approximately 2500 gauss. The rf magnetic-field lines of constant magnitude are circles surrounding the center conductor in a plane perpendicular to the center conductor, as shown in Fig. 2(c). The ruby c-axis is in a plane perpendicular to the static magnetic field and oriented 30 degrees out of the plane of the rf magnetic field. + +With the right-hand x–y–z coordinate system in Fig. 2(a), we set $\varphi = 60$ degrees and $\theta = 90$ degrees. The rf-field lines of constant magnitude form circles in the y–z plane, and the polarization is linear. The + +Fig. 2. 
The first S-band coaxial cavity: (a) a perspective drawing showing the direction of the static magnetic field and the crystal c-axis, (b) a side view, and (c) a top view (a typical rf magnetic field line is also shown). + +² R. C. Clauss, personal communication, Jet Propulsion Laboratory, Pasadena, California, February 2002. +---PAGE_BREAK--- + +interaction of the ruby with the linear rf field depends on the angle $\psi$, shown in Fig. 2(c). We can generate a table of transition probabilities as a function of $\psi$ by changing the relative magnitude of the y- and z-components of the rf magnetic field. Because of the symmetry, we need only cover 1/4 of the circumference of the circle. We choose 10-degree increments. + +A word about our notation is in order. We will represent the rf magnetic field in the form $H_{rf} = H_1(a, b, c)$, where $a, b, c$ can be complex and satisfy $|a|^2 + |b|^2 + |c|^2 = 1$. In its most general form, $H_1$ would be $H_1 = he^{j\alpha}$. The actual rf field is given by multiplying $H_{rf}$ by $e^{j\omega t}$ and taking the real part. In our examples, $H_1$ will be chosen equal to one. For example, a right-hand circular polarized wave in the x-y plane would be written as $H_{rf} = (1, -j, 0)$. If the wave is viewed as propagating toward the observer, then if the fingers of the right hand curl in the direction of vector rotation, the thumb will point toward the observer. The linear rf field phasors are listed in Table 1 along with the associated value of $T_{12}$. For the 1-2 transition, the average value of $T_{12}$ per unit rf field strength is $T_{12}/H_1 = 0.623$. + +To accurately estimate the ruby absorption, we would have to account for the stronger field near the shorted end of the ruby cavity, as well as the variation of the field strength from the center conductor to the outer conductor. Since the second maser geometry in this comparison is the same as the first, we will neglect these effects. 
The second maser geometry is shown in Figs. 3(a) and 3(b) [7]. Now the static magnetic field is along the center conductor of the coaxial line, and the ruby c-axis is in the plane perpendicular to it. It is also the plane of the rf magnetic field, as seen in Fig. 3(c). For this orientation, we set $\theta = 90$ degrees and $\phi = 0$ degrees. Again we vary $H_{rf}$, at 10-degree increments, around 1/4 of the circumference of the circle in the x-y plane. The transition probabilities are shown in Table 2. For the 1-2 transition, the average value of $T_{12}$ per unit rf field strength is $T_{12}/H_1 = 0.892$. Therefore, the second maser geometry should be significantly better, with a transition probability for the signal transition about 43 percent greater than the first geometry. + +## B. Example 2: X-band Coupled-Cavity Maser + +The next example concerns the behavior of ruby as it might appear in a DSN 8.42-GHz (X-band) coupled-cavity maser. This is shown schematically in Fig. 4. The ruby crystal is shown in a cavity with a signal broadbanding cavity on the left and a pump broadbanding cavity on the right. To the left of the signal broadbanding cavity is a stepped-height pump reject filter. An applied static magnetic + +Table 1. First S-band coaxial cavity. + +
HrfT12
(0, 1, 0)1.2451
(0, 0.985, 0.174)1.2081
(0, 0.940, 0.342)1.1002
(0, 0.866, 0.500)0.9338
(0, 0.766, 0.643)0.7306
(0, 0.643, 0.766)0.5148
(0, 0.500, 0.866)0.3113
(0, 0.342, 0.940)0.1456
(0, 0.174, 0.985)0.0377
(0, 0, 1)0.0
0.623
(average)
+---PAGE_BREAK--- + +Fig. 3. The second S-band coaxial cavity: (a) a perspective drawing showing the direction of the static magnetic field and the crystal c-axis, (b) a side view, and (c) a top view (a typical rf magnetic field line is also shown). + +Table 2. Second S-band coaxial cavity. + +
HrfT12
(1, 0, 0)1.5985
(0.985, 0.174, 0)1.5565
(0.940, 0.342, 0)1.4341
(0.866, 0.500, 0)1.2451
(0.766, 0.643, 0)1.0144
(0.643, 0.766, 0)0.7695
(0.500, 0.866, 0)0.5384
(0.342, 0.940, 0)0.3505
(0.174, 0.985, 0)0.2280
(0, 1, 0)0.1851
0.892
(average)
+ +Fig. 4. A perspective view of an X-band coupled-cavity maser. The cavities are drawn for illustrative purposes only; they are not to scale. +---PAGE_BREAK--- + +field of 4,981 gauss is oriented 90 degrees to the crystal c-axis. The signal transition is chosen between levels 1 and 2 and occurs at 8.421 GHz. The first pump transition is between levels 1 and 3 and occurs at 24.05 GHz. A second pump transition is between levels 3 and 4 and occurs at 19.21 GHz. The spin vectors for these transitions are very important to the maser design. + +The spin vector for the signal transition is $\vec{S}_{12} = (-1.0735, 0.65443j, 0)$. Since we have chosen $\varphi = 0$, the c-axis is in the x-direction. Thus, if the rf fields of the signal are linearly polarized, as in the case of the coupled-cavity maser, the interaction with the ruby is stronger if the rf magnetic field is predominantly in the x-direction rather than the y-direction. The value of $T_{12}$ with $H_{rf} = (1, 0, 0)$ is 1.1525. The value of $T_{12}$ with $H_{rf} = (0, 1, 0)$ is 0.4282. Thus, the advantage is 2.69. Therefore, elongating the cavity in the x-direction will increase the coupling with the rf magnetic field. From this we can also see that rf magnetic fields in the z-direction, along the applied static magnetic field, are ineffective in inducing transitions. + +The spin vector indicates that the optimum rf field polarization is elliptical. If an rf field of unit amplitude is linearly polarized in the x-direction, then $T_{12} = 1.1524$. That is the best you can do with a linearly polarized signal. However, if the rf field has the proper elliptical polarization and is of unit amplitude, then $H_{rf} = (0.854, -0.521j, 0)$ and $T_{12} = 1.582$. There also exists an rf field polarization in this plane that does not induce a response. It is $H_{rf} = (0.521, 0.854j, 0)$. + +The spin vector for the first pump transition is $\vec{S}_{13} = (0, 0, 0.4140)$. 
Thus, a linearly polarized field in the z-direction will be required to stimulate this transition. Therefore, the pump waveguide feeding the ruby cavity must support a 24-GHz mode whose electric field is perpendicular to the applied magnetic field. Finally, the spin vector for the second pump is $\vec{S}_{34} = (-0.7229, 1.0051j, 0)$. It is similar to the signal component, except the roles of the x- and y-directions are reversed. The value of $T_{34}$ with $H_{rf} = (1, 0, 0)$ is 0.5225. The value of $T_{34}$ with $H_{rf} = (0, 1, 0)$ is 1.0102. Now the transition probability is almost twice as strong for the linear rf field polarized in the y-direction as compared to the x-direction. + +### C. Example 3: Ka-Band Coupled-Cavity Maser + +Our last example will concern the behavior of ruby as it is used in the current DSN 31.8- to 32.3-GHz (Ka-band) coupled-cavity maser. This is shown schematically in Fig. 5. A static magnetic field of 11,881 gauss is applied along the z-direction, and the ruby c-axis is oriented 54.735 degrees to this direction. The signal transition occurs between levels 2 and 3 at frequencies around 32 GHz. The spin vector for this transition is $\vec{S} = (-0.9777, 0.9786j, -0.0424)$. Therefore, for maximum transition probability, the rf magnetic field should be $H_{rf} = (0.707, -0.707j, 0.031)$. This is a circularly polarized + +Fig. 5. A perspective view of a Ka-band coupled-cavity maser. The cavities are drawn for illustrative purposes only; they are not to scale. +---PAGE_BREAK--- + +signal in the x-y plane. For this reason, the orientation of the c-axis in azimuth is not important. The c-axis can lie anywhere on a cone at 54.735 degrees to the applied field without affecting the signal transition probability. + +Two pump transitions typically are used for this operating point. The first pump between levels 1 and 3 occurs at 66.25 GHz. The spin vector for this transition is $\vec{S} = (-0.1455, 0.1519j, 0.0990)$. 
For maximum transition probability, the rf magnetic field should be $\vec{H}_{rf} = (0.6259, -0.6534j, -0.4259)$. This is nearly a circularly polarized signal in the x-y plane, with a significant, but smaller, component in the z-direction. For this reason, this transition normally is pumped with waveguide modes whose electric fields lie along the applied static magnetic field. + +The second pump between levels 2 and 4 also occurs at 66.25 GHz. The spin vector for this transition is $\vec{S} = (-0.1289, 0.1183j, 0.0990)$. Therefore, for maximum transition probability, the rf magnetic field should be $\vec{H}_{rf} = (0.6399, -0.5873j, -0.4955)$. This is more elliptical than the first pump, but the difference between $T_{24}$ for an x-polarized rf field and a y-polarized rf field is never more than 17 percent as the c-axis is varied in azimuth. Again, the z-component is smaller than either the x- or y-component. The waveguide modes mentioned above are also used for pumping this transition. It is a fortunate situation that pump energy at the same frequency and in the same waveguide mode is effective in pumping both transitions. This is especially helpful at this operating point where the pump transitions are very weak. If $H_{rf} = (0.7071, 0.7071, 0)$, $T_{13}/T_{23} = 0.023$ and $T_{24}/T_{23} = 0.016$. This is the main reason for having the ruby cavity resonant at both the signal and pump frequencies in the coupled-cavity maser design. + +## V. Conclusion + +A program has been written to calculate the ground state spectrum of ruby and the transition probability due to an rf magnetic field. This information is used in the design and analysis of masers using ruby as the active material. It is based on a Hamiltonian where the z-axis is along the static magnetic field and the x- and y-axes are chosen to simplify the expressions for the rf magnetic field. The direction of the c-axis is specified by two polar angles. 
It is written in the language of MATLAB and is included in Appendix A for reference purposes. A discussion of some DSN masers using the results of the program is presented. + +## References + +[1] R. Berwin, *Paramagnetic Energy Levels of the Ground State of Cr+3 in Al2O3 (Ruby)*, Technical Memorandum 33-440, Jet Propulsion Laboratory, Pasadena, California, January 15, 1970. + +[2] T. Chang, D. Foster, and A. H. Kahn, “An Intensity Standard for Electron Paramagnetic Resonance Using Chromium-Doped Corundum (Al2O3:Cr3+),” *Journal of Research of the National Bureau of Standards*, vol. 83, no. 2, pp. 133–164, March–April 1978. + +[3] E. O. Schulz-Du Bois, “Paramagnetic Spectra of Substituted SAPPHIRES—Part I: Ruby,” *Bell System Technical Journal*, vol. 38, p. 271, January 1959. + +[4] MATLAB, Version 5, The MathWorks, Inc., Natick, Massachusetts, copyright 1984–1998. +---PAGE_BREAK--- + +[5] A. Abragam and B. Bleaney, *Electron Paramagnetic Resonance of Transition Ions*, New York: Dover Publications, Inc., 1986. + +[6] A. E. Siegman, *Microwave Solid State Masers*, New York: McGraw-Hill Book Company, 1964. + +[7] R. C. Clauss, "A 2388 Mc Two-Cavity Maser for Planetary Radar," *Microwave Journal*, May 1965. +---PAGE_BREAK--- + +# Appendix A + +## Ruby Energy Level Program and Sample Output + +The MATLAB program listing follows. Statements following a “%” are comments. (Notice that MATLAB denotes $\sqrt{-1}$ by $i$.) 
+ +% an m-file called rubylevels.m to calculate the eigenvalues + +% and eigenvectors of the spin hamiltonian for ruby + +% it calculates the spin vector and the transition frequencies (in GHz) + +% and also the transition probabilities for a given r-f magnetic field + +% Hdc is along the z-axis and the c-axis direction is unrestricted + +g1=1.9817; % use the values for g1, g2 and D +g2=1.9819; % suggested by the National Bureau +D=-3.8076e-17; % of Standards +beta=9.273e-21; + +h=4981 % enter the magnetic field strength +thetad=90.0 % enter the polar angle +phid=0.0 % enter the azimuthal angle +Hrf=[0.854; -0.521i; 0.0] % enter the r-f field polarization + +theta=pi*(thetad/180.0); % convert polar angle to radians +phi=pi*(phid/180.0); % convert azimuthal angle to radians + +% construct the spin hamiltonian +Sx=(0.5)*[0 1.732 0 0;1.732 0 2 0;0 2 0 1.732;0 0 1.732 0]; +Sy=(0.5)*[0 -1.732i 0 0;1.732i 0 -2i 0;0 2i 0 -1.732i;0 0 1.732i 0]; +Sz=(0.5)*[3 0 0 0;0 1 0 0;0 0 -1 0;0 0 0 -3]; + +Sp=Sx+i*Sy; Sm=Sx-i*Sy; +sh1=(g1*(cos(theta))^2+g2*(sin(theta))^2)*beta*h*Sz; +sh2=D*((cos(theta))^2-(0.5)*(sin(theta))^2)*(Sz^2-1.25*eye(4)); +sh3=D*(sin(theta))*(cos(theta))*(0.5)*exp(-i*phi)*(Sz*Sp+Sp*Sz); +sh4=D*(sin(theta))*(cos(theta))*(0.5)*exp(i*phi)*(Sz*Sm+Sm*Sz); +sh5=D*(0.25)*(sin(theta))^2*(exp(-2*i*phi)*Sp^2+exp(2*i*phi)*Sm^2); +sh=sh1+sh2+sh3+sh4+sh5; + +% calculate the eigenvectors and eigenvalues +[evect,eval]=eig(sh); + +e1=eval(1,1); e2=eval(2,2); e3=eval(3,3); e4=eval(4,4); + +% the eigenvector associated with the first eigenvalue is the first +% column of the matrix evect, the 2nd eigenvector is the 2nd column, etc + +v1=evect(:,1); v2=evect(:,2); v3=evect(:,3); v4=evect(:,4); + +% order the eigenvalues such that the most negative one is labeled e1 +% and the most positive one is labeled e4, carry the eigenvectors +% along with the eigenvalues + +if e1>e2 + et=e1; vt=v1; + e1=e2; v1=v2; + e2=et; v2=vt; +end +---PAGE_BREAK--- + +```matlab +if e1>e3 + et=e1; vt=v1; + 
e1=e3; v1=v3; + e3=et; v3=vt; +end + +if e1>e4 + et=e1; vt=v1; + e1=e4; v1=v4; + e4=et; v4=vt; +end + +if e2>e3 + et=e2; vt=v2; + e2=e3; v2=v3; + e3=et; v3=vt; +end + +if e2>e4 + et=e2; vt=v2; + e2=e4; v2=v4; + e4=et; v4=vt; +end + +if e3>e4 + et=e3; vt=v3; + e3=e4; v3=v4; + e4=et; v4=vt; +end +``` + +% calculate and display the transition frequencies +f12=(e2-e1)/6.626e-18, f13=(e3-e1)/6.626e-18, f14=(e4-e1)/6.626e-18, +f23=(e3-e2)/6.626e-18, f24=(e4-e2)/6.626e-18, f34=(e4-e3)/6.626e-18, + +% calculate and display the spin vectors +S12=[v2'*Sx*v1; v2'*Sy*v1; v2'*Sz*v1] +S13=[v3'*Sx*v1; v3'*Sy*v1; v3'*Sz*v1] +S14=[v4'*Sx*v1; v4'*Sy*v1; v4'*Sz*v1] +S23=[v3'*Sx*v2; v3'*Sy*v2; v3'*Sz*v2] +S24=[v4'*Sx*v2; v4'*Sy*v2; v4'*Sz*v2] +S34=[v4'*Sx*v3; v4'*Sy*v3; v4'*Sz*v3] + +%display the "transition probabilities" for the rf signal +T12=(Hrf'*S12)*(Hrf'*S12)', T13=(Hrf'*S13)*(Hrf'*S13)', +T14=(Hrf'*S14)*(Hrf'*S14)', T23=(Hrf'*S23)*(Hrf'*S23)', +T24=(Hrf'*S24)*(Hrf'*S24)', T34=(Hrf'*S34)*(Hrf'*S34)' + +The sample output follows. The user specifies the values of h, thetad, phid, and Hrf. The program determines the frequencies, spin vectors, and transition probabilities. The numbers 1,2,3,4 identify the quantum states, with 1 being the lowest energy state and 4 being the highest. 
+ +h = 4981 +thetad = 90 +phid = 0 +Hrf = 0.8540 + 0 - 0.5210i + 0 +---PAGE_BREAK--- + +$$f_{12} = 8.4214$$ + +$$f_{13} = 24.0415$$ + +$$f_{14} = 43.2512$$ + +$$f_{23} = 15.6201$$ + +$$f_{24} = 34.8298$$ + +$$f_{34} = 19.2097$$ + +$$S_{12} = -1.0735$$ + +$$0 + 0.6544i$$ + +$$0$$ + +$$S_{13} = 0$$ + +$$0$$ + +$$0.4140$$ + +$$S_{14} = -0.0287$$ + +$$0 + 0.0899i$$ + +$$0$$ + +$$S_{23} = -0.9078$$ + +$$0 + 1.0264i$$ + +$$0$$ + +$$S_{24} = 0$$ + +$$0$$ + +$$0.2858$$ + +$$S_{34} = -0.7229$$ + +$$0 + 1.0051i$$ + +$$0$$ + +$$T_{12} = 1.5819$$ + +$$T_{13} = 0$$ + +$$T_{14} = 0.0051$$ + +$$T_{23} = 1.7160$$ + +$$T_{24} = 0$$ + +$$T_{34} = 1.3018$$ +---PAGE_BREAK--- + +# Appendix B + +## Derivation of the Hamiltonian Used in Equation (2) + +The reader may be convinced of the equivalence of Eqs. (1) and (2) in the following way. First, Eq. (1) is expressed in spherical coordinates. This gives the result + +$$H_s = g_1\beta H \cos\theta S_z + g_2\beta H (\sin\theta \cos\varphi S_x + \sin\theta \sin\varphi S_y) - D \left[S_z^2 - \frac{1}{3}S(S+1)\right] \quad (B-1)$$ + +Then the coordinate system is rotated three times. First the coordinate system is rotated about the z-axis by an angle $\varphi$ until the static magnetic field is in the $x'-z'$ plane. Then the coordinate system is rotated by an angle $-\theta$ about the y'-axis until the dc magnetic field is along the $z''$-direction. Finally, the coordinate system is rotated about the $z''$-axis by the angle $(\pi - \varphi)$. 
The rotation matrix relating the unprimed coordinates and the triple-primed coordinates is the product of the three rotation matrices: + +$$\begin{bmatrix} x \\ y \\ z \end{bmatrix} = \begin{bmatrix} \cos \varphi & -\sin \varphi & 0 \\ \sin \varphi & \cos \varphi & 0 \\ 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} \cos \theta & 0 & \sin \theta \\ 0 & 1 & 0 \\ -\sin \theta & 0 & \cos \theta \end{bmatrix} \begin{bmatrix} -\cos \varphi & -\sin \varphi & 0 \\ \sin \varphi & -\cos \varphi & 0 \\ 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} x''' \\ y''' \\ z''' \end{bmatrix}$$ + +Now we use the rather remarkable fact that the spin matrices transform just like the components of a vector. Thus, the relationship between the unprimed spin operators and the triple-primed spin operators is the same as the above relationship between the coordinates. Thus, we can write + +$$\begin{bmatrix} S_x \\ S_y \\ S_z \end{bmatrix} = \begin{bmatrix} -\cos\theta\cos^2\varphi - \sin^2\varphi & -\sin\varphi\cos\varphi\cos\theta + \sin\varphi\cos\varphi & \sin\theta\cos\varphi \\ -\cos\theta\sin\varphi\cos\varphi + \sin\varphi\cos\varphi & -\cos\theta\sin^2\varphi - \cos^2\varphi & \sin\theta\sin\varphi \\ \sin\theta\cos\varphi & \sin\theta\sin\varphi & \cos\theta \end{bmatrix} \begin{bmatrix} S_{x'''} \\ S_{y'''} \\ S_{z'''} \end{bmatrix}$$ + +Expressing the spin operators $S_x, S_y, S_z$ in Eq. (B-1) in terms of $S_{x'''}$, $S_{y'''}$, $S_{z'''}$ leads to Eq. (2), where the triple primes have been dropped. Equation (2) neglects Zeeman terms involving differences between $g_1$ and $g_2$. 
\ No newline at end of file diff --git a/samples_new/texts_merged/3594993.md b/samples_new/texts_merged/3594993.md new file mode 100644 index 0000000000000000000000000000000000000000..883a9862ec6545db81d06fd018e26bb322db7806 --- /dev/null +++ b/samples_new/texts_merged/3594993.md @@ -0,0 +1,309 @@ + +---PAGE_BREAK--- + +# On coloring box graphs + +CrossMark + +Emilie Hogana, Joseph O'Rourkeb, Cindy Traubc, Ellen Veomettd,* + +a Pacific Northwest National Laboratory, United States + +b Smith College, United States + +c Southern Illinois University Edwardsville, United States + +d Saint Mary's College of California, United States + +## ARTICLE INFO + +**Article history:** +Received 5 November 2013 +Received in revised form 6 September 2014 +Accepted 13 September 2014 +Available online 23 October 2014 + +**Keywords:** +Graph coloring +Box graph +Chromatic number + +## ABSTRACT + +We consider the chromatic number of a family of graphs we call box graphs, which arise from a box complex in *n*-space. It is straightforward to show that any box graph in the plane has an admissible coloring with three colors, and that any box graph in *n*-space has an admissible coloring with *n* + 1 colors. We show that for box graphs in *n*-space, if the lengths of the boxes in the corresponding box complex take on no more than two values from the set {1, 2, 3}, then the box graph is 3-colorable, and for some graphs three colors are required. We also show that box graphs in 3-space which do not have cycles of length four (which we call "string complexes") are 3-colorable. + +© 2014 Elsevier B.V. All rights reserved. + +## 1. Introduction and results + +There are many geometrically-defined graphs whose chromatic numbers have been studied. Perhaps the most famous such example is the Four Color Theorem, which states that any planar graph is 4-colorable [1]. Another famous example is the chromatic number of the plane. 
More specifically, a graph $G = (V, E)$ is defined where $V = \mathbb{R}^2$ and $(x, y) \in E$ precisely when $\|x - y\|_2 = 1$ (where $\| \cdot \|_2$ is the usual Euclidean norm in the plane). Through simple geometric constructions, one can show that $4 \le \chi(G) \le 7$ for this graph, although the precise value is still not known; see [8], for example. + +In this article, we consider graphs that arise from box complexes. We first define what a box complex is: + +**Definition 1.** An *n*-dimensional box is a set $B \subset \mathbb{R}^n$ that can be defined as: + +$$B = \{x = (x_1, x_2, \dots, x_n) \in \mathbb{R}^n : a_i \le x_i \le b_i\}$$ + +where $a_i < b_i$ for $i = 1, 2, \dots, n$. + +An *n*-dimensional *box complex* is a set of finitely many *n*-dimensional boxes $\mathcal{B} = \{B_1, B_2, \dots, B_m\}$ such that if the intersection of two boxes $B_i \cap B_j$ is nonempty, then $B_i \cap B_j$ is a face (of any dimension) of both $B_i$ and $B_j$, for any $i$ and $j$ (see Fig. 1). + +Now we can define a box graph: + +**Definition 2.** An *n*-dimensional *box graph* is a graph defined on an *n*-dimensional box complex. The box graph $G(\mathcal{B}) = (V, E)$ defined on the box complex $\mathcal{B} = \{B_1, B_2, \dots, B_m\}$ is the undirected graph whose vertex set is the boxes: + +$$V = \{B_1, B_2, \dots, B_m\}$$ + +* Corresponding author. +E-mail address: erv2@stmarys-ca.edu (E. Veomett). +---PAGE_BREAK--- + +Fig. 1. Examples in $\mathbb{R}^2$. + +Fig. 2. Defining a 2-dimensional box graph. + +and whose edges $(B_i, B_j) \in E$ record when $B_i \cap B_j$ is an $(n-1)$-dimensional face of both $B_i$ and $B_j$. In other words, the box graph is the dual graph of the box complex, and the colorings we are considering are in some sense “solid colorings.” + +When it eases understanding, we may use the terms box complex and box graph interchangeably. We also may use boxes and vertices interchangeably. 
+ +The following proposition shows that, as far as the corresponding box graphs are concerned, we may as well restrict ourselves to box complexes where each of the vertices of the boxes has integer coordinates (and thus all boxes have integer lengths). + +**Proposition 1.** Let $\mathcal{B} = \{B_1, B_2, \dots, B_m\}$ be a box complex and let $G(\mathcal{B}) = (V, E)$ be its corresponding box graph. There exists a box complex $\{C_1, C_2, \dots, C_m\}$ where the vertices of each $C_i$ ($i = 1, 2, \dots, m$) have all integer coordinates, such that the box graph corresponding to complex $\{C_1, C_2, \dots, C_m\}$ is the same graph $G$. + +We will prove **Proposition 1** in Section 2. + +We ask the following natural question: + +**Question 1.** What is the minimum number of colors $k$ that are required so that every $n$-dimensional box graph has an admissible $k$-coloring? + +From Fig. 2(c), we can see that three colors may be necessary to color a 2-dimensional box graph. In fact, as we will prove in Section 2, three colors are also sufficient: + +**Proposition 2.** Any box graph in $n$-space has an admissible coloring with $n + 1$ colors. + +Our goal is to answer **Question 1** in dimension 3, which is still open. In the case where the "boxes" are zonotopes (as opposed to right-angled bricks), sometimes 4 colors are needed [4], and in the case where the "boxes" are now touching spheres, the chromatic number is between 5 and 13 [2]. Analogously, for simplicial complexes in $\mathbb{R}^n$, $n+1$ colors suffice [6]. We suspect that any 3-dimensional box graph is 3-colorable, and we can show that this is true for a few families of 3-dimensional box graphs. The following are the main results of this paper: + +**Theorem 1.** Let $G$ be an $n$-dimensional box graph such that the lengths of all of the boxes in the corresponding box complex take on no more than two values from the set $\{1, 2, 3\}$. 
That is, all the side lengths of the boxes are 1 or 2, or all the side lengths are 1 or 3, or all the side lengths are 2 or 3. Then $G$ is 3-colorable. + +**Theorem 2.** Let $G$ be a 3-dimensional box graph that has no cycles on four vertices. Then $G$ is 3-colorable. + +The rest of this paper is organized as follows: in Section 2 we will state and prove some straightforward results on box graphs. We will prove **Theorem 1** in Section 3, and we will prove **Theorem 2** in Section 4. + +## **2. Straightforward results on box graphs** + +As promised, we will start with proofs of **Propositions 1** and **2**. +---PAGE_BREAK--- + +**Proof of Proposition 1.** Suppose {$B_1, B_2, \dots, B_m$} is a box complex in $\mathbb{R}^n$, so that each vertex of each box has $n$ coordinates. Let $x_0, x_1, \dots, x_k$ be the list of all of the different first coordinates of all of the vertices of the boxes in the box complex. Order them so that + +$$x_0 < x_1 < \cdots < x_k.$$ + +Now make a new box complex {$B_1^1, B_2^1, \dots, B_m^1$} such that the vertices are all the same except the first coordinates. Specifically, if the first coordinate of a vertex in $B_j$ is $x_i$, then the first coordinate of the corresponding vertex in $B_j^1$ is the integer $i$. Thus, the vertex $(x_i, y_2, y_3, \dots, y_n)$ of $B_j$ becomes the vertex $(i, y_2, y_3, \dots, y_n)$ of $B_j^1$. + +Note that each $B_i^1$ is still a box, and this does not change the intersection pattern of the boxes. That is, if $B_j \cap B_\ell$ is $d$-dimensional, then so is $B_j^1 \cap B_\ell^1$. (And if $B_j \cap B_\ell$ was empty, then so is $B_j^1 \cap B_\ell^1$.) + +We continue with this process for the 2nd, 3rd, ..., $n$th coordinates. Finally, we get a box complex {$B_1^n, B_2^n, \dots, B_m^n$} with the same intersection pattern as $B_1, B_2, \dots, B_m$ but with all integer coordinates for the vertices. 
Thus, the box graph for complex {$B_1^n, B_2^n, \dots, B_m^n$} is the same as the box graph for complex {$B_1, B_2, \dots, B_m$}. $\square$ + +In order to prove Proposition 2 we first give the definition of *k*-degenerate graphs, and show the well-known result that *k*-degenerate graphs are *k* + 1-colorable [5]. + +**Definition 3.** A graph G is *k*-degenerate if each of its induced subgraphs has a vertex of degree at most k. + +**Lemma 1.** Every *k*-degenerate graph is *k* + 1-colorable. + +**Proof.** Let $G = (V, E)$ be a $k$-degenerate graph. We will proceed by induction on $|V|$, the size of the vertex set. If $|V| = 1$ then certainly $G$ is $k$-colorable for any $k \ge 1$. Now, suppose that $|V| = m \ge 2$, and assume as the induction hypothesis that any $k$-degenerate graph on $m-1$ vertices is $k+1$-colorable. + +Then, since $G$ is $k$-degenerate we know there exists a vertex $v \in V$ with $\deg(v) \le k$. Consider the graph $G-v$, formed by removing vertex $v$ and all of its incident edges, with $m-1$ vertices. This graph must be $k$-degenerate since it is an induced subgraph of $G$. Therefore, by the induction hypothesis we can color $G-v$ using $k+1$ colors. Now, when $v$ and its edges are added back into $G$ we must have at least one available color since $v$ has at most $k$ neighbors and there are $k+1$ total colors. Therefore, by induction, any $k$-degenerate graph is $k+1$-colorable. $\square$ + +We now prove Proposition 2 by showing that any box graph is *n*-degenerate. + +**Proof of Proposition 2.** Let $G = (V, E)$ be a box graph, so that each $v \in V$ is a box in the corresponding box complex. We will label each box in V by its "right, forward, top" vertex. More precisely, each box can be defined as + +$$\{x = (x_1, x_2, \dots, x_n) \in \mathbb{R}^n : a_i \le x_i \le b_i\}$$ + +where $a_i < b_i$ for $i = 1, 2, \dots, n$. We then label this box with $(b_1, b_2, \dots, b_n)$. + +Now find a "right, forward, top" box in the graph. 
That is, find a vertex $u \in V$ with corresponding label $(u_1, u_2, \dots, u_n)$ such that for any other $v \in V$ with label $(v_1, v_2, \dots, v_n)$ and $(u, v) \in E$, we have + +$$u_1 \ge v_1, u_2 \ge v_2, \dots, u_n \ge v_n.$$ + +(Such a box is guaranteed to exist because G is finite.) Note that, by our choice of *u*, *u* has at most *n* neighbors. + +Since we began with an arbitrary box graph, the existence of a degree *n* vertex must be true for all induced subgraphs of G. Therefore, any box graph corresponding to a box complex in $\mathbb{R}^n$ is *n*-degenerate, and by Lemma 1 is *n* + 1 colorable. $\square$ + +We note that the above argument is the *n*-dimensional analogue to the "elbow" argument in [7]. + +We state the following result as a reminder to the reader: + +**Proposition 3.** Let $G = (V, E)$ be a graph. Then the following are equivalent: + +1. The graph G contains no odd cycle. + +2. The graph G is bipartite. + +3. The graph G is 2-colorable. + +**Proof.** Proposition 3 is a well-known introductory graph theory result. See Section I.2 of [3], for example. $\square$ + +The following proposition shows that if a box graph cannot be colored with just 2 colors, it must have some boxes with side lengths that are different from each other. + +**Proposition 4.** Suppose a box complex only contains boxes that are cubes; that is, boxes with all side lengths equal. Then the corresponding box graph is 2-colorable. + +**Proof.** Suppose a box complex contains only cubes, and let $G = (V, E)$ be the corresponding box graph. Without loss of generality, we may assume that G is connected. Thus, since all of the boxes in the corresponding box complex are cubes, they must all be cubes of the same size; let the side length of the cubes be $k$. By the proof of Proposition 1, we can assume that $k \in \mathbb{N}$ and the coordinates of all the vertices of the boxes in the box complex are integer multiples of $k$. 
+---PAGE_BREAK--- + +Just as we did in the proof of Proposition 2, label each $v \in V$ with the “right, forward, top” vertex. Let $(v_1, v_2, \ldots, v_n)$ be the label for vertex $v$. Color vertex $v$ with color + +$$ \frac{1}{k} (v_1 + v_2 + \cdots + v_n) \pmod{2}. $$ + +Note that exactly two colors are used. If two vertices are adjacent: $(u, v) \in E$, then we know that their corresponding labels $(u_1, u_2, \ldots, u_n)$ and $(v_1, v_2, \ldots, v_n)$ must be the same in every coordinate except one, in which they differ by $k$. That is, there exists $i \in \{1, 2, \ldots, n\}$ such that + +$$ \begin{aligned} u_j &= v_j & \text{if } j \in \{1, 2, \ldots, n\} \text{ and } j \neq i \\ u_i &= v_i \pm k. \end{aligned} $$ + +Thus, if two vertices are adjacent then their colors must be different. Thus, this is a valid 2-coloring of G. $\square$ + +In [4] it was proved that any box complex in $\mathbb{R}^3$ that is homeomorphic to a ball is 2-colorable. + +### 3. Proof of Theorem 1 + +We shall prove Theorem 1 in parts via a few lemmas. Here is the first of our lemmas: + +**Lemma 2.** Suppose that each side length of each box in a box complex is a positive integer which is congruent to either 1 or 2 mod 3. Then the corresponding box graph is 3-colorable. + +**Proof.** Consider an $n$-dimensional box complex $\{B_1, B_2, \ldots, B_m\}$, and label each box again by its “right, forward, top” vertex coordinates, $(b_1, b_2, \ldots, b_n)$. Now, color each box by $(b_1 + b_2 + \cdots + b_n)$ mod 3. We claim that this is a valid coloring. + +If two boxes, $B_i$, $B_j$ are adjacent then their right, forward, top vertices will differ in exactly one coordinate. Let $(b_{i,1}, b_{i,2}, \ldots, b_{i,n})$ be the label for $B_i$ and $(b_{j,1}, b_{j,2}, \ldots, b_{j,n})$ the label for $B_j$. Then, WLOG, $b_{i,1} \neq b_{j,1}$ and $b_{i,k} = b_{j,k}$ for $k=2, 3, \ldots, n$. These two boxes will have the same color iff $b_{i,1} - b_{j,1} \equiv 0 \pmod{3}$. 
However, this value is the side length of one of these boxes which we have restricted to not equal any multiple of 3. Therefore neighboring boxes may not have the same color, so this 3-coloring is admissible. $\square$ + +The following corollary follows directly from Lemma 2: + +**Corollary 1.** Suppose a box complex in $\mathbb{R}^n$ has boxes with side lengths only equal to 1 or 2. Then the corresponding box graph is 3-colorable. + +The next in our series of lemmas: + +**Lemma 3.** Suppose that each side length of each box in a box complex is an odd integer. Then the corresponding box graph is 2-colorable. + +**Proof.** We will prove this by showing that there can be no odd cycles in the graph (see Proposition 3). + +Assume we have a box complex $\mathcal{B} = \{B_1, \ldots, B_k\}$. Consider any cycle within the corresponding box graph. Label the vertices of this cycle by the “right, forward, top” corner of the corresponding box, and label each of the edges of the cycle with the distances between those corners, mod 2. In other words, if the neighboring vertices are labeled (1, 1, ..., 1) and (4, 1, ..., 1) then we label the edge with 3 mod 2 = 1. Moreover, we will choose a direction of travel around the cycle and sign the length of the edge positive if we are moving along that edge in the positive direction, and negative if we move along the edge in the negative direction. Thus, for example, if we move from vertex (1, 1, ..., 1) to (4, 1, ..., 1), the edge is labeled with 1 since moving from 1 to 4 is in the positive direction in the first coordinate, whereas if we move from vertex (4, 1, ..., 1) to (1, 1, ..., 1), the edge is labeled with -1. + +We now claim that the sum of the integers along the cycle must be 0. This is because in each dimension, any length we move in the positive direction must be traveled again in the negative direction, and therefore their parity must also be equal. + +Finally, we note that, by assumption, all of the lengths are odd. 
Thus, all edge labels must be either 1 or -1. Since we have a list of edges labeled 1 or -1 and the sum of the labels is 0, there must be an even number of edges in the cycle. $\square$ + +The following corollary follows directly from Lemma 3: + +**Corollary 2.** Suppose a box complex in $\mathbb{R}^n$ has boxes with side lengths only equal to 1 or 3. Then the corresponding box graph is 3-colorable. + +The proof for Theorem 1 when blocks have dimensions 2 or 3, given in the remainder of this section, relies on placing a partial order on the box graph corresponding to a given box complex. The elements of the partially ordered set (poset) are the vertices of the box graph, i.e., the individual boxes that comprise the box complex. As before, we label box $\{x = (x_1, x_2, \ldots, x_n) \in \mathbb{R}^n : a_i \le x_i \le b_i\}$ by its “right, forward, top” vertex coordinates, $(b_1, b_2, \ldots, b_n)$. The order relation for this poset is induced by the following cover relation: box $B_i$ with label $(b_1, b_2, \ldots, b_n)$ covers box $B_j$ with label +---PAGE_BREAK--- + +Fig. 3. All edges above the ones drawn do not change in length after *T* is applied. + +$(c_1, c_2, \ldots, c_n)$ if and only if the two boxes are adjacent and $\sum_{k=1}^{n} b_k \ge \sum_{k=1}^{n} c_k$. Since these adjacent boxes must share an $(n-1)$-dimensional face, their labels will differ in exactly one coordinate, by a difference equal to the dimension of box $B_i$ orthogonal to the shared face $B_i \cap B_j$. + +We note further that the sum $r(B_i) = \sum_{k=1}^{n} b_k$ of the entries of the label of a given box is a rank function for this poset. We will use the rank function and the poset structure to describe valid colorings of the box graph. This technique will consider an initial drawing of the poset (and subsequent re-drawings) with all nodes at integer heights. We then refer to the *length* of an edge in the poset as the positive vertical distance between its endpoints. 
+ +Here is the last of the lemmas that we will need for Theorem 1: + +**Lemma 4.** Suppose a box complex has boxes with side lengths only equal to 2 or 3. Then the corresponding box graph is 3-colorable. + +**Proof.** Consider now the case in which all dimensions of the boxes in a box complex $\mathcal{B} = \{B_1, B_2, \dots, B_m\}$ are 2 or 3. We produce the associated poset $\mathcal{P}$ described above, and make an initial drawing of $\mathcal{P}$ with nodes having heights corresponding to their ranks. Note that this implies that if two boxes $B_i$ and $B_j$ which are adjacent in the box graph are drawn with heights $h_i$ and $h_j$ respectively, then $r(B_i) - r(B_j) = h_i - h_j$, and $h_i - h_j$ is either 2 or 3 if $h_i > h_j$. In other words, all lengths of the edges in the poset are either 2 or 3. Without loss of generality, we can make this drawing so that all rank-minimal vertices have height $h$-value of 0. We now describe how to redraw the poset $\mathcal{P}$ in such a way that all adjacencies and cover relations are preserved, but all edges have lengths equivalent to 1 or 2 mod 3. + +We now consider the lengths of edges in the poset, working our way in order of increasing height $h$ of the terminal endpoints. Since the first nodes occur on the line $h=0$ and all edges have length 2 or 3, no edges terminate on $h=1$, and edges that terminate on $h=2$ have length 2, which is among the desired values. Edges terminating on $h=3$ or above may have length 2 or length 3. We perform the following transformation on the drawing of the poset. Let $h_i$ denote the height of vertex $B_i$ in the initial drawing of the poset. We perform transformation $T$ below to the drawing of the poset: + +$$ T(h_i) = \begin{cases} h_i & \text{if } h_i \le 2, \\ h_i + 2 & \text{if } h_i \ge 3. \end{cases} $$ + +Note that $T$ has no effect on the length of edges terminating at or below $h=2$, and no effect on the length of edges commencing at or above $h=3$. 
For edges that include the interval $[2, 3]$, two units are added to their length. In the new drawing of the poset, no edges will terminate on lines $h=3$ or $h=4$. Edges terminating on $h=5$ were either originally of length 3 commencing from $h=0$ or of length 2 commencing at $h=1$. The former now have length 5, while the length of the latter is now 4. In either case, edges terminating on $h=5$ have lengths equivalent to 1 or 2 mod 3. A similar argument shows that edges in the revised drawing that terminate on $h=6$ or $h=7$ are either of length 2, 4, or 5. (See Fig. 3.) + +Any edges terminating on *h*-values of 8 or higher were not affected by the first stretch, and thus may have length 3. +Continue the stretching/redrawing procedure as before, extending the interval [7, 8] by two units and redrawing the poset. +This procedure only changes the lengths of edges which include the interval [7, 8], so in particular it does not change +the lengths of any prior edges. Since our complex is finite, only finitely many re-drawings are needed to draw the poset +with edges all having length equivalent to 1 or 2 mod 3. At that time, the nodes can be colored using the argument from +Lemma 2. □ + +We can now finally prove Theorem 1: + +**Proof of Theorem 1.** This is a direct consequence of Corollaries **1**, **2**, and **Lemma 4**. □ +---PAGE_BREAK--- + +**Fig. 4.** This 2 × 2 pattern (a 4-cycle in the dual) is forbidden as part of a string complex. + +**Fig. 5.** An example of a string complex. + +**4. Proof of Theorem 2** + +First, a couple of definitions: + +**Definition 4.** A *string complex* is any box complex in $\mathbb{R}^3$ that does not contain a 2 × 2 pattern of boxes shown in Fig. 4. The dual of the forbidden pattern is a 4-cycle, which is the shortest cycle possible in a box complex. So in other words, a string complex is a 3-dimensional box complex whose corresponding box graph has no 4-cycle (see Fig. 5). 
+ +We use the term “string complex” because, without the 2 × 2 pattern in Fig. 4, the box complex is forced to have lots of “holes” and be “stringy.” + +**Definition 5.** A 3-dimensional box complex {$B_1, B_2, B_3, \dots, B_m$} is *reducible* to the 3-dimensional box complex {$A_1, A_2, \dots, A_\ell$} ($\ell \le m$) if one can sequentially remove boxes from complex {$B_1, B_2, \dots, B_m$} of degree $\le 2$ in order to obtain complex {$A_1, A_2, \dots, A_\ell$}. More specifically, there exists an ordering $B_1, B_2, \dots, B_m$ such that + +$$B_i = A_i \quad \text{for } i = 1, 2, \dots, \ell$$ + +and for $j = 0, 1, 2, \dots, m - \ell - 1$, the box $B_{m-j}$ has degree $\le 2$ in the box complex + +$$\{B_1, B_2, \dots, B_{m-j}\}.$$ + +A box complex is *irreducible* if every vertex is of degree $\ge 3$. + +Note that a complex may be reducible to a smaller complex which is itself irreducible. +The following lemma is analogous to the tools we used in the proof of Proposition 2: + +**Lemma 5.** If a 3-dimensional box complex is reducible to the empty complex, then its corresponding box graph is 3-colorable. + +**Proof.** We prove this by induction on $m$, the number of boxes in the box complex. Certainly if $m=1$, the box graph is 3-colorable. Suppose that $m \ge 2$, and that for any 3-dimensional box complex on $m-1$ boxes which is reducible to the empty complex, the corresponding box graph is 3-colorable. Suppose that the box complex {$B_1, B_2, \dots, B_m$} is reducible to the empty complex. That is, for $i=1, 2, \dots, m$, the box $B_i$ has degree $\le 2$ in the complex + +$$\{B_1, B_2, \dots, B_i\}.$$ + +Note that the box complex {$B_1, B_2, \dots, B_{m-1}$} is also reducible to the empty complex and has $m-1$ boxes in it. Thus, by our inductive assumption, the corresponding graph is 3-colorable. Now, because $B_m$ had degree $\le 2$ in the box complex {$B_1, B_2, \dots, B_m$}, we can choose to color $B_m$ a color which is different from the colors of its neighbors. 
Thus, we have proven the lemma. $\square$ +---PAGE_BREAK--- + +**Fig. 6.** $b_0$ is the topmost, leftmost box in the top layer $T$. + +By Lemma 5, Theorem 2 is a direct corollary of the following theorem and its subsequent corollary: + +**Theorem 3.** Every string complex is reducible. + +**Proof.** Assume to the contrary. That is, let $\mathcal{S} = \{S_1, S_2, \dots, S_m\}$ be an irreducible string complex. We will show that irreducibility implies the complex must contain a 2 × 2 pattern of boxes, which contradicts the assumption that the complex is a string complex. + +Let $T_1, T_2, \dots, T_\ell$ be the top layer of boxes in $\mathcal{S}$; say the top faces lie in a plane parallel to the xy-plane, extreme in the +z direction. We first claim that every box in $T_1, T_2, \dots, T_\ell$ must have degree $\ge 2$ within the complex $\mathcal{T} = \{T_1, T_2, \dots, T_\ell\}$. Suppose otherwise. That is, suppose there is a box $T_i$ with degree $\le 1$ within the box complex $\mathcal{T}$. Then $T_i$ can have at most degree 2 in the complex $\mathcal{S}$ by joining to a box beneath it. But we know that every box in $\mathcal{S}$ must have degree $\ge 3$, because the complex $\mathcal{S}$ was irreducible. Thus, it is indeed true that each $T_i$, $i = 1, 2, \dots, \ell$ has degree $\ge 2$ in the complex $\mathcal{T}$. + +Now we look at an extreme corner box of $T_1, T_2, \dots, T_\ell$. Specifically, let $b_0$ be backmost (extreme in the +y direction), and among the topmost boxes of $\mathcal{T}$, leftmost (extreme in the -x direction). So $b_0$ is a type of “upper left corner”. Because it is extreme in two directions, two of its faces in $\mathcal{T}$ are exposed, so it must have exactly degree 2 in $\mathcal{T}$. Because we assumed $\mathcal{S}$ is irreducible, $b_0$ (and indeed every box of $\mathcal{S}$) must have degree $\ge 3$. So $b_0$ must be adjacent to a box $b'_0$ beneath it (beneath in the z-direction). See Fig. 6. 
+ +Let $b_1$ and $b_2$ be the boxes adjacent to $b_0$ in $T$, with $b_1$ adjacent to $b_0$ in the x-direction as in the figure. Again, by our previous arguments, $b_1$ must have degree $\ge 2$ in $\mathcal{T}$. It is already adjacent to $b_0$ to its left, and it cannot be adjacent to a box above it, because it is topmost. So it must be adjacent to one or both of the boxes labeled $b_3$ and $b_4$ in the figure. + +However, $b_1$ cannot be adjacent to $b_3$, for then $\{b_0, b_1, b_2, b_3\}$ forms a 2 × 2 pattern, contradicting the assumption that $\mathcal{S}$ is a string complex. Therefore $b_1$ must be adjacent to $b_4$ in Fig. 6. Now $b_1$ has degree exactly 2 in $T$. Because it must have degree $\ge 3$ for $\mathcal{S}$ to be irreducible, it must be adjacent to box $b'_1$ underneath. But now $\{b_0, b_1, b'_0, b'_1\}$ forms a 2 × 2 pattern, again contradicting the assumption that $\mathcal{S}$ is a string complex. + +We have now exhausted all possibilities, which have led to contradictions. So the assumption that $\mathcal{S}$ is irreducible is false, and $\mathcal{S}$ must be reducible. ☐ + +**Corollary 3.** Every string complex can be reduced to the empty complex. + +**Proof.** Let $\mathcal{S}$ be a string complex. It cannot be irreducible by Theorem 3, and so it must have a box $b$ of degree $\le 2$. Let $\mathcal{S}_1 = \mathcal{S} \setminus b$ be the complex with $b$ removed. We claim that $\mathcal{S}_1$ is again a string complex. The reason is that the forbidden 2 × 2 pattern cannot be created by the removal of a box. Therefore, applying Theorem 3 again, $\mathcal{S}_1$ is reducible. Continuing in this manner, we can reduce $\mathcal{S}$ to the empty complex. ☐ + +**5. Conclusion** + +That box complexes in $\mathbb{R}^2$ sometimes need 3 colors is a straightforward observation, but whether any box complex in $\mathbb{R}^3$ might need 4 colors is an open question. 
Although it is natural to expect that the chromatic number might be $n+1$ for boxes in $\mathbb{R}^n$ as it is for simplices, we in fact have no example that requires more than 3 colors for any $n \ge 3$. + +**Acknowledgments** + +We thank the participants of the 2012 AMS Mathematics Research Institute for stimulating discussions, and we thank the referees for their insightful comments. The proof of Theorem 2 was developed in collaboration with Smith students Lily Du, Jessica Lord, Micaela Mendlow, Emily Merrill, Viktoria Pardey, Rawia Salih, and Stephanie Wang. The first, third and last authors were supported by an AMS Mathematics Research Communities grant. +---PAGE_BREAK--- + +References + +[1] K. Appel, W. Haken, Every planar map is four colorable, Bull. Amer. Math. Soc. 82 (5) (1976) 711-712. + +[2] Bhaskar Bagchi, Basudeb Datta, Higher-dimensional analogues of the map coloring problem, Amer. Math. Monthly 120 (8) (2013) 733–737. + +[3] Béla Bollobás, Modern Graph Theory, in: Graduate Texts in Mathematics, vol. 184, Springer-Verlag, New York, 1998. + +[4] Suzanne Gallagher, Joseph O'Rourke, Coloring objects built from bricks, in: Proc. 15th Canad. Conf. Comput. Geom., 2003, pp. 56–59. + +[5] Alexandr V. Kostochka, On almost (k - 1)-degenerate (k + 1)-chromatic graphs and hypergraphs, Discrete Math. 313 (4) (2013) 366–374. + +[6] Joseph O'Rourke, A note on solid coloring of pure simplicial complexes, December 2010, arXiv:1012.4017 [cs.DM]. + +[7] Tom Sibley, Stan Wagon, Rhombic Penrose tilings can be 3-colored, Amer. Math. Monthly 107 (3) (2000) 251–253. + +[8] Alexander Soifer, Chromatic number of the plane & its relatives. I. The problem & its history, Geombinatorics 12 (3) (2003) 131–148. 
\ No newline at end of file diff --git a/samples_new/texts_merged/3764397.md b/samples_new/texts_merged/3764397.md new file mode 100644 index 0000000000000000000000000000000000000000..55a92cb34cd0e28025edc25316fc667b0f7b0205 --- /dev/null +++ b/samples_new/texts_merged/3764397.md @@ -0,0 +1,278 @@ + +---PAGE_BREAK--- + +Early Collision and Fragmentation Detection of Space +Objects without Orbit Determination + +Lyndy E. Axon* + +This paper demonstrates that, using the hypothesized constraints of the admissible regions, it is possible to determine if a combination of new uncorrelated debris objects has a common origin that also intersects with a known catalog object orbit, thus indicating a collision or fragmentation has occurred. Admissible region methods are used to bound the feasible orbit solutions of multiple observations using constraints on energy and radius of periapsis, propagating them to a common epoch in the past, and using sequential quadratic programming optimization to find a set of solution states that minimize the Euclidean distance between the observations at that time. If this set of solutions intersects with a catalog object orbit, then that object is the probabilistic source of the debris objects. This proposed method is demonstrated on an example of a low-Earth object observation. + +I. Introduction + +A problem of constant concern for the future of space operations, especially as massive thousand-satellite constellations are in the design phase, is the tracking, orbit determination, and cataloging of all space objects in orbit around Earth. The U.S. 
Air Force Space Command utilizes the Space Surveillance Network (SSN) to make approximately 80,000 daily observations to track an estimated population of over 300,000 objects with a diameter of over 1 cm, 17,000 known catalog objects greater than 10 cm in diameter, and 1300 active satellites.¹,²,³ In over 50 years of space missions, over 5000 satellites have gone into orbit, of which less than 1300 are still operational today.⁴ Many of the remaining satellites have deorbited successfully, or were put into designated storage orbits prior to end-of-life, however, a large number of them remain dormant in orbit around the Earth.⁵ In addition to defunct satellites, debris from collisions, fragmentations, and launch litter the operational orbit environments from LEO to GEO. Not all the SSN's daily observations, or uncorrelated tracks (UCTs), can be used to create actionable information. + +Extracting actionable information from an initial UCT is not a simple task, for with a single UCT it is not possible to uniquely identify the state of the object, or how useful it would be to immediately prioritize additional observations.³ On a daily basis, thousands of observations of space objects from the SSN take place over short time periods and do not possess enough geometric diversity in the observation data to initiate a well-posed classical initial orbit determination (IOD) problem, such as angles-only IOD. Traditional orbit determination methods rely on the curvature of the measurements in order to produce a state estimate. However, measurements obtained from a short observation or a very short sequence of observations have linear dynamics and traditional methods fail as the observation time decreases.⁶ Optical sensors measure state information as either a series of angle measurements over time or from streaks formed during a single observation; these angular measurements form a tracklet, but the range and range-rate of the space object (SO) are not observable. 
Therefore, the SO state is underdetermined and for any given tracklet, a continuum of range and range-rate solutions are possible which define the admissible region for a given observation.⁷ + +In an operational environment when UCTs cannot be correlated with known objects in the Space Object Catalog (SOC), operators must have a method to quickly determine if a potential threat exists. Extreme examples of potential threats include a decreased capability due to a breakup of an asset, or a debris field created by a collision. These debris objects must have had an origin, and it is currently computationally difficult and time consuming to solve this problem with real-time accuracy, and as a result collisions and fragmentations of smaller space objects have occurred. To accurately correlate new UCTs with a known + +*Graduate Researcher, Daniel Guggenheim School of Aerospace Engineering, Georgia Institute of Technology, 270 First Dr. Atlanta, GA 30313. +---PAGE_BREAK--- + +catalog object's orbit as an origin, multiple orbits must occur for LEO cases, and hours of continuous tracking is required for GEO cases. In this situation, it is more efficient to take a collection of UCTs and propagate them back over a designated period of time to determine if any of the possible states shared the same position at the same epoch, which would indicate that the observed UCTs were disparate debris from a known catalog object. Using admissible regions to initiate this approach allows the tasks of initial orbit determination and tracking to be foregone, which allows for faster actionable information. This would allow operators to track incoming UCTs and assign them as fragments or debris from a past event with a tracked catalog object, and allow for tasking of Space Surveillance Network assets to observe the catalog object as well as characterize the current state and future risks that these debris objects may pose. 
+ +Admissible region ($\mathcal{R}$) methods constrain undetermined states using a priori constraint hypotheses, and have been proposed to support data association and track-initiation tasks. Many have extended the applicability of AR methods to space situational awareness (SSA) since Milani et al. first proposed applying these methods to the too-short arc (TSA) problem in asteroid detection.⁸ The AR approach has been applied by Tommei et al. to SO detection and discrimination by using radar and optical measurements.⁹ Optimization methods to identify a best-fitting orbit solution are proposed by Siminski et al.¹⁰ Existing admissible region methods can be used by discretizing the admissible region and considering the solutions at discrete points, which would allow for a particle filter approach.¹ Additionally, an optimization scheme can be used to identify the best fitting orbits within an admissible region, eliminating the need to discretize the whole region.¹⁰ Fujimoto and Scheeres' work shows that observations can be associated by applying Bayes' rule to an admissible region generated from two epochs, where a nonzero result indicates that the observations are correlated.¹¹ In addition, a solution technique for correlating multiple optical observations by computing the overlap between their admissible regions, as well as using highly constrained probability distributions in Poincare orbit element space, has been proposed by Fujimoto and Scheeres.¹² Worthy et al. has developed an observation association method which uses an optimization based approach to identify local Mahalanobis distance minima in state space between two uncertain admissible regions.¹³ A limitation of these methods using the intersection of the $\mathcal{R}$ volumes is that a feasible orbit can only be constructed if the observations are of the same object, otherwise these iterative solution methods will fail. 
The proposed methodology in this paper seeks to demonstrate that, given multiple new debris objects that cannot be associated with any known catalog object, it is possible to determine whether a collision or fragmentation event has occurred, and from what origin, in near real-time as new UCTs become available. + +This paper proposes a methodology for applying AR methods to bound the feasible orbit solutions of multiple observations using constraints on energy and radius of periapsis, propagating them to a common epoch in the past, and using sequential quadratic programming optimization to find a set of solution states that minimize the Euclidean distance between the observations at that time. This numerical zero-finding approach demonstrates that given two uncorrelated observations, and corresponding admissible regions, a line of feasible solutions exists that minimizes the distance between the objects. In summary, this paper demonstrates that, using the hypothesized constraints of the admissible regions, it is possible to determine if a combination of new uncorrelated debris objects has a common origin that also intersects with a known catalog object orbit, thus indicating a collision or fragmentation has occurred. + +## II. Approach and Methodology + +The goal of this methodology is to detect collisions and fragmentations by observing disparate debris without requiring the computational and time burden of using orbit determination. This approach can be used for a variety of orbit types and observation lengths. Given two uncorrelated observations, at two different times, $t_1$ and $t_2$, the proposed method will determine if a common origin exists for these objects at a selected epoch $t_0$. Figure 1 shows the orbital path in $\mathbb{R}^6$ orbit element space of a known catalog object as a function of time, until at some $t_0$ a break-up event occurs that results in a discrete number of debris objects. 
Each observation at $t_1$ and $t_2$ is of different debris from what is hypothesized to be a common origin. +---PAGE_BREAK--- + +Figure 1. Catalog Object Break-up at a given Epoch as a function of time + +Given independent observations of multiple debris objects, a continuum of range and range-rate combinations define the admissible region. These range and range-rate solutions make up the undetermined portion of a potential full state; each full-state (determined or observable information combined with unobservable) corresponds to a given position and velocity solution. These solutions can be propagated back to an arbitrary estimated epoch $t_0$, at which a solution manifold can be constructed by using sequential quadratic programming and selected constraint criteria to minimize the Euclidean distance between the positions of the two observed objects at $t_0$. The solution manifold represents a line of possible common origins that goes through $\mathbb{R}^6$; if it intersects with the catalog object orbit then the observed objects have spawned from a break-up event involving that known object. Figure 2 is a three-dimensional illustration of the previous figure, but at a particular time. Notice in this figure that the solution manifold will cross the orbit of the catalog object at the hypothesized epoch $t_0$. + +Figure 2. Catalog Object Break-Up and Observation of Debris Objects from Ground Station + +Optical measurements generate angle and angle rates of objects tracked using a streak or sequence of angle measurements of right ascension, α, and declination, δ. The parameters associated with optical measurements include the observer position and velocity, **o** and $\dot{\textbf{o}}$, respectively, as well as the times at which the observations are made. 
Using this information, the position, **r**, and velocity, **v** of the object are given by + +$$ \mathbf{r} = \mathbf{o} + \rho \hat{\mathbf{l}} \qquad (1) $$ +---PAGE_BREAK--- + +where $\rho$ is the range to the target, $\dot{\rho}$ is the range-rate, and $\hat{\mathbf{l}}$, $\hat{\mathbf{l}}_{\alpha}$, and $\hat{\mathbf{l}}_{\delta}$ are given by + +$$ \mathbf{v} = \dot{\mathbf{o}} + \dot{\rho}\hat{\mathbf{l}} + \rho\dot{\alpha}\hat{\mathbf{l}}_{\alpha} + \rho\dot{\delta}\hat{\mathbf{l}}_{\delta} \quad (2) $$ + +$$ \hat{\mathbf{l}} = \begin{bmatrix} \cos\alpha\cos\delta \\ \sin\alpha\cos\delta \\ \sin\delta \end{bmatrix} \qquad (3) $$ + +$$ \hat{\mathbf{l}}_{\alpha} = \begin{bmatrix} -\sin\alpha\cos\delta \\ \cos\alpha\cos\delta \\ 0 \end{bmatrix} \qquad (4) $$ + +$$ \hat{\mathbf{l}}_{\delta} = \begin{bmatrix} -\cos\alpha\sin\delta \\ -\sin\alpha\sin\delta \\ \cos\delta \end{bmatrix} \qquad (5) $$ + +For this system, the states **x**, the observations **k**, and parameters **p** are defined as + +$$ \mathbf{x}^T = [\alpha \ \dot{\alpha} \ \delta \ \dot{\delta} \ \rho \ \dot{\rho}] \qquad (6) $$ + +$$ \mathbf{k}^T = [\alpha_1 \dots \alpha_q \ \delta_1 \dots \delta_q] \qquad (7) $$ + +$$ \mathbf{p}^T = [\mathbf{o}^T \dot{\mathbf{o}}^T] \qquad (8) $$ + +where $\dot{\alpha}$ and $\dot{\delta}$ are the angle rates which are generated using Lagrange Interpolation shown in Equation 9, and $q$ is the number of observations. In order to limit the inherent error associated with using Lagrange interpolation from point values, streak observations are used in this methodology. The rate estimations from the center of each streak are used for further calculations as this provides a better estimate of the rate than the beginning of the streak. 
+ +$$ \begin{aligned} \dot{\alpha}(t) ={}& \alpha(t_1) \frac{(t-t_2) + (t-t_3) + \cdots + (t-t_q)}{(t_1-t_2)(t_1-t_3)\cdots(t_1-t_q)} \\ &+ \alpha(t_2) \frac{(t-t_1) + (t-t_3) + \cdots + (t-t_q)}{(t_2-t_1)(t_2-t_3)\cdots(t_2-t_q)} \\ &+ \cdots + \alpha(t_q) \frac{(t-t_1) + (t-t_2) + \cdots + (t-t_{q-1})}{(t_q-t_1)(t_q-t_2)\cdots(t_q-t_{q-1})} \end{aligned} \qquad (9) $$ + +For an observation with two measurements, the combined measurement and parameter vector, $\mathbf{y}^T \in \mathbb{R}^{12}$ is given by + +$$ \mathbf{y}^T = [\alpha_1 \ \alpha_2 \ \delta_1 \ \delta_2 \ t_1 \ t_2 \ \mathbf{o}^T \ \dot{\mathbf{o}}^T] \qquad (10) $$ + +Given $\mathbf{y}$ and solving for the angle rates using Equation 9, four of the six states in $\mathbf{x}$ can be observed or determined; these four states, known henceforth as $\mathbf{x}_d$, are shown in Equation 11. The remaining two undetermined states, known as $\mathbf{x}_u$, are given by Equation 12. + +$$ \mathbf{x}_d = \begin{bmatrix} \alpha \\ \dot{\alpha} \\ \delta \\ \dot{\delta} \end{bmatrix}_{4\times1} \qquad (11) $$ + +$$ \mathbf{x}_u = \begin{bmatrix} \rho \\ \dot{\rho} \end{bmatrix}_{2\times1} \qquad (12) $$ + +To limit the realm of possible solutions for $\mathbf{x}_u$, constraint hypotheses are imposed on the admissible regions. These constraints can be based on a priori information about the observation (e.g., whether the object is LEO or GEO), as well as reasonable constraints for objects in orbit around Earth can be imposed. For the +---PAGE_BREAK--- + +purpose of this paper, the primary assumption is that of 2-body motion, which allows the use of a constraint on the specific orbital energy equation. This constraint, $\kappa$, requires that the space object is in Earth's orbit, and therefore excludes hyperbolic orbit solutions. 
To constrain these solutions for $\mathbf{x}_u$, the admissible region set $\mathcal{R}$ can be defined as $\{\mathbf{x}_u \in \mathbb{R}^2 \mid \epsilon(\mathbf{r}, \mathbf{v}) \le 0\}$, whose boundary is the solution to Equation 13.⁶ The solutions to this polynomial define the two dimensional boundary of the admissible region. + +$$ \kappa(\mathbf{x}_u, \mathbf{y}) = 2\epsilon(\mathbf{r}, \mathbf{v}) = \dot{\rho}^2 + w_1\dot{\rho} + T(\rho) - \frac{2\mu}{\sqrt{S(\rho)}} = 0 \quad (13) $$ + +Farnocchia et al. and Tommei et al. define $T(\rho)$, $S(\rho)$, and coefficients $w_0$ through $w_5$ as Equations 14 and 15.¹⁴.⁹ + +$$ T(\rho) = w_2\rho^2 + w_3\rho + w_4, \quad S(\rho) = \rho^2 + w_5\rho + w_0 \quad (14) $$ + +$$ +\begin{align} +w_0 &= \|\mathbf{o}\|^2, & w_1 &= 2\langle \dot{\mathbf{o}} \cdot \hat{\mathbf{l}} \rangle \\ +w_2 &= \dot{\alpha}^2 \cos^2 \delta + \dot{\delta}^2, & w_3 &= 2\dot{\alpha} \langle \dot{\mathbf{o}} \cdot \hat{\mathbf{l}}_\alpha \rangle + 2\dot{\delta} \langle \dot{\mathbf{o}} \cdot \hat{\mathbf{l}}_\delta \rangle \\ +w_4 &= \|\dot{\mathbf{o}}\|^2, & w_5 &= 2\langle \mathbf{o} \cdot \hat{\mathbf{l}} \rangle +\end{align} +\quad (15) +$$ + +To further constrain the realm of possible state solutions, a periapsis radius constraint is used to exclude parabolic and potentially re-entering space objects that will impact the Earth in less than one revolution. For the purpose of this paper, the minimum radius of periapsis $r_{min}$ is set at 6378 km plus $h_{atm}$, where $h_{atm}$ is 200 km. A form of this constraint, $r_p = a(1-e) \ge r_{min}$ was proposed by Maruskin et al.¹ The periapsis constraint $r_{min} - r_p(\rho, \dot{\rho})$ was analytically developed by Farnocchia et al. 
to be¹⁴ + +$$ (r_{min}^2 - \|D\|^2)\dot{\rho}^2 - P(\rho)\dot{\rho} - U(\rho) + r_{min}^2 T(\rho) - \frac{2r_{min}^2\mu}{\sqrt{S(\rho)}} \le 0 \quad (16) $$ + +with + +$$ P(\rho) = 2\mathbf{D} \cdot \mathbf{E}\rho^2 + 2\mathbf{D} \cdot \mathbf{F}\rho + 2\mathbf{D} \cdot \mathbf{G} - r_{min}^2 w_1 \quad (17) $$ + +$$ U(\rho) = \|E\|^2 \rho^4 + 2E \cdot F\rho^3 + (2E \cdot G + \|F\|^2)\rho^2 + 2F \cdot G\rho + \|G\|^2 - 2r_{min}\mu \quad (18) $$ + +given the following + +$$ +\begin{align} +\mathbf{D} &= \mathbf{o} \times \hat{\mathbf{l}}, & \mathbf{E} &= \hat{\mathbf{l}} \times (\dot{\alpha}\mathbf{l}_{\alpha} + \dot{\delta}\mathbf{l}_{\delta}) \\ +\mathbf{F} &= \mathbf{o} \times (\dot{\alpha}\mathbf{l}_{\alpha} + \dot{\delta}\mathbf{l}_{\delta}) + \mathbf{l} \times \dot{\mathbf{o}}, & \mathbf{G} &= \mathbf{o} \times \dot{\mathbf{o}} +\end{align} +\quad (19) +$$ + +Additional constraints may be relevant depending on available a priori information about the space object. For example, eccentricity would be an appropriate constraint to apply to GEO observations.¹ For the purpose of this paper, only energy and radius of periapsis constraints will be imposed. Imposing these constraints on an observation **y** results in a two dimensional space of solutions to **x**_u that could possibly complete the state **x** of the observed space object. + +Given two observations of an object, such as shown in Equation 10, admissible regions can be determined for each observation, $\mathcal{R}_1$ and $\mathcal{R}_2$. Each of these have a set of possible undermined states $\mathbf{x}_u$ that satisfy the aforementioned constraints. 
By combining these into a single variable **z** + +$$ \mathbf{z} = \begin{bmatrix} \mathbf{x}_{u,1} \\ \mathbf{x}_{u,2} \end{bmatrix} = \begin{bmatrix} \rho_1 \\ \dot{\rho}_1 \\ \rho_2 \\ \dot{\rho}_2 \end{bmatrix} \quad (20) $$ + +It is possible to conduct a random uniform sampling of both $\mathcal{R}_1$ and $\mathcal{R}_2$ to collect a set of **z** solutions that satisfy the constraints. Each $\mathbf{x}_{u,1}$ and $\mathbf{x}_{u,2}$, combined with $\mathbf{x}_{d,1}$ and $\mathbf{x}_{d,2}$, respectively, create a possible full state solution $\mathbf{x}_1$ and $\mathbf{x}_2$ for the observed space object. Each of these states can be converted into Cartesian position **r** and velocity **v** by using Equations 1 and 2. Propagating these states back to some common time *t* in the past, the resulting vectors are defined as +---PAGE_BREAK--- + +$$ +\begin{align} +\mathbf{r}_1(t) &= [\mathbb{I} \ 0] \phi(t, \mathbf{x}_{u,1}, \mathbf{x}_{d,1}, t_1) \\ +\mathbf{r}_2(t) &= [\mathbb{I} \ 0] \phi(t, \mathbf{x}_{u,2}, \mathbf{x}_{d,2}, t_2) +\end{align} +\quad (21) +$$ + +From this, the goal is to determine if there is a set of solutions for **z** that minimize the Euclidean distance between the position vectors corresponding to each observation time. 
The cost function J(**z**) and gradient are as follows + +$$ +J(\mathbf{z}) = \frac{1}{2} (\mathbf{r}_1 - \mathbf{r}_2)^T (\mathbf{r}_1 - \mathbf{r}_2) \quad (22) +$$ + +$$ +\frac{\partial J}{\partial \mathbf{z}} = \left[ \frac{\partial J}{\partial \mathbf{x}_{u,1}}, \frac{\partial J}{\partial \mathbf{x}_{u,2}} \right] = \left[ (\mathbf{r}_1 - \mathbf{r}_2)^T \cdot \left[ \mathbb{I} \ 0 \right] \frac{\partial \phi}{\partial \mathbf{x}_1} \frac{\partial \mathbf{x}_1}{\partial \mathbf{x}_{u,1}}, (\mathbf{r}_1 - \mathbf{r}_2)^T \cdot \left[ \mathbb{I} \ 0 \right] \frac{\partial \phi}{\partial \mathbf{x}_2} \frac{\partial \mathbf{x}_2}{\partial \mathbf{x}_{u,2}} \right] \quad (23) +$$ + +**Algorithm 1:** Algorithm to Determine Solution Manifold + +**Result:** Minimize Eq. 22 + +1 initialization of givens, observables, and parameter settings; + +2 compute GS Vectors and observer unit vectors with Eq. 3, 4, & 5; + +3 compute $\mathcal{R}$ boundaries for each Obs. by solving the quadratic equation for $\dot{\rho}$ given a continuous set of $\rho$ values using Eq. 13; + +4 uniformly sample from $\mathcal{R}$ interiors by selecting a random $\rho$ & $\dot{\rho}$ based on the min and max values and satisfying the energy (Eq. 13) and radius of periapsis constraints (Eq. 16); + +5 construct $\mathbf{z}$ (Eq. 20) by stacking the sample values from $\mathcal{R}_1$ & $\mathcal{R}_2$; + +6 **for** *i* = 1:*length(*z*) *do* + +7    Establish current **z** "guess" value (*z* = *z*(:, *i*)) ; + +8    **while** $J(\tilde{\mathbf{z}}) \geq Tolerance$ **do** + +9      Use fmincon to estimate the gradient (Eq. 23) and step **z** in that direction using nonlinear constraints in Eqs. 13 & 16; + +10      Update **z** value to reflect step towards minimum; + +11      Evaluate constraints (Eq. 13 and Eq. 
16) given current **z** value to ensure solution still falls within $\mathcal{R}$; + +12      **if** current **z** not within $\mathcal{R}$ (does not meet constraints); + +13      **then** + +14        Get new "guess" for **z** from fmincon by continuing; + +15      **else** + +16        Convert **z** to cartesian using Eq. 1 & 2 to get $\tilde{\mathbf{z}}$; + +17        Propagate $\tilde{\mathbf{z}}$ to $t_0$ and calculate distance using Eq. 22; + +18      **end** + +19 **end** + +20 save **z** solution value that minimize $J(\mathbf{z})$ (Eq. 22) + +21 end + +III. Results + +The goal of this methodology is to detect collisions and fragmentations by observing disparate debris. To demonstrate the initial effectiveness of this approach, two independent optical observations of the same object where used. The observations were made for one second exposures 5 minutes (300 seconds) apart based on an observation taken March 1, 2014 at 02:01:36 UTC. The measurement values for the tested LEO case are given in Table 1. The observations were made using an equatorial mounted telescope from Deerlick Astronomy Village, the observer parameters are given in Table 2. Error in observation measurements were assumed to have a zero angle mean noise and approximately 0.5 arcsecond standard deviation of the noise on the angle observations (right ascension and declination). The standard deviation is approximated at this value due to the type of mount the observations were made from, as well as the exposure time. +---PAGE_BREAK--- + +**Table 1. LEO Optical Observation Measurements** + +
Timeα (rad)δ (rad)Exposure (sec)
02:01:361.40070.55561
02:06:361.3504-0.69311
+ +**Table 2. Observer Parameters for Deerlick Astronomy Village, GA** + +
LatitudeLongitudeAltitude (m)
33.561deg N82.764deg W176.8
+ +From these observations, admissible regions were constructed using a radius of periapsis constraint of 6578 km (radius of Earth plus 200km), an energy constraint of less than zero (Earth orbiting), and eccentricity constraint of less than 0.7. A set *n* particle pairs **x***u*'s that meet these constraints were then created by randomly uniformly sampling the interiors of each admissible region. Then set from the observation at t1, **x***u*,1, is combined with **x***u*,2 from the observation at t2, it results in **z** being a 4 × *n* matrix (Equation 20). Figure 3 shows the admissible regions corresponding to each observation as well as the sampled points from each interior. + +**Figure 3. Admissible Region boundaries for Observation 1 & 2** + +Using this test case, two different epoch times were selected: 100 seconds and 1 hour (3600 seconds) in the past. In each of these scenarios, each column in mathbf*z* is then stepped towards a minimum solution for the cost function *J*(*z*) at time t0 by using the MATLAB function fmincon from the optimization toolbox to solve for the minimum of Equation 22 given the nonlinear constraints and functions. Fmincon is a gradient-based method that is designed to work on problems where the objective and constraint functions are both continuous and have continuous first derivatives. + +**A. Epoch Time = -100 Seconds** + +In this scenario, 5000 particles were sampled from the admissible regions, resulting in a 4 × 5000 matrix for **z**. Each of the 5000 columns of **z** was propagated backwards 100 seconds using a two-body propagator in Ode45. The solution values for **z**, that correspond to each observation, that minimize the Euclidean distance between the observed objects are shown in Figures 4 and 5. At an epoch that is only 100 seconds before the first observation, the solution manifold appears to have very limited curvature. +---PAGE_BREAK--- + +Figure 4. 
Admissible Region for Observation 1 with minimized solutions and truth for $t_0 = -100$sec + +Figure 5. Admissible Region for Observation 2 with minimized solutions and truth for $t_0 = -100$sec + +The solution manifold line in the first observation's admissible region is much longer than the corresponding line in the second observation's admissible region. This result was to be expected, as the first observation was taken at a minimum range to the ground station, which means that a set of possible solution orbits with larger variation are possible. Conversely, the second observation was taken at a much lower elevation, thus increasing the slant range to the object from the ground station. This provides a smaller amount of variation in the solution states. The 3D plots shown in Figures 6 and 7 show the solution manifold in the position and velocity space. In these figures, the first observation is indicated with a blue arrow, the second with an orange arrow, and the ground station with a green arrow. The solution manifold is a short line made up of red (observation 1) and blue (observation 2) position solutions that clearly intersects with the shown known object truth orbit at the given epoch. This indicates that the observed debris objects have the same origin and it is possible that they spawned from an event involving the shown known object orbit. +---PAGE_BREAK--- + +Figure 6. 3D Plot Earth Hemisphere with Solution Manifold and True Catalog Orbit for $t_0 = -100sec$ + +Figure 7. Solution Manifold and True Catalog Orbit Intersection for $t_0 = -100sec$ + +## B. Epoch Time = -1 Hour + +In this scenario, 1100 particles were sampled from the admissible regions, resulting in a 4 × 1100 matrix for **z**. Each of the 1100 columns of **z** was propagated backwards one hour (3600 seconds) using a two-body propagator in Ode45. 
The solution values for **z**, that correspond to each observation, that minimize the Euclidean distance between the observed objects are shown in Figures 8 and 9. At an epoch that is one hour prior to the first observation, the solution manifold appears to have an increased amount of curvature when compared with the corresponding results from the previous scenario. This is especially true of the solution manifold in Observation 2's admissible region. +---PAGE_BREAK--- + +Figure 8. Admissible Region for Observation 1 with minimized solutions and truth for $t_0 = -3600$ sec + +Figure 9. Admissible Region for Observation 2 with minimized solutions and truth for $t_0 = -3600$ sec + +Just as was shown with the first scenario, the solution manifold line in the first observation's admissible region is much longer than the corresponding line in the second observation's admissible region. However, for an epoch of one hour prior to the first observation, the solution manifolds display much more curvature than the 100 second scenario. This is especially evident in the solution manifold corresponding to observation 2 in Figure 9. The 3D plots shown in Figure 10 show the solution manifold in the position and velocity space. In this figure, the first observation is indicated with a blue arrow, the second with an orange arrow, and the ground station with a green arrow. The solution manifold is a short line made up of red (observation 1) and blue (observation 2) position solutions that clearly intersects with the shown known object truth orbit at the given epoch. This indicates that the observed debris objects have the same origin and it is possible that they spawned from an event involving the shown known object orbit. The solution manifold displays much more interesting characteristics and curvature in this scenario, as it extends well beyond the known object orbit. +---PAGE_BREAK--- + +Figure 10. 
3D Plot Earth Hemisphere with Solution Manifold and True Catalog Orbit for $t_0 = -3600sec$ + +### C. Error and Challenges + +Error in observation measurements were assumed to have a zero angle mean noise and approximately 0.5 arcsecond standard deviation of the noise on the angle observations (right ascension and declination). The standard deviation is approximated at this value due to the type of mount the observations were made from, as well as the exposure time. Error is also inherent with any numerical propagation method, such as Ode45. The relative and absolute tolerances were set to $1e^{-12}$ to limit error throughout this process. Additional sources of error can be found from using Lagrangian interpolation in order to estimate the angle rates of each observation. As aforementioned, to minimize this error source, streaks were used and the observation information from the center of the streak was used and the rates were estimated using the beginning and the end of the streak. For future work, the rates will be fed in as part of the 4-state $x_d$ and not estimated based on the right ascension and declination of the observations. This approach is computationally slow because it implements fmincon, which estimates the gradient, instead of using a gradient-based approach like steepest descent. A limitation of the method described here is that the epoch time, $t_0$, is arbitrary, and may be based on a priori information (e.g. last known observation, etc.), but would require an iterative "guessing" process to select a good estimate for $t_0$ which increases computational cost. + +## IV. Conclusions + +The results in this paper, though there are limitations, illustrate that it is possible to detect fragments and collisions much sooner than current capabilities that rely on orbit determination. 
The current state-of-the-art relies on orbit determination, which requires multiple observations over at least two orbits for a LEO object and continual observation over hours for a GEO object. The approach outlined in this paper requires only independent observations of two debris orbits and to answer the same hypothesis, with the cost largely in computation. The problem reduces to a 4-dimensional particle swarm optimization, which can easily be solved using a gradient-based method. By using the hypothesized constraint of the admissible regions it was demonstrated that it is possible to determine if a combination of new uncorrelated debris objects have a common origin that also intersects with a known catalog object orbit, thus indicating break-up of that known object has occurred. +---PAGE_BREAK--- + +V. Future Work + +This paper reflects a very initial endeavour into understanding the limitations and applications of this methodology. Additional test cases, including one on a GEO break up as well as another using LEO collision path, will need to be done, as this paper only demonstrates that if you have two observations of the same object that a zero-finding problem is possible. Other phenomenology should also be considered, such as radar observations. LEO observations are not typically made using optical or electro-optical hardware; conversely, GEO observations are almost exclusively made with these methods. Radar observations have different admissible region structure, as they provide a different set of observable, or determined states. In this scenario, $x_d$ is a 2 × 1 matrix, whereas in optical it is 4 × 1 matrix. Therefore, to use radar information in the methodology described in this paper, additional observations would need to be included to create a closed solution. + +References + +¹J. M. Maruskin, D. J. Scheeres, and K. T. Alfriend, "Correlation of optical observations of objects in earth orbit," *Journal of Guidance, Control, and Dynamics*, Vol. 
32, No. 1, 2009, pp. 194-209. + +²A. Rossi, "The earth orbiting space debris," *Serbian Astronomical Journal*, Vol. 170, 2005, pp. 1-12. + +³M. J. Holzinger, K. K. Luu, C. Sabol, and K. Hill, "Uncorrelated-Track Classification, Characterization, and Prioritization Using Admissible Regions and Bayesian Inference," *Journal of Guidance, Control, and Dynamics*, 2016, pp. 2469-2484. + +⁴K. Wormnes, R. Le Letty, L. Summerer, R. Schonenborg, O. Dubois-Matra, E. Luraschi, A. Cropp, H. Krag, and J. Delaval, "ESA technologies for space debris remediation," *6th IAASS Conference:Safety is Not an Option, Montrel*, 2013. + +⁵P. d. Selding, "Orbital Debris a Growing Problem with No End in Sight," *Space News*, Vol. 31, 2006. + +⁶J. L. Worthy, *Initialization of sequential estimation for unobservable dynamical systems using partial information in the presence of systemic uncertainty*. PhD thesis, Georgia Institute of Technology, 2017. + +⁷J. L. Worthy III and M. J. Holzinger, "Incorporating uncertainty in admissible regions for uncorrelated detections," *Journal of Guidance, Control, and Dynamics*, Vol. 38, No. 9, 2015, pp. 1673-1689. + +⁸A. Milani, G. F. Gronchi, M. d. Vitturi, and Z. Knežević, "Orbit determination with very short arcs. I admissible regions," *Celestial Mechanics and Dynamical Astronomy*, Vol. 90, No. 1-2, 2004, pp. 57-85. + +⁹G. Tommei, A. Milani, and A. Rossi, "Orbit determination of space debris: admissible regions," *Celestial Mechanics and Dynamical Astronomy*, Vol. 97, No. 4, 2007, pp. 289-304. + +¹⁰J. A. Siminski, O. Montenbruck, H. Fiedler, and T. Schildknecht, "Short-arc tracklet association for geostationary objects," *Advances in space research*, Vol. 53, No. 8, 2014, pp. 1184-1194. + +¹¹K. Fujimoto and D. J. Scheeres, "Applications of the admissible region to space-based observations," *Advances in Space Research*, Vol. 52, No. 4, 2013, pp. 696-704. + +¹²K. Fujimoto and D. J. 
Scheeres, "Correlation of optical observations of earth-orbiting objects and initial orbit determination," *Journal of guidance, control, and dynamics*, Vol. 35, No. 1, 2012, pp. 208-221. + +¹³J. L. Worthy, M. J. Holzinger, and D. J. Scheeres, "An optimization approach for observation association with systemic uncertainty applied to electro-optical systems," *Advances in Space Research*, 2018. + +¹⁴D. Farnocchia, G. Tommei, A. Milani, and A. Rossi, "Innovative methods of correlation and orbit determination for space debris," *Celestial Mechanics and Dynamical Astronomy*, Vol. 107, No. 1-2, 2010, pp. 169-185. \ No newline at end of file diff --git a/samples_new/texts_merged/3884483.md b/samples_new/texts_merged/3884483.md new file mode 100644 index 0000000000000000000000000000000000000000..002b1b000be2001c7aafd3b2d1e3c2e1db913447 --- /dev/null +++ b/samples_new/texts_merged/3884483.md @@ -0,0 +1,660 @@ + +---PAGE_BREAK--- + +# Real-time thermoacoustic data assimilation + +Andrea Nóvoa*& Luca Magri*¤ + +June 14, 2021 + +## Abstract + +Low-order thermoacoustic models are qualitatively correct, but they are typically quantitatively inaccurate. We propose a time-domain method to make qualitatively low-order models quantitatively (more) accurate. First, we develop a Bayesian data assimilation method for a low-order model to self-adapt and self-correct any time that reference data, for example from experiments, becomes available. Second, we apply the methodology to infer the thermoacoustic states, heat release parameters, and model errors on the fly without storing data (real-time). Third, we analyse the performance of the data assimilation with synthetic data and interpret the results physically. We apply the data assimilation algorithm to all nonlinear thermoacoustic regimes, from limit cycles to chaos, in which acoustic pressure measurements from microphones are assimilated. 
Fourth, we propose practical rules for thermoacoustic data assimilation based on physical observations on the dynamics. An *increase, reject, inflate* strategy is proposed to deal with the rich nonlinear behaviour, the bifurcations of which are sensitive to small perturbations to the parameters. We show that (i) the correct acoustic pressure and parameters can be accurately inferred; (ii) the learning is robust because it can tackle large uncertainties in the observations (up to 50% the mean values); (iii) the uncertainty of the prediction and parameters is naturally part of the output; and (iv) both the time-accurate solution and statistics can be successfully inferred. Physical time scales for assimilation are proposed in non-chaotic regimes (with the Nyquist-Shannon criterion) and in chaotic regimes (with the Lyapunov time). Data assimilation opens up new possibility for real-time prediction of thermoacoustics by synergistically combining physical knowledge and data. + +**Keywords:** Data assimilation, state and parameter estimation, nonlinear thermoacoustics + +## 1 Introduction + +When the heat released by a heat source, such as a flame, is sufficiently in phase with the acoustic waves of a confined environment, such a gas turbine or a rocket, thermoacoustic oscillations + +*Cambridge University Engineering Department, Trumpington St, Cambridge CB2 1PZ, UK + +†Imperial College London, Aeronautics Department, Exhibition Road, London SW7 2AZ, UK + +‡The Alan Turing Institute, 96 Euston Rd, London NW1 2DB, UK + +§Institute for Advanced Study, TU Munich, Lichtenbergstraße 2a 85748 Garching, Germany (visiting) + +¶lm547@cam.ac.uk +---PAGE_BREAK--- + +may occur (Rayleigh, 1878). Thermoacoustic oscillations manifest themselves as large-amplitude vibrations, which can be detrimental to gas-turbine reliability (e.g., Lieuwen, 2012), and can be destructive in high-power-density motors such as rocket engines (e.g., Culick, 2006). 
The objective of manufacturers is to design devices that are thermoacoustically stable, which is the goal of optimisation, and suppress a thermoacoustic oscillation if it occurs, which is the goal of control (e.g., Magri, 2019). Both optimisation and control rely on a mathematical model, which provides predictions on the key physical variables, such as the acoustic pressure and the heat release rate. The accurate prediction of thermoacoustic oscillations, however, remains one of the most challenging problems faced by power generation, heating and propulsion manufacturers (e.g., Dowling & Morgans, 2005; Noiray et al., 2008; Lieuwen, 2012; Poinsot, 2017; Juniper & Sujith, 2018). + +The prediction of thermoacoustic dynamics—even in simple systems—is challenging because of three reasons. First, thermoacoustics is a multi-physics phenomenon. For a thermoacoustic oscillation to occur, three physical subsystems (flame, acoustics and hydrodynamics) constructively interact with each other (e.g., Lieuwen, 2012; Magri, 2019). Second, thermoacoustics is a nonlinear phenomenon (e.g., Sujith & Unni, 2020). In general, the flame’s heat release responds nonlinearly to acoustic perturbations (Dowling, 1999); and the hydrodynamics are typically turbulent (e.g., Huhn & Magri, 2020). Third, thermoacoustics is sensitive to perturbations to the system. In the linear regime, small changes to the system’s parameters, such as the flame time delay, can cause arbitrarily large changes of the eigenvalue growth rates at exceptional points (Mensah et al., 2018; Orchini et al., 2020). In the nonlinear regime, small changes to the system’s parameters can cause a variety of nonlinear bifurcations of the solution. 
As a design parameter is varied in a small range, thermoacoustic oscillations may become chaotic, by either period doubling, or Ruelle-Takens-Newhouse scenarios (Gotoda et al., 2011, 2012; Kabiraj & Sujith, 2012; Kashinath et al., 2014; Orchini et al., 2015; Huhn & Magri, 2020), or by intermittency bifurcations scenarios (Nair et al., 2014; Nair & Sujith, 2015). The rich bifurcation behaviour has an impact on the effectiveness of control strategies, which may work for periodic oscillations with a dominant frequency, but may not work for multi-frequency oscillations as effectively. Additionally, unpredictable changes in the operating conditions and turbulence, which can be modelled as random phenomena (Nair & Sujith, 2015; Noiray, 2017), increase the uncertainty on the prediction of the quantities of interest. + +Thermoacoustics can be modelled with a hierarchy of assumptions and computational costs. Large-eddy simulations make assumptions only on the finer flow scales, which makes the final solution high-fidelity, but computationally expensive (Poinsot, 2017). Euler and Helmholtz solvers compute the acoustics that evolve on a prescribed mean flow, which makes the solution medium-fidelity and computationally less expensive than turbulent simulations (e.g., Nicoud et al., 2007). This is commonly achieved with flame models, which capture the heat-release response to acoustic perturbations with transfer functions (e.g., Silva et al., 2013). Other medium-fidelity and medium-cost methods are based on flame-front tracking (e.g., Pitsch & De Lageneste, 2002) and simple chemistry models (e.g., Magri & Juniper, 2014), to name only a few. On the other hand, low-order models based on travelling waves and standing waves (Dowling, 1995) provide +---PAGE_BREAK--- + +low-fidelity solutions, but with low-computational cost. These low-order models capture only the dominant physical mechanisms, which are the flame time delay, the flame strength (or index) and the damping. 
Low-order models, which are the subject of this study, are attractive to practitioners because they provide quick estimates on the quantity of interest. Along with modelling, accurate experimental data is becoming more accessible and available (O'Connor et al., 2015). To monitor the thermoacoustic behaviour, in both real engines and academic rig (such as the Rijke tube), the pressure is experimentally measured by microphones (Lieuwen & Yang, 2005; Kabiraj et al., 2012a). Microphones sample the pressure amplitude at typically high rates, which generates large datasets in real time. Except when required for diagnostics, the data is useful if it can be used in *real time*, i.e., on-the-fly, to correct (or update) our knowledge of the thermoacoustic states. + +To summarise, in thermoacoustics, we have three ingredients to improve the design: (i) a human being, who identifies the physical mechanisms that need to be modelled depending on the objectives and resources; (ii) a mathematical model, which provides estimates of the physical states; and (iii) experimental data, which provides a quantitative measure of the system's observables. A model is good if the human being identifies the physical mechanisms needed to formulate a mathematical model that provides the system's states compatibly with the experimental data. The overarching objective of this paper is to propose a method *to make qualitatively low-order models quantitatively (more) accurate* every time that reference data becomes available. The ingredients for this are a physical low-order model, which provides the states; data, which provides the observables; and a statistical method, which finds the most likely model (states and parameters) by assimilating the data in the model. In weather forecasting, this process is known as data assimilation (Sasaki, 1955). 
Data assimilation techniques have been applied to oceanographic studies (Eckart, 1960), aerospace control (Gelb, 1974), robotics, geosciences, cognitive sciences (Reich & Cotter, 2015), to name only a few. Data assimilation is a principled method, which, in contrast to traditional machine learning, uses a physical model to provide a prediction on the solution (*the forecast*), which is updated when observations become available to provide a corrected state (*the analysis*) (Reich & Cotter, 2015). The analysis is an estimator of the physical state (*the true state*), which is more accurate than the forecast. Data assimilation methods can be divided into two main approaches (Lewis *et al.*, 2006): (i) variational and (ii) statistical assimilation methods. Variational data assimilation requires the minimisation of a cost functional—e.g., a Mahalanobis (semi)norm—in terms of a control variable to obtain a single optimal analysis state (Bannister, 2017). The most common variational methods are 3D-VAR and 4D-VAR, which are widely used in weather centres such as the Met Office in the UK or the European Centre for Medium-Range Weather Forecasts, and in academic research (Bannister, 2008). In thermoacoustics, variational data assimilation was introduced by Traverso & Magri (2019), who found the optimal thermoacoustic states given reference data from pressure observations. On the other hand, statistical data assimilation combines concepts of probability and estimation theory. The aim of statistical data assimilation is to compute the probability distribution function of a numerical model to statistically combine it with data from observations. Because the probability distribution function is high dimensional, the practitioner is often interested in capturing only the first and second statistical moments of it. 
In reduced-order modelling, this was achieved +---PAGE_BREAK--- + +in flame tracking methods by Yu *et al.* (2019), who implemented ensemble Kalman filters and smoothers to learn the flame parameters on the fly. In high-fidelity methods in reacting flows, data assimilation with ensemble Kalman filters have been applied in large-eddy simulation of premixed flames to predict local extinctions in a jet flame (Labahn *et al.*, 2019), and under-resolved turbulent simulation to predict autoignition events (Magri & Doan, 2020). The ensemble Kalman filter has also been successfully applied to non-reacting flow systems that show high nonlinearities such as the estimation of turbulent near-wall flows (Colburn *et al.*, 2011), uncertainties in Reynolds-averaged Navier-Stokes (RANS) equations (Xiao *et al.*, 2016), aerodynamic flows (da Silva & Colonius, 2018). In thermoacoustics, statistical data assimilation based on Bayesian methods was introduced by Nóvoa & Magri (2020). + +The objective of this paper is fourfold. First, we develop a sequential data assimilation for a low-order model to self-adapt and self-correct any time that reference data becomes available. The method, which is based on Bayesian inference, provides the *maximum a posteriori estimate* model prediction, i.e., the most likely prediction. Second, we apply the methodology to infer the thermoacoustic states, heat release parameters, and model errors on the fly without storing data. Third, we analyse the performance of the data assimilation algorithm with synthetic data and interpret the results physically. Fourth, we propose practical rules for thermoacoustic data assimilation. The paper is structured as follows. § ?? provides a description of the nonlinear thermoacoustic model with the data assimilation technique and its implementation for thermoacoustics. § 2 presents the method used for state and parameter estimation. § 3 presents the nonlinear characterisation of the thermoacoustic dynamics. 
§ 4 shows and discusses the results for non-chaotic regimes, whereas § 5 shows and discusses the results for chaotic solutions. A final discussion and conclusions end the paper. + +## 1.1 Qualitative nonlinear thermoacoustic model + +The system consists of an open-ended tube containing a heat source, such as a flame or an electrically heated gauze. Because the tube is sufficiently long with respect to the diameter, the cut-on frequency is such that only longitudinal acoustic waves propagate. This is known as the Rijke tube, which is a common laboratory-scale device that has been employed in a variety of fundamental studies (Heckl, 1990; Balasubramanian & Sujith, 2008; Juniper, 2011; Magri et al., 2013). This device is represented in Figure 1. The Rijke model used in this work is described by Balasubramanian & Sujith (2008) and Juniper (2011). The flow is assumed to be a perfect gas; the mean flow is sufficiently slow such that its effects are neglected in the acoustic propagation; and viscous and body forces are neglected. The acoustics are governed by the linearised momentum and energy conservation equations +---PAGE_BREAK--- + +Figure 1: Schematic of an open-ended duct with a heat source (also known as the Rijke tube). The heat released by the compact heat source is indicated by the vertical dotted line. The light blue vertical lines indicate microphones located equidistantly. 
+ +$$ \frac{\partial u'}{\partial t} + \frac{\partial p'}{\partial x} = 0 \qquad (1a) $$ + +$$ \frac{\partial p'}{\partial t} + \frac{\partial u'}{\partial x} = \dot{Q} \delta (x - x_f) - \zeta p' \qquad (1b) $$ + +where $u'$ is the acoustic velocity; $p'$ is the acoustic pressure; $\dot{Q}$ is the heat release rate; $x_f$ is the non-dimensional flame location; $\delta$ is the Dirac delta distribution, which models the heat source as a point source (compact assumption); and $\zeta$ is the damping factor, which encapsulates the acoustic energy radiated from both ends of the duct, and the thermo-viscous losses in boundary layers. The non-dimensional heat release rate perturbation, $\dot{Q}$, is modelled with a qualitative nonlinear time-delayed model (Heckl, 1990) + +$$ \dot{Q} = \beta \left[ \sqrt{\frac{1}{3} + u'_{\text{f}}(t-\tau)} - \sqrt{\frac{1}{3}} \right] \qquad (2) $$ + +where $\beta$ is the strength of the source; $u'_f$ is the acoustic velocity at the flame location; and $\tau$ is the time delay. The heat release rate is a key thermoacoustic parameter for the system's stability. The dimensionless variables (without ~) and the dimensional variables (with ~) are related as $x = \tilde{x}/\tilde{L}_0$, where $\tilde{L}_0$ is the length of the tube; $t = \tilde{t}\tilde{c}_0/\tilde{L}_0$, where $\tilde{c}_0$ is the mean speed of sound; $u' = \tilde{u}'/\tilde{c}_0$; $\rho' = \tilde{\rho}'/\tilde{\rho}_0$, where $\tilde{\rho}_0$ is the mean density; $p' = \tilde{p}'/(\tilde{\rho}_0 \tilde{c}_0^2)$; $\dot{Q} = \tilde{Q}' (\gamma - 1)/(\tilde{\rho}_0 \tilde{c}_0^3)$, where $\gamma$ is the heat capacity ratio; and $\delta(x-x_f) = \tilde{\delta}(\tilde{x}-\tilde{x}_f)\tilde{L}_0$. The open-ended boundary conditions are ideal, which means that the acoustic pressure is zero, i.e., $p' = 0$ at $x = \{0, 1\}$. 
By separation of variables, the acoustic velocity and pressure are decomposed as (Zinn & Lores, 1971) + +$$ u'(x,t) = \sum_{j=1}^{N_m} \eta_j(t) \cos(j\pi x), \qquad p'(x,t) = -\sum_{j=1}^{N_m} \frac{\dot{\eta}_j(t)}{j\pi} \sin(j\pi x) \qquad (3) $$ + +where cos($j\pi x$) and sin($j\pi x$) are the eigenfunctions of the acoustic velocity and pressure, respectively, when $\zeta = 0$ and $\dot{Q} = 0$; and $N_m$ is the number of acoustic modes kept in the decomposition. Substituting (3) into (1), multiplying (1b) by sin($k\pi x$), and integrating over $x = [0, 1]$, yield the +---PAGE_BREAK--- + +governing ordinary differential equations, which physically represent a set of nonlinearly coupled +oscillators + +$$
\begin{align}
\frac{d\eta_j}{dt} - j\pi \left(\frac{\dot{\eta}_j}{j\pi}\right) &= 0 \tag{4a} \\
\frac{d}{dt} \left(\frac{\dot{\eta}_j}{j\pi}\right) + j\pi \eta_j + \zeta_j \frac{\dot{\eta}_j}{j\pi} + 2\dot{Q} \sin(j\pi x_f) &= 0 \tag{4b}
\end{align}
$$ + +where the damping term is defined by modal components $\zeta_j = C_1 j^2 + C_2 \sqrt{j}$, which is physically +motivated in Landau & Lifshitz (1987). The damping coefficients, $C_1$ and $C_2$, are assumed to +be constant. For reasons that will be explained in § 1.2, we introduce an advection equation to +mathematically eliminate the time-delayed velocity term (Huhn & Magri, 2020) + +$$
\frac{\partial v}{\partial t} + \frac{1}{\tau} \frac{\partial v}{\partial X} = 0 , \quad 0 \le X \le 1 \qquad (5)
$$ + +where v is a dummy variable that travels with non-dimensional velocity τ⁻¹ in a dummy spatial +domain X such that + +$$
u'_{f}(t - \tau) = v(X = 1, t), \quad u'_{f}(t) = v(X = 0, t). \tag{6}
$$ + +Equation (5) is discretised with a Chebyshev method (Trefethen, 2000) with $N_c + 1$ points in the interval $0 \le X \le 1$. 
+ +In a state-space notation, the thermoacoustic problem is governed by + +$$ +\begin{equation} +\begin{aligned} +\frac{d\psi}{dt} &= \mathbf{F}(\alpha; \psi), && \psi(t=0) = \psi_0, \\ +\mathbf{y} &= \mathbf{M}(x)\psi, +\end{aligned} +\tag{7} +\end{equation} +$$ + +where the state vector $\boldsymbol{\psi} = (\boldsymbol{\eta}; \dot{\boldsymbol{\eta}}; \boldsymbol{v}) \in \mathbb{R}^{2N_m+N_c}$ is the column-concatenation of the acoustic amplitudes, $\boldsymbol{\eta} = (\eta_1, \eta_2, ..., \eta_{N_m}) \in \mathbb{R}^{N_m}$ and $\dot{\boldsymbol{\eta}} = (\dot{\eta}_1/\pi, \dot{\eta}_2/(2\pi), ..., \dot{\eta}_{N_m}/(N_m\pi)) \in \mathbb{R}^{N_m}$, and the advection velocity variables $\boldsymbol{v} = (\nu_1, \nu_2, ..., \nu_{N_c}) \in \mathbb{R}^{N_c}$; the thermoacoustic parameters are contained in the vector $\boldsymbol{\alpha} = (\beta, \tau, \zeta) \in \mathbb{R}^{N_P}$; $\mathbf{F}$ represents the nonlinear operator that consists of (4a),(4b) and (5), $\mathbf{F}: \mathbb{R}^{2N_m+N_c+N_P} \rightarrow \mathbb{R}^{2N_m+N_c}$; and $\mathbf{M}(x)$ is the measurement operator, which maps the state to the observable space at $x$. The expression of the measurement operator depends on the nature of the observables being assimilated, as explained in § 2. To work with a reduced-order model that qualitatively captures the essential dynamics, we use $N_m = 10$ acoustic modes. For the advection equation, $N_c = 10$ ensures numerical convergence (Huhn & Magri, 2020). The number of degrees of freedom of the reduced-order model is $N = 2N_m + N_c = 30$. The initial value problem (7) is solved with an automatic-stepsize-control method that combines fourth and fifth order Runge-Kutta methods (Shampine & Reichelt, 1997) +---PAGE_BREAK--- + +## 1.2 Data assimilation + +Data assimilation optimally combines the prediction from an imperfect model with data from observations to improve the knowledge of the system's state. 
The updated solution (analysis) optimally combines the information from the observations, $\mathbf{y}$, and the model solution (forecast) with their uncertainties. In order to (i) update the system's knowledge any time that data becomes available, and (ii) not store the data during the entire operation, we assimilate sequentially assuming that the process is Markovian. The concept of Bayesian update is key to this process, as explained in § 1.2.1. + +### 1.2.1 Bayesian update + +In a Bayesian framework, we quantify our confidence in a model by a probability measure. Hence, we update our confidence in the model predictions every time we have reference data from observations. The rigorous framework to achieve this is probability theory, as explained in Cox's theorem (Jaynes, 2003). + +To set a probabilistic framework at time $t = t_k$, the state, $\psi_k$, and reference observation, $\mathbf{y}_k$, are assumed to be realisations of their corresponding random variables acting on the sample spaces $\Omega_\psi = \mathbb{R}^{2N_m+N_c}$ and $\Omega_\mathbf{y} = \mathbb{R}^{N_y}$. Because we transformed the time-delayed problem into an initial value problem, the solution of (7) at the present depends on the solution at the previous time step only. In other words, we transformed a non-Markovian system into a Markovian system, which simplifies the design of the Bayesian update. We quantify our confidence in a quantity through a probability, $\mathcal{P}$ + +$$ \psi_k \sim \mathcal{P}(\psi_k | \psi_{k-1}, \alpha, \mathbf{F}) \qquad \mathbf{y}_k \sim \mathcal{P}(\mathbf{y}_k | \psi_k, \alpha, \mathbf{F}), \tag{8} $$ + +where the vertical bar $|$ denotes that the quantity on the left is conditioned on the knowledge of the quantities on the right. The leftmost probability answers the question: "Given a model **F**, a set of parameters $\alpha$, and the state $\psi_{k-1}$, what is the probability that the state takes the value $\psi_k$?". 
The rightmost probability answers the question: "if we forecast the state $\psi_k$ from the model, what is the probability that we observe $\mathbf{y}_k$?". We assume that the observations are statistically independent and uncorrelated with respect to the forecast. To update our knowledge of the system, the prior knowledge from the reduced-order model and the reference observations are combined through Bayes' rule + +$$ \mathcal{P}(\psi_k | \mathbf{y}_k, \alpha, \mathbf{F}) = \frac{\mathcal{P}(\mathbf{y}_k | \psi_k, \alpha, \mathbf{F}) \mathcal{P}(\psi_k, \alpha, \mathbf{F})}{\mathcal{P}(\mathbf{y}_k, \alpha, \mathbf{F})}. \tag{9} $$ + +First, $\mathcal{P}(\psi_k, \alpha, \mathbf{F})$ is the prior, which measures the knowledge of our system prior to observing $\mathbf{y}_k$. The prior evolves through the Chapman-Kolmogorov equation (Jazwinski, 2007), which involves multi-dimensional integrals. To numerically solve the Chapman-Kolmogorov equation, we use an ensemble method by integrating the model equations (§ 1.2.2), which provide a *forecast* on the state. Second, $\mathcal{P}(\mathbf{y}_k | \psi_k, \alpha, \mathbf{F})$ is the likelihood (8), which measures the confidence we have in our model prediction. The likelihood is prescribed. Third, $\mathcal{P}(\mathbf{y}_k, \alpha, \mathbf{F})$ is the evidence, which is the probability that the observable takes on the value $\mathbf{y}_k$. This can be prescribed from the knowledge +---PAGE_BREAK--- + +of the experimental uncertainties. Finally, $\mathcal{P}(\psi_k | y_k, \alpha, F)$ is the posterior, which measures the knowledge we have on the state, $\psi_k$, after we have observed $y_k$. Here, we will select the most probable value of $\psi_k$ in the posterior (i.e., the mode) as the best estimator of the state (maximum a posteriori approach, which is a well-posed approach in inverse problems). The best estimator is called *analysis* in weather forecasting (Tarantola, 2005). 
Equation (9) provides the Bayesian update, which is key to this work and sequential data assimilation. + +### 1.2.2 Stochastic ensemble filtering for sequential assimilation + +For brevity, we will omit the subscript $k$, unless it becomes necessary for clarity. We focus on a qualitative reduced-order model in which (i) the bias on the solution is negligible, (ii) the uncertainty on the state is represented by a covariance, (iii) the probability density function of the state is assumed to be symmetrical around the mean, and (iv) the dynamics at regime do not present frequent extreme events, i.e., the tails of the probability density function are not heavy. The probability distribution to employ is the distribution that maximises the information entropy (Jaynes, 1957), which, in this scenario, is the Gaussian distribution. Therefore, the system's forecast and the observations are assumed to follow Gaussian distributions, i.e., $\psi^f \sim N(\psi, C_{\psi\psi}^f)$ and $y \sim N(M\psi, C_{\epsilon\epsilon})$, respectively, where $N$ denotes the normal distribution with the first argument being the mean, and the second argument being the covariance matrix. The forecast and observation covariance matrices are $C_{\psi\psi}^f$ and $C_{\epsilon\epsilon}$, respectively. + +If the dynamics were linear, the Bayesian update (9) would be exactly solved by the Kalman filter equations (Kalman, 1960) + +$$ \psi^a = \psi^f + (\mathbf{M} \mathbf{C}_{\psi\psi}^f)^{\mathrm{T}} [\mathbf{C}_{\epsilon\epsilon} + \mathbf{M} \mathbf{C}_{\psi\psi}^f \mathbf{M}^{\mathrm{T}}]^{-1} (\mathbf{y} - \mathbf{M}\mathbf{\psi}^f) \quad (10a) $$ + +$$ \mathbf{C}_{\psi\psi}^a = \mathbf{C}_{\psi\psi}^f - (\mathbf{M}\mathbf{C}_{\psi\psi}^f)^{\mathrm{T}} \left[ \mathbf{C}_{\epsilon\epsilon} + \mathbf{M}\mathbf{C}_{\psi\psi}^f \mathbf{M}^{\mathrm{T}} \right]^{-1} (\mathbf{M}\mathbf{C}_{\psi\psi}^f) \quad (10b) $$ + +where the superscripts ‘a’ and ‘f’ denote analysis and forecast, respectively. 
Equation (10a) corrects the model prediction by weighting the statistical distance between the observations (data) and the forecast, according to the prediction and observation covariances (Evensen, 2003). The observation error covariance has to be prescribed based on the knowledge of the experimental methodology used. + +In an ensemble method, the distribution is represented by the sample statistics + +$$ \bar{\psi} \approx \frac{1}{m} \sum_{i=1}^{m} \psi^i, \qquad \mathbf{C}_{\psi\psi} \approx \frac{1}{m-1} \Psi\Psi^T \quad (11) $$ + +where the $i$-th column of the matrix $\Psi$ is the deviation from the mean of the $i$-th realisation, $\psi^i - \bar{\psi}$, and $m$ is the number of ensemble members. Because (11) is a Monte Carlo integration, the sampling error scales as $O(m^{-1/2})$. The key idea of ensemble filters is to group +---PAGE_BREAK--- + +Figure 2: Conceptual schematic of a sequential filtering process. Truth (green); observations and their uncertainties (red); forecast states and uncertainties (orange); and analysis states and uncertainties (blue). The circles represent pictorially the spread of the probability density functions: the larger the circles, the larger the uncertainty. + +forecast states from a numerical model (the ensemble) to obtain, on filtering, the analysis state. Ensemble methods describe the state’s uncertainty by the spread in the ensemble at a given time to avoid the explicit formulation of the covariance matrices (Livings et al., 2008). The algorithmic procedure is as follows. First, the initial condition is integrated forward in time to provide the forecast state, $\psi^f$. Second, experimental observations, $y$, are statistically assimilated into the forecast to obtain the analysis state, $\psi^a$, which, in turn, becomes the initial condition for the next time step. 
The forecast accumulates errors over the integration period, which is reduced in the assimilation stage through observations with their experimental uncertainties. If the model is qualitatively correct and unbiased, after a sufficient number of assimilations, the ensemble concentrates around the true value. This sequential filtering process on one ensemble member is shown in Figure 2. The process is repeated in parallel for the other ensemble members. + +### 1.2.3 Ensemble Square-Root Kalman Filter + +In the ensemble Kalman filter (10), each ensemble member is updated with the assimilation of independently perturbed observation data. However, this method provides a sub-optimal solution that, in some cases, does not preserve the ensemble mean and is affected by sampling errors of the observations (Evensen, 2003). Moreover, the ensemble Kalman filter may require a fairly large ensemble to compensate the sampling errors of the observations (Sakov & Oke, 2008). The ensemble square-root Kalman filter (EnSRKF), which is an ensemble-transform Kalman filter, overcomes these issues (Livings et al., 2008). The key idea of the EnSRKF is to update the ensemble mean and deviations instead of each ensemble member. 
The EnSRKF for *m* ensemble +---PAGE_BREAK--- + +members and a state vector of size *N* reads + +$$ +\mathbf{A}^{\mathrm{a}} = \bar{\mathbf{A}}^{\mathrm{a}} + \mathbf{\Psi}^{\mathrm{a}} +\quad (12\mathrm{a}) +$$ + +$$ +\bar{\mathbf{A}}^{\mathrm{a}} = \bar{\mathbf{A}}^{\mathrm{f}} + \mathbf{\Psi}^{\mathrm{f}} (\mathbf{M}\mathbf{\Psi}^{\mathrm{f}})^{\mathrm{T}} \left[ (m-1) \mathbf{C}_{\epsilon\epsilon} + \mathbf{M}\mathbf{\Psi}^{\mathrm{f}} (\mathbf{M}\mathbf{\Psi}^{\mathrm{f}})^{\mathrm{T}} \right]^{-1} (\mathbf{Y} - \mathbf{M}\bar{\mathbf{A}}^{\mathrm{f}}) +\quad (12\mathrm{b}) +$$ + +$$ +\mathbf{\Psi}^{\mathrm{a}} = \mathbf{\Psi}^{\mathrm{f}} \mathbf{V} (\mathbf{I} - \boldsymbol{\Sigma})^{1/2} \mathbf{V}^{\mathrm{T}} +\quad (12c) +$$ + +$$ +\mathbf{V}\boldsymbol{\Sigma}\mathbf{V}^{\mathrm{T}} = (\mathbf{M}\mathbf{\Psi}^{\mathrm{f}})^{\mathrm{T}} \left[ (m-1)\mathbf{C}_{\epsilon\epsilon} + \mathbf{M}\mathbf{\Psi}^{\mathrm{f}} (\mathbf{M}\mathbf{\Psi}^{\mathrm{f}})^{\mathrm{T}} \right]^{-1} (\mathbf{M}\mathbf{\Psi}^{\mathrm{f}}) +\quad (12\text{d}) +$$ + +where $\mathbf{A} = (\psi_1, \psi_2, \dots, \psi_m) \in \mathbb{R}^{N \times m}$ is the matrix that contains the ensemble members as columns; $\bar{\mathbf{A}} = (\bar{\psi}_1, \bar{\psi}_2, \dots, \bar{\psi}_m) \in \mathbb{R}^{N \times m}$ contains the mean analysis states in each column; $\mathbf{Y} = (\mathbf{y}, \dots, \mathbf{y}) \in \mathbb{R}^{q \times m}$ is the matrix containing the $q$ observations repeated $m$ times. The identity matrix is represented by $\mathbf{I}$, and $\mathbf{V}$ and $\boldsymbol{\Sigma}$ are the orthogonal matrices of eigenvectors and a diagonal matrix of eigenvalues, respectively, from singular value decomposition. The largest matrices required in the EnSRKF algorithm have dimension $N \times m$ and $m \times m$, therefore, the storage requirements are significantly smaller than those of non-ensemble based filters. 
In addition, this filter is non-intrusive and suitable for parallel computation. A derivation of the EnSRKF can be found in Appendix A. + +**1.3 Discussion** + +An ensemble method enables us to (i) work with high-dimensional systems because we do not need to propagate the covariance matrix, which has $O(N^2)$ components; (ii) work with nonlinear systems, such as the thermoacoustic system under investigation; (iii) work with time-dependent problems; (iv) not store the data because we sequentially assimilate (on-the-fly assimilation); and (v) avoid implementing tangent or adjoint solvers, which are required, for example, in variational data assimilation methods (Traverso & Magri, 2019). On the one hand, if the system were linear, a Gaussian prior would remain Gaussian under time integration. This makes the ensemble filter the exact Bayesian update in the limit of an infinite number of samples. On the other hand, if the system were nonlinear (e.g., in the present study), a Gaussian prior does not necessarily remain Gaussian under time integration. This makes the ensemble filter an approximate Bayesian update. The update of the first and second statistical moments, however, remains exact. In other words, we cannot capture the skewness, kurtosis, and other higher moments. (Particle filter methods overcome this limitation, but they may be computationally expensive (Pham, 2001).) + +**2 State and parameter estimation** + +This work considers both state estimation, in which the state is the uncertain quantity (§ 2.1);
and combined state and parameter estimation, in which both the state and model parameters are
uncertain (§ 2.2).
---PAGE_BREAK--- + +## 2.1 State estimation + +State estimation is the process of combining a series of noisy measurements to produce an estimate of the state of the dynamical system, $\psi$. 
This paper considers two different scenarios in assimilating acoustic data in thermoacoustics: (i) assimilation of the acoustic modes; and (ii) assimilation of pressure measurements from $N_{\text{mic}}$ microphones, which are located equidistantly from the flame location up to the end of the Rijke tube (Figure 1). The assimilation of acoustic modes assumes that observation data is available for the pressure and velocity acoustic modes, $\{\eta, \dot{\eta}\}$. Hence, the state equations are + +$$ +\begin{aligned} +\frac{d\psi}{dt} &= \mathbf{F}(\alpha; \psi), && \psi(t=0) = \psi_0 = \begin{bmatrix} \eta_0 \\ \dot{\eta}_0 \\ v_0 \end{bmatrix} \\ +\mathbf{y} &= \mathbf{M}(x)\psi = \begin{bmatrix} \eta \\ \dot{\eta} \end{bmatrix} +\end{aligned} +\quad (13) +$$ + +Alternatively, in scenario (ii), from (3), the reference pressure measurements are computed as + +$$ +\mathbf{p}'_{\text{mic}} = \begin{pmatrix} p'_1(t) \\ p'_2(t) \\ \vdots \\ p'_{N_{\text{mic}}}(t) \end{pmatrix} = - \begin{pmatrix} \sin(\pi x_1) & \sin(2\pi x_1) & \dots & \sin(N_m \pi x_1) \\ \sin(\pi x_2) & \sin(2\pi x_2) & \dots & \sin(N_m \pi x_2) \\ \vdots & \vdots & \ddots & \vdots \\ \sin(\pi x_{N_{\text{mic}}}) & \sin(2\pi x_{N_{\text{mic}}}) & \dots & \sin(N_m \pi x_{N_{\text{mic}}}) \end{pmatrix} \begin{pmatrix} \frac{\dot{\eta}_1(t)}{\pi} \\ \frac{\dot{\eta}_2(t)}{2\pi} \\ \vdots \\ \frac{\dot{\eta}_{N_m}(t)}{N_m \pi} \end{pmatrix} \quad (14) +$$ + +The statistical errors of the microphones are assumed to be independent and Gaussian. In the twin experiment, the pressure observations are created from the true state, with a standard deviation $\sigma_{\text{mic}}$ that mimics the measurement error. Pressure data cannot be assimilated directly with the EnSRKF because the state vector contains the acoustic modes, i.e., it does not contain the acoustic pressure. To circumvent this, we augment the state vector with the acoustic pressure at the microphones' locations according to (14). 
Therefore, the new state vector includes the acoustic modes, the advection modes and the pressure at the different microphone locations, i.e., $\psi' = (\eta; \dot{\eta}; v; p'_{\text{mic}})$, with dimension $N' = 2N_m + N_c + N_{\text{mic}}$. The augmented state equations are + +$$
\begin{aligned}
\frac{d\psi'}{dt} &= \mathbf{F}(\alpha; \psi), && \psi'(t=0) = \psi'_0 = \begin{bmatrix} \eta_0 \\ \dot{\eta}_0 \\ v_0 \\ p'_{\text{mic},0} \end{bmatrix} \\
\mathbf{y} &= \mathbf{M}(x)\psi' = p'_{\text{mic}}(x)
\end{aligned}
\quad (15)
$$ + +With this, the modes will be updated indirectly during the assimilation step using the microphone data and their experimental error. +---PAGE_BREAK--- + +## 2.2 Combined State and Parameter estimation + +Combined state and parameter estimation is the process of combining a series of noisy measurements to produce an estimate of both the state of the dynamical system, $\psi$, and the parameters, $\alpha$. In this work, we consider the heat source strength $\beta$ and the time delay $\tau$ as the parameters to learn from the assimilation process (with a slight abuse of notation, $\alpha \equiv (\beta, \tau)$). The parameters are regarded as variables of the dynamical system so that they are updated in every analysis step. This is achieved by combining the governing equations of the thermoacoustic model with the equations that describe the evolution of parameters, which are constant in time, but can change when observations are assimilated. 
The equations for the augmented state of combined state and parameter estimation are + +$$ \frac{d}{dt} \begin{bmatrix} \psi \\ \beta \\ \tau \end{bmatrix} = \begin{bmatrix} F(\alpha; \psi) \\ 0 \\ 0 \end{bmatrix}, \quad \begin{array}{l} \psi(t=0) = \psi_0 \\ \beta(t=0) = \beta_0, \\ \tau(t=0) = \tau_0 \end{array} $$ + +$$ y = M(x)\psi, \tag{16} $$ + +With a slight abuse of notation, the state vector $\psi$ in (16) is equal to $\psi \equiv (\eta; \dot{\eta}; v)$ in (13) for the assimilation of acoustic modes, and equal to $\psi' \equiv (\eta; \dot{\eta}; v; p'_{\text{mic}})$ in (15) for the assimilation of pressure measurements. The data assimilation algorithm is applied to the augmented system for both the forecast state and the parameters to be updated at every analysis step. The parameters need to be initialised for each ensemble member from a uniform distribution with a width of 25% of the mean parameter value. In other words, we assume that the parameters are uncertain by $\pm 25\%$. + +## 2.3 Performance metrics + +The performance of the state estimation and combined state and parameter estimation are evaluated with three metrics: (i) the trace of the forecast covariance, $C_{\psi\psi}^f$, which globally measures the spread of the ensemble; (ii) the relative difference between the true pressure oscillations at the flame location and the filtered solution, which measures the instantaneous error; and (iii) for the combined state and parameter assimilation, the convergence of the filtered parameters normalised to their true values, and the root-mean square error with respect to the true solution. + +# 3 Nonlinear characterisation + +In order to assess the performance of data assimilation, we first characterise the nonlinear dynamics by analysing the solutions at regime (after the initial transient) with bifurcation analysis and non-linear time series analysis (Kantz & Schreiber, 2003; Kabiraj *et al.*, 2012b; Guan *et al.*, 2020). 
The system's parameters are $x_f = 0.2$, $C_1 = 0.1$, $C_2 = 0.06$ and $N_m = 10$. + +In bifurcation analysis, we examine the topological changes in the pressure oscillations, $p'_f$, as the control parameters vary. First, we study the two-dimensional bifurcation diagram, which is +---PAGE_BREAK--- + +Figure 3: Two–dimensional bifurcation diagram. Classification of the attractor of the thermoacoustic system. The area enclosed by the black rectangle corresponds to a refined grid. The coarse and fine sweeps are performed with resolutions $(\Delta\beta, \Delta\tau) = (0.2, 0.01)$ and $(\Delta\beta, \Delta\tau) = (0.1, 0.005)$, respectively. + +shown in Figure 3. The classification in the two-dimensional diagram is obtained following the procedure of Huhn & Magri (2020). This method consists of obtaining the Lyapunov exponents, $\lambda_i$ through covariant-vector analysis. With this, the dynamical motions are identified as: (i) fixed point if $\lambda_1 < 0$; (ii) limit cycle if $\lambda_1 = 0$ and $\lambda_2 < 0$; (iii) quasiperiodic if $\lambda_1 = 0$ and $\lambda_2 = 0$; and (iv) chaotic if $\lambda_1 > 0$. For small $\beta$ and $\tau$ the system converges to a fixed point because the thermoacoustic energy is smaller than damping. As the heat source strength increases, the Rayleigh criterion is fulfilled and self-excited oscillations arise as limit cycles. When $\beta$ reaches values over 2.5, different types of solution appear, such as quasiperiodic or chaotic attractors. The refined region in Figure 3 shows that the type of solutions is sensitive to small changes in the control parameters, which has implications for data assimilation, as argued in the remainder of the paper. + +These topological changes are further investigated with a one-dimensional bifurcation diagram for a fixed time delay ($\tau = 0.2$), shown in Figure 4. 
Because the nonlinear solutions at regime may vary with the initial condition, two sets of results are shown for a small initial condition ($\eta_j = \dot{\eta}_j/j\pi = 0.005$) and a large initial condition ($\eta_j = \dot{\eta}_j/j\pi = 5$) to capture subcritical behaviours. The bifurcation diagram is obtained by marching forward in time the governing equations of the nonlinear dynamical system until the system reaches a statistically stationary state. For each value of the control parameter, the bifurcation diagram shows the peaks and troughs of the acoustic pressure at the flame location. (The nonlinear time series analysis results are shown in Figure 20 for regions B to D, and in Figure 21 for regions E to H in Appendix B.) + +From left to right, first, the solution is the fixed point (region A), which is the case of no +---PAGE_BREAK--- + +oscillations. Second, the appearance of periodic oscillations from a fixed point is observed with a large initial condition at $\beta = 0.26$, with a small region of hysteresis from $\beta = 0.26$ to $\beta = 0.34$. This first self-sustained state is a period-1 limit cycle (region B), which originates from a subcritical Hopf bifurcation. Within region B, the system undergoes a period-doubling bifurcation at $\beta = 0.6$ from period-1 to period-2 oscillations. Third, the period-2 limit cycle bifurcates into a 3-torus quasiperiodic motion at $\beta = 3.35$ (region C). A quasiperiodic oscillation is an aperiodic solution that results from the interaction between two or more incommensurate frequencies, (also known as a Neimark-Sacker bifurcation) (Kabiraj et al., 2012b). Fourth, the solution becomes chaotic at $\beta = 3.65$ (region D). In summary, the evolution from region A to region D shows that the system reaches a chaotic state via a quasiperiodic route to chaos, i.e., via a Ruelle-Takens scenario (Kabiraj et al., 2012a). 
Fifth, after this first route to chaos, changes in the control parameter drive the system back to a periodic limit cycle through a tangent bifurcation (Kantz & Schreiber, 2003) at approximately $\beta = 4.25$ (region E), with a second region of hysteresis from $\beta = 4.24$ to $\beta = 4.28$. This high-amplitude limit-cycle region becomes chaotic again at $\beta = 5.61$ (region F). Sixth, when $\beta$ reaches 7.65, the system evolves towards a frequency-locked state (region G). Frequency-locked solutions arise from the competition between two or more frequencies, but in contrast to quasiperiodic signals, these frequencies are commensurate. Seventh, at $\beta = 7.85$, the frequency-locked solution bifurcates into a quasiperiodic solution (region H). Region-H solutions show a two-dimensional toroidal structure, in contrast to the three-dimensional torus from region C. In region H, some of the simulations showed that there are areas of chaotic dynamics, which can be appreciated by the difference of the solutions from the small and large initial condition in Figure 4. (A higher region refinement could be performed to fully understand the bifurcations within this region, however, this is beyond the scope of this work.) The qualitative bifurcation behaviour of this reduced-order model is observed in experiments (Kabiraj et al., 2012b; Kabiraj & Sujith, 2012), which means that the reduced-order model qualitatively captures the nonlinear thermoacoustic dynamics. + +The bifurcation analysis shows a rich variety of solutions in a relatively small range of parameters, i.e., small changes of a parameter, or a state, can generate solutions that are topologically different. This nonlinear sensitivity has implications in the design of a data-assimilation ensemble framework, as discussed in § 4. + +# 4 Twin experiments in non-chaotic regimes + +We perform a series of experiments with synthetic data, which is generated by the model. 
To mimic an experiment, we add stochastic uncertainty to the synthetic data by prescribing an observation covariance matrix. This approach is also known as the twin experiment (e.g., Traverso & Magri, 2019). The EnSRKF algorithm is tested in the different regions of Figure 4, for the different nonlinear regimes: fixed point, limit cycle, frequency-locked, quasiperiodic and chaotic. The filter is first tested in the non-chaotic regimes for the assimilation of (i) acoustic modes (§ 4.1), and (ii) acoustic pressure from microphones (§ 4.2). The assimilation of chaotic solutions, which presents +---PAGE_BREAK--- + +Figure 4: One-dimensional bifurcation diagram. Maxima and minima of the pressure oscillations at the flame location versus the heat-source strength. The solutions obtained for small/large initial conditions are shown in a dark/light blue colour. This diagram identifies different nonlinear behaviours, which have implications for data assimilation. + +further challenges, is investigated in § 5. Different simulations are performed to determine suitable values for the number of ensemble members ($m$); the time between analysis ($\Delta t_{analysis}$); the standard deviation ($\sigma_{frac}$), i.e., the observations' uncertainties during the acoustic modes assimilation; and the standard deviation of the microphone measurements ($\sigma_{mic}$). Table 1 shows the parameters and initial conditions of the reference (i.e., “true”) solution. This range of parameters is justified from the literature in thermoacoustic data assimilation (Traverso & Magri, 2019). Computational time is discussed in Appendix C. + +## 4.1 Assimilation of the acoustic modes + +This section includes results for state estimation (§ 4.1.1) and combined state and parameter estimation (§ 4.1.2). + +### 4.1.1 State estimation + +This section presents simulations performed assuming that there are observations available for all acoustic modes, i.e., the number of observations is $q = 2N_m = 20$. 
(Including observations for the advection modes would further improve the filter convergence, however, they are not considered because the velocity advection field in the heat source region is not measured in a real engine.) Figure 5 shows the acoustic pressure before assimilation (unfiltered solution), after assimilation +---PAGE_BREAK--- + +**Table 1: Parameters and initial conditions for the true solution.** + +
ParameterValueParameterValue
xf0.2β[0.2, 0.4, 3.6, 7.7, 7.0]
Nm10τ0.2
Nc10m10
Δt0.001σfrac0.25
ηj(t = 0)0.005Δtanalysis[2 (Non-chaotic), 0.5 (Chaotic)]
η̇j/jπ(t = 0)0.005Nmic6
vi(t = 0)0σmic0.01
+ +(filtered solution) and the data at the assimilation steps (analysis steps). Panels (a) to (d) show the transient of a fixed point, a period–1 limit cycle, a frequency–locked, and a quasiperiodic motion, respectively. In the filtered solution, data assimilation is performed during the first 50 time units, and it is marched in time without further assimilation for 10 more time units. The EnSRKF successfully learns (i.e., infers) the true solution for all the nonlinear regimes. As expected, the convergence is faster for the fixed point and limit cycle cases (Figs. 5a,b) because they are simple dynamical motions. (The unfiltered solution also converges to the same value for these simple cases. This is due to the stable nature of their attractors, and because their regions are unaffected by the chaotic butterfly effect.) For multi–frequency dynamical regimes, figures 5c,d show that the Bayesian update can learn the frequency–locked and quasiperiodic states of regions C and G in Figure 4. However, these show more discrepancies between the filtered and true solutions. Physically, this is due to the multiple bifurcations that occur in a small range of parameters, which is typical of thermoacoustic systems. In reference to Figure 4, region C is next to the chaotic region D; and region G is a short range region surrounded by the chaotic region F, and the mixed quasiperiodic–chaotic region H. Therefore, the discrepancy in these cases is caused by some ensemble members falling in different basins of attraction. To overcome this issue, we propose a strategy in § 4.2.2. + +The data assimilation process depends on the observation's uncertainty, $\sigma_{\text{frac}}$, and ensemble size, $m$. Figure 6 shows the performance metrics (§ 2) for the quasiperiodic solution of Figure 5d. As expected, the filtered solution is more accurate for a smaller standard deviation because the observations are closer to the truth. 
Importantly, the algorithm is capable of learning the reference solution for an ensemble having an error as large as 50% of the mean of the acoustic modes, which means that the data assimilation algorithm is robust. + +For the pressure performance metric, the algorithm brings the relative error below 10% after 15 time units (in the worst case scenario, Figure 6a). For the covariance matrix trace performance metric, the EnSRKF continuously reduces the initial ensemble variance up to a final plateau, which cannot be zero because of the non-zero observation and forecast background noise (Figure 6c). The evolution of the trace is an indicator of the spread of the forecast ensemble, which informs +---PAGE_BREAK--- + +Figure 5: Real-time learning of the state. Assimilation of acoustic modes for state estimation of non-chaotic regimes. (a) Transient towards a fixed point ($\beta$ = 0.2); (b) limit cycle ($\beta$ = 0.4); (c) frequency-locked ($\beta$ = 7.7); and (d) quasiperiodic ($\beta$ = 3.6). True pressure oscillations at the flame location (light grey), unfiltered solution (dashed dark grey) and filtered solution (black). The analysis time steps are indicated with red circles. $m = 10$, $\sigma_{frac} = 0.25$, $\Delta t_{analysis} = 2$. +---PAGE_BREAK--- + +Figure 6: Assimilation of acoustic modes for state estimation of a quasiperiodic regime. Performance metrics. Left: Effect of the standard deviation with $m = 10$. Right: effect of the ensemble size with modes measurement uncertainty $\sigma_{frac} = 0.25$. The error evolution is shown with the relative difference between the filtered solutions and truth (top) and the trace of the ensemble covariance (bottom). The dashed vertical line indicates when data assimilation stops. $\beta = 3.6$, $\Delta t_{analysis} = 2$. + +on the uncertainty of the solution. 
The ensemble size does not have a strong influence on the
ensemble uncertainty during the assimilation because the trace of the covariance matrix remains
of the same magnitude independently of the value of *m* (Figure 6d). Nevertheless, the relative
error is significantly higher for a small ensemble with *m* = 4 (Figure 6b). This means that four
ensemble members are not sufficient to give an adequate ensemble distribution, therefore, the solution
converges to an incorrect state, but with a small spread around it. Comparing the errors for ten and
fifty ensemble members, we see no significant differences between the solutions, which shows that
having an ensemble size larger than the number of degrees of freedom is not required. This is one of
the benefits of using the square-root filter (in the standard ensemble Kalman filter larger ensembles
are needed to avoid sampling errors (Livings et al., 2008)). However, the computational time
required for 50 ensemble members was approximately 4 times longer than that for 10. Therefore,
an ensemble size of *m* = 10 provides a good approximation of the true state for the assimilation of
acoustic modes, while keeping the computation time minimal. +---PAGE_BREAK--- + +### 4.1.2 Combined state and parameter estimation + +In this section, we analyse the combined state and parameter estimation to calibrate both the state and parameters. The two uncertain parameters ($\beta$ and $\tau$) are added to the state vector and updated simultaneously with the acoustic and advection modes, as detailed in § 2.2. Figure 7 shows the evolution of the parameters, normalised to their true value, for the four non-chaotic solutions. The convergence shows that the EnSRKF update is capable of learning the true $\beta$ and $\tau$ values for the four dynamical motions. + +For a comparison of combined state and parameter estimation with state estimation, we compute the root mean square (RMS) error. 
The RMS error at each time step is defined as the square-root of the trace of the covariance matrix of the filtered ensemble, relative to the true solution + +$$ \text{RMS error} = \sqrt{\text{tr}\left(\frac{1}{m-1}\sum_{j=1}^{m} (\psi_j - \psi^{\text{true}})(\psi_j - \psi^{\text{true}})^T\right)} \quad (17) $$ + +The RMS error is evaluated for the state estimation and the combined state and parameter estimation cases, using different initial uncertainties for $\beta$ and $\tau$. This is achieved in state estimation by defining $\beta = c\beta^{\text{true}}$ and $\tau = c\tau^{\text{true}}$, where $c$ is the defined initial uncertainty. For the combined state and parameter estimation, the initial $\beta$ and $\tau$ of each member in the ensemble are taken from an uniform distribution centred around $c\beta^{\text{true}}$ and $c\tau^{\text{true}}$, with a sample standard deviation of 25%. Figure 8a shows the RMS error for the initial parameters set to their true value. The state estimation only outperforms the combined state and parameter estimation in this case, as the state estimation model works with constant true parameters while the combined state and parameter estimation updates the parameters in each analysis step with the EnSRKF update. The true parameters are perturbed by 5%, 25% and 50% in Figs. 8b,c,d, respectively. The combined state and parameter estimation simulations are capable of learning the true state up to a 25% error in the parameters initialisation, as the RMS error is reduced by two orders of magnitude from the initial state, such as in the case of Figure 8a. Combined state and parameter estimation provides an improved approximation of the solution for the highly uncertain case of 50% error (Figure 8d). + +## 4.2 Assimilation of the acoustic pressure from microphones + +As detailed in § 2.1, we consider the scenario of assimilation of pressure measurements from $N_{\text{mic}}$ microphones, located equidistantly from the flame location. 
This section includes results for state estimation (§ 4.2.1) and combined state and parameter estimation (§ 4.2.2). + +### 4.2.1 State estimation + +We consider a tube that is equipped with $N_{\text{mic}} = 6$ microphones, which measure multiple frequency contributions in the signal. This value is chosen from the literature in thermoacoustic +---PAGE_BREAK--- + +Figure 7: Real-time learning of the parameters and the state. Assimilation of acoustic modes for combined state and parameter estimation of non-chaotic regimes. (a) Transient toward a fixed point ($\beta^{\text{true}} = 0.2$), (b) limit cycle ($\beta^{\text{true}} = 0.4$), (c) frequency-locked solution ($\beta^{\text{true}} = 7.7$), and (d) quasiperiodic solution ($\beta^{\text{true}} = 3.6$). The dashed vertical line indicates when data assimilation stops. $\tau^{\text{true}} = 0.2$, $m = 10$, $\sigma_{\text{frac}} = 0.25$, $\Delta t_{\text{analysis}} = 2$. +---PAGE_BREAK--- + +Figure 8: Assimilation of acoustic modes of a quasiperiodic regime. Performance of state estimation (blue) vs. combined state and parameter estimation (orange) in a quasiperiodic regime. Initial conditions $\beta = c\beta^{\text{true}}$ and $\tau = c\tau^{\text{true}}$ with (a) $c = 1$, (b) $c = 1.05$, (c) $c = 1.25$, (d) $c = 1.5$; and $\beta^{\text{true}} = 3.6$, $\tau^{\text{true}} = 0.2$. The dashed vertical line indicates when data assimilation stops. +---PAGE_BREAK--- + +Figure 9: Real-time learning of the state. Assimilation of acoustic pressure from microphones for state estimation of non-chaotic regimes. (a) Transient towards a fixed point ($\beta = 0.2$); (b) limit cycle ($\beta = 0.4$); (c) frequency-locked solution ($\beta = 7.7$); and (d) quasiperiodic solution ($\beta = 3.6$). True pressure oscillations at the flame location (light grey), unfiltered solution (dashed dark grey) and filtered solution (black). The analysis time steps are indicated with red circles. 
$m = 10$, $\sigma_{\text{mic}} = 0.01$, $\sigma_{\text{frac}} = 0.25$, $\Delta t_{\text{analysis}} = 2$. + +experiments (Garita et al., 2021). Figure 9 shows the acoustic pressure at the flame location of the true solution, the unfiltered solution, and the filtered solution. In nonlinear regimes, the algorithm successfully learns the pressure state. The accuracy of the solution is lower than in the assimilation of the acoustic modes of § 4.1.1 because, here, less information on the state is assimilated. (The filter is not designed for statistically non-stationary problems, which is why the transient fixed point solution is not fully learnt by the filter.) + +The effect of the experimental uncertainty is analysed by varying the microphones standard deviation. Physically, the errors are larger than those in Figure 6 because, here, we are assimilating 6 components of the augmented state vector out of 36 components, whereas in § 4.1.1 the filter assimilates 20 out of the 30 components of the state vector. Figures 10a,c show that, after about 20 analysis steps, the filter follows more closely the model than the observations for larger observation's uncertainties. (In other words, the filtered solution “trusts” more the prediction from the model than the observations when the experimental uncertainty is high.) We set $\sigma_{\text{mic}} = 0.01$ in the following simulations, which models experimental microphone uncertainties (De Domenico et al., 2017). +---PAGE_BREAK--- + +Figure 10: Assimilation of pressure from microphones for state estimation of a quasiperiodic regime. Performance metrics. Left: Effect of the microphones’ standard deviation with $\Delta t_{\text{analysis}} = 2$. Right: effect of the assimilation frequency with $\sigma_{\text{mic}} = 0.01$. The error evolution is shown with the relative difference between the true and filtered solutions (top) and the trace of the ensemble covariance (bottom). 
The dashed vertical line indicates when data assimilation stops. $\beta = 3.6$, $m = 10$. $\sigma_{\text{frac}} = 0.25$. +---PAGE_BREAK--- + +Figure 11: Real-time learning of the parameters. Assimilation of acoustic pressure from microphones for combined state and parameter estimation of a quasiperiodic solution. Left: normalised $\beta$. Right: normalised $\tau$. $N_{\text{mic}} = 6$, $\beta^{\text{true}} = 3.6$, $\tau^{\text{true}} = 0.2$, $\Delta t_{\text{analysis}} = 1.5$, $\sigma_{\text{mic}} = 0.01$. The shaded areas show the standard deviation, which becomes smaller as more data is assimilated. + +The relative error is higher than 20% for this case (Figure 10a). Increasing the frequency of analysis allows for a faster convergence with a smaller relative error (Figs. 10b,d). With a time between analysis of $\Delta t_{\text{analysis}} = 1.5$ or 1, the relative error of the filtered solution becomes less than 10% in only 10 time units, approximately. Thus, for the assimilation of microphone pressure data, a higher frequency of analysis is more suitable. We choose the time between analysis to 1.5 time units. The evolution of the trace of the forecast covariance matrix indicates that the spread of the ensemble rapidly shrinks (Figs. 10c,d). Besides, the spread is two orders of magnitude smaller than in the assimilation of the modes (Figs. 6c,d) and remains small even with large relative errors. Physically, this is because the acoustic modes are directly updated in the modes assimilation, but, in this case, the acoustic modes are unobserved variables that are updated indirectly through the microphone pressure observations. + +### 4.2.2 Combined state and parameter estimation + +The parameters $\beta$ and $\tau$ are updated by the EnSRKF at each analysis step, which occurs every 1.5 time units. 
Figures 11a,b show that for an ensemble of ten members, the solution converges to the parameters $\beta \approx 6.6$ and $\tau \approx 0.4$, which correspond to a chaotic solution (see Figure 3). Nevertheless, the true solution is a quasiperiodic oscillator with $\beta = 3.6$ and $\tau = 0.2$. This means that the filtered solution not only converges to different parameters, but also belongs to a different nonlinear regime than that of the true solution. Physically, this occurs because thermoacoustic dynamics experience several bifurcations in short ranges of $\beta$ and $\tau$ (Figure 4). This makes the sampling of nonlinear thermoacoustics challenging. A way to circumvent this is to increase the ensemble size. A parametric study of the effect of the number of realisations is shown in Figure 11. Ten ensemble members are not sufficient to learn the reference solution, however, the larger the ensemble, the faster the EnSRKF converges to the true solution. +---PAGE_BREAK--- + +Figure 12: Real-time learning of the parameters. Assimilation of acoustic pressure from microphones for combined state and parameter estimation of a quasiperiodic solution. Left: normalised $\beta$. Right: normalised $\tau$. Effect of ensemble size without inflation (top) and with inflation using $\rho = 1.02$ (bottom). $N_{\text{mic}} = 15$, $\beta^{\text{true}} = 3.6$, $\tau^{\text{true}} = 0.2$, $\Delta t_{\text{analysis}} = 1$, $\sigma_{\text{mic}} = 0.01$. The shaded areas show the standard deviation, which becomes smaller as more data is assimilated. + +Occasionally, the EnSRKF provides unphysical parameters as the solution of the optimisation problem, such as negative heat source strength. To avoid this, we reject the analysis steps that give unphysical solutions and continue the forecast with no assimilation. This means that we are left-truncating the Gaussian. 
Thus the parameters remain constant until the EnSRKF gives a physical solution to the optimisation problem. (Ad-hoc ways to bound parameters can be designed (Li et al., 2019). This is beyond the scope of this work.) The thresholds for rejection are defined as $\beta \in [0.1, 10]$ and $\tau \in [0.005, 0.8]$. Because the rejection is effectively reducing the amount of information that can be assimilated, the ensemble convergence slows down. This *increase* and *reject* approach is not always sufficient to reach convergence. Figures 12a,b show the same simulation as in Figure 11 with more microphones, $N_{\text{mic}} = 15$, and $\Delta t_{\text{analysis}} = 1$. In this case, the filtered solution is not converging even for 150 ensemble members, which is caused by covariance collapse. To accelerate the convergence and overcome the spurious correlations of finite-sized ensembles (Evensen, 2009), we introduce a covariance inflation to the ensemble forecast when the solution of the analysis step provides unfeasible parameters. The inflation method can be used to counteract the variance reduction due to the spurious correlations, and force the model to explore more states. Here, we include the model uncertainty as stochastic +---PAGE_BREAK--- + +noise by adding an inflation factor $\rho$ to the ensemble forecast + +$$A_{ij}^f = \bar{A}_{ij}^f + \rho \Psi_{ij}^f. \quad (18)$$ + +In this case, $\rho = 1.02$ improves the analysis for the quasiperiodic solution. If necessary, adaptive strategies can be designed following Evensen (2009). Figures 12c,d show the parameters' convergence for the same ensemble sizes as Figures 12a,b, but with covariance inflation. This is sufficient to remove the plateau caused by the divergence of the EnSRKF to unphysical parameters in large ensembles, thereby speeding up the convergence. + +To summarise, we propose an *increase, reject, inflate* strategy to learn the nonlinear dynamics and parameters of thermoacoustics. 
+ +## 5 Twin experiments in chaotic regimes + +This section addresses the assimilation in chaotic regimes. We perform a series of twin experiments with synthetic data using the base parameters of Tab. 1 and the obtained suitable parameters in § 4. Both, state estimation and combined state and parameter estimation are tested in the chaotic region F. In the combined state and parameter estimation, the initial conditions for $\beta$ and $\tau$ are sampled from uniform distributions with an upper bound 25% larger than their true value, and a lower bound 25% smaller than the true parameters. Different simulations are performed to analyse the predictability of the solutions and to determine a suitable time between analysis ($\Delta t_{\text{analysis}}$), which is not trivial in chaotic oscillations. + +Figure 13 shows the comparison between the combined state and parameter assimilation solution, an unfiltered solution, and the true state in the chaotic region F of the bifurcation diagram with the same time between analysis as the previous non-chaotic studies. The assimilation does not perform as well as in non-chaotic regimes. This is physically due to the short predictability of chaotic systems. There are several ways to estimate the predictability of a chaotic system (Boffetta et al., 2002). Here, the predictability is computed as the inverse of the maximal Lyapunov exponent, which provides a time scale after which two nearby trajectories diverge (linearly) due to the butterfly effect. The methodology followed is described in Magri & Doan (2020). The maximal Lyapunov exponent is determined by analysing the growth of the distance between two nearby trajectories. In a logarithmic scale, the Lyapunov exponent is the slope of the linear region, which is computed by linear regression. Figure 14a shows two trajectories that are the same until $t_1 = 980$, when they are set apart by $\epsilon = 10^{-6}$. 
After 10 time units, the two instantaneous solutions are completely different, which is a manifestation of chaos. The logarithmic evolution of the distance between the two trajectories is shown in Figure 14b, where the slope of the linear region gives the dominant Lyapunov exponent. This method is carried out for several initial conditions in the attractor. The resulting maximal Lyapunov exponent is $\lambda_1 = 0.74 \pm 0.30$, which corresponds to a predictability +---PAGE_BREAK--- + +Figure 13: Real-time learning of the state. Assimilation of (a) acoustic modes and (b) pressure from microphones for state estimation of a chaotic regime ($\beta = 7.0$). Comparison of the time evolution of the true pressure oscillations at the flame location (light grey), an unfiltered solution (dashed dark grey) and the filtered solution (black). The analysis time steps are indicated with red circles. $m = 10$, $\sigma_{\text{mic}} = 0.01$, $\sigma_{\text{frac}} = 0.25$, $\Delta t_{\text{analysis}} = 2$. + +Figure 14: Calculation of the Lyapunov exponent to select the analysis time in data assimilation. (a) Time evolution of the pressure oscillations at the flame location of two nearby chaotic solutions, and (b) logarithmic growth of the trajectory separation. +---PAGE_BREAK--- + +Figure 15: Assimilation of acoustic modes for state estimation of a chaotic regime. Performance metrics. Effect of the assimilation frequency. Left: relative difference between the filtered solutions and truth. Right: the trace of the ensemble covariance. The dashed vertical line indicates when data assimilation stops. $\beta = 7.0$, $m = 10$, $\sigma_{frac} = 0.25$. + +time scale of $t_\lambda = \lambda_1^{-1} = 1.62 \pm 0.78$. Physically, the predictability, $t_\lambda$, is key to the implementation of the ensemble square-root Kalman filter for time-accurate predictions because, if the time interval between analysis is too large, the forecast ensemble will already be far apart from the truth. 
Figure 13 shows how the filtered chaotic solution with an assimilation time on the high end of the time scale $t_\lambda$ is completely different to the true solution. Figure 15 shows the effect of the time between analysis $\Delta t_{\text{analysis}}$ in the chaotic assimilation. The EnSRKF time-accurately learns the true solution for $\Delta t_{\text{analysis}} < t_\lambda$ only as the relative error and the trace of the covariance are reduced significantly and converge. Therefore, we consider a time between analysis of $\Delta t_{\text{analysis}} = 0.5$ for chaotic regions. (The butterfly effect is not present in non-chaotic behaviours, therefore, the time considered between analysis in the fixed point, limit cycle, frequency-locked and quasiperiodic cases can be increased to reduce the computation time, as long as the Nyquist-Shannon criterion is fulfilled (Traverso & Magri, 2019).) + +Figure 16 shows the results of state estimation. The assimilation of the acoustic modes is shown in Figs. 16a, while the assimilation of pressure observations is shown in Figs. 16b. The results are generated with an ensemble of $m = 100$. The results indicate that the filter learns the pressure state in chaotic regimes for the two assimilation approaches. Because of the butterfly effect, the filtered pressure and the true signal start differing after removing the filtering due to the chaotic nature of the solutions. Figure 17 shows the results of state estimation in the form of power spectral density (PSD). The top PSDs are computed during the assimilation window ($t \in [900, 1200]$) and the bottom PSDs are computed after removing the filter and propagating the filtered solution without data assimilation ($t \in [1200, 1500]$). The PSDs during the assimilation indicate that the filter learns as well almost exactly the frequency spectrum of the solution, while the unfiltered solution exhibits significant discrepancies. 
After removing the filter, the PSD of the true and filtered solutions remain qualitatively similar, but differ slightly due to the chaotic divergence of the solution. +---PAGE_BREAK--- + +Figure 16: Real-time learning of the state. Assimilation of (left) acoustic modes and (right) pressure from microphones for state estimation of a chaotic solution ($\beta = 7.0$). Comparison of the time evolution of the true pressure oscillations at the flame location (light grey), unfiltered solution (dashed dark grey) and filtered solution (black). The analysis steps are indicated in red circles. $m = 100$, $\sigma_{\text{mic}} = 0.01$, $\sigma_{\text{frac}} = 0.25$, $\Delta t_{\text{analysis}} = 0.5$. + +Figure 17: Power spectral density (PSD) during (top) and after (bottom) assimilation of the true pressure oscillations at the flame location (light grey), unfiltered solution (dashed dark grey) and filtered solution (black), during state estimation in a chaotic regime ($\beta = 7.0$). The analysis time steps are indicated with red circles. Left: assimilation of acoustic modes. Right: assimilation of pressure from microphones. $m = 100$, $\sigma_{\text{mic}} = 0.01$, $\sigma_{\text{frac}} = 0.25$, $\Delta t_{\text{analysis}} = 0.5$. +---PAGE_BREAK--- + +Figure 18: Real-time learning of the parameters. Assimilation of (a) acoustic modes and (b) pressure from microphones for combined state and parameter estimation of a chaotic regime. Time evolution of the parameters and their standard deviation. Chaotic solution ($\beta = 7.0$). The dashed vertical line indicates when data assimilation stops. $m = 300$, $\rho = 1.2$, $\sigma_{\text{mic}} = 0.01$, $\Delta t_{\text{analysis}} = 0.5$, $N_{\text{mic}} = 6$. + +Finally, the data assimilation algorithm is able to estimate $\beta$ and $\tau$ in the combined state and parameter estimation in chaotic regimes for the assimilation of both acoustic modes and pressure from microphones (Figs. 18a,b, respectively). 
The results indicate that there is a successful convergence of the parameters even though their initial uncertainty is large. These simulations are performed with a large ensemble of 300 members and by inflating the ensemble when the assimilation is neglected due to unphysical parameters. The inflation parameter required for convergence in the assimilation of pressure data (Figure 18b) is large ($\rho = 1.2$). Figures 19b shows that the convergence is significantly faster and requires a smaller inflation ($\rho = 1.02$) if the number of microphones is increased to 15, as they provide a greater amount of information on the system, i.e., the problem is less ill-conditioned. + +The data assimilation successfully learns the true state and parameters for chaotic regimes in the twin experiments by increasing the assimilation frequency, the ensemble size and the inflation parameter. + +# 6 Conclusions + +Low-order thermoacoustic models are qualitatively correct, but they may be quantitatively incorrect. In this work, we introduce data assimilation to make qualitative models quantitatively (more) accurate. This is achieved by combining the knowledge from observations, such as experimental data, and a physical model prediction. Data and model predictions are combined with a Bayesian data assimilation. The algorithm learns the state, such as the acoustic pressure, and model's parameters, every time that reference data becomes available (real-time). +---PAGE_BREAK--- + +Figure 19: Real-time learning of the parameters. Assimilation of (a) acoustic modes and (b) pressure from microphones for combined state and parameter estimation of a chaotic regime. Time evolution of the parameters and their standard deviation. Chaotic solution ($\beta = 7.0$). The dashed vertical line indicates when data assimilation stops. $m = 300$, $\rho = 1.02$, $\sigma_{\text{mic}} = 0.01$, $\Delta t_{\text{analysis}} = 0.5$, $N_{\text{mic}} = 15$. 
+ +First, we discuss that the prediction of nonlinear thermoacoustics is challenging due to the sensitivity to small changes in the physical parameters, such as the time delay and flame index. In the nonlinear dynamics, this sensitivity manifests itself as an abundance of bifurcations of the solution topology, and hystereses. Nonlinear regimes (periodic, quasiperiodic, chaotic, frequency-locked) and bifurcations are identified through covariant Lyapunov vector analysis, dynamical systems theory and nonlinear timeseries analysis. Second, we develop a sequential data assimilation algorithm based on the ensemble square-root Kalman filter in the time domain. This nonlinear filter selects the most likely state and set of physical parameters, which are compatible with model predictions and their uncertainties, and observations and their uncertainties. The filter is physical, i.e., it is not a purely machine learning technique, because it provides estimates that are compatible with the conservation laws, which makes it robust and principled. The data, once assimilated, does not need to be stored. For the data assimilation, which is based on a Markov assumption, we transform the time-delayed dynamics (non-Markovian) into an initial value problem (Markovian). Third, twin experiments are performed in each region of the bifurcation diagram with reference data on (i) the acoustic Galerkin modes, and (ii) the acoustic pressure taken from multiple microphones. On the one hand, in non-chaotic oscillations, the frequency with which data should be assimilated needs to fulfil the Nyquist-Shannon criterion with respect to the dominant acoustic mode. On the other hand, in chaotic oscillations, we highlight that the assimilation frequency should scale with the Lyapunov exponent. 
During the combined state and parameter estimation with pressure observations, it is observed that the filter occasionally provides unphysical solutions, such as negative time delays, which lead to convergence to incorrect solutions. This is due to the bifurcations and hystereses that occur in a small range of parameters. Hence, fourth, we propose an *increase, reject, inflate* strategy to overcome this. In detail, we increase the ensemble size to better capture the correct dynamics; we reject the analysis steps that provide unphysical parameters, e.g., negative time delays; and we inflate the ensemble covariance by adding noise as a regularisation term. With data assimilation, we +---PAGE_BREAK--- + +show that (i) the correct acoustic pressure and parameters can be accurately learnt (i.e., inferred); (ii) the ensemble size is small (in contrast to standard Kalman filters), from ten to hundred depending on the multi-frequency content of the solution; (iii) the learning is robust because it can tackle large uncertainties in the observations (up to 50% the mean values); (iv) the uncertainty of the prediction and parameters is naturally part of the output; and (v) both the time-accurate solution and statistics (through power spectral density function) can be successfully learnt. + +The technology developed in this paper can be applied to improve the quantitative accuracy of reduced-order models with high-fidelity experimental data from pressure sensors, and to learn different model parameters. Data assimilation opens up new possibility for real-time prediction of thermoacoustics by synergistically combining physical knowledge and data. + +## Acknowledgements + +A. N. is financially supported by Rolls-Royce, the EPSRC-DTP and the Cambridge Commonwealth, European & International Trust under a Cambridge European Scholarship. L. M. gratefully acknowledges support from the RAEng Research Fellowships Scheme and the ERC Starting Grant PhyCo 949388. 
The authors are grateful to Francisco Huhn, who helped the authors produce Figure 3. The authors report no conflict of interest. + +# A Derivation of the EnSRKF + +Before starting with the derivation of the filter, some definitions are introduced. For *m* ensemble members and a state vector $\psi_i \in \mathbb{R}^{N \times 1}$, the matrix that encapsulates the ensemble members and the ensemble mean are defined as + +$$ A = (\psi_1, \psi_2, \dots, \psi_m) \in \mathbb{R}^{N \times m} \quad \text{and} \quad \bar{\psi} \approx \frac{1}{m} \sum_{i=1}^{m} \psi_i \tag{19} $$ + +With these, the following definition for the ensemble perturbation matrix applies + +$$ \mathbf{\Psi} = (\psi_1 - \bar{\psi}, \psi_2 - \bar{\psi}, \dots, \psi_m - \bar{\psi}) \tag{20} $$ + +The ensemble covariance matrix can be determined from (21), introducing a factor ($m-1$) to avoid a sample bias. The covariance matrix is defined as an approximation because it is derived from a statistical sample + +$$ C_{\psi\psi} \approx \frac{1}{m-1} \mathbf{\Psi} \mathbf{\Psi}^T \tag{21} $$ + +Accounting for these definitions, the Kalman Filter update (10a) for the ensembles is in matrix form: + +$$ A^a = A^f + (M C_{\psi\psi}^f)^T [C_{\epsilon\epsilon} + M C_{\psi\psi}^f M^T]^{-1} (Y - M A^f) \tag{22} $$ +---PAGE_BREAK--- + +where $\mathbf{Y} \in \mathbb{R}^{q \times m}$ is the matrix containing the $q$ observations of each member in the ensemble; $\mathbf{M} \in \mathbb{R}^{q \times N}$ is the measurement operator matrix; and $\mathbf{C}_{\epsilon\epsilon} \in \mathbb{R}^{q \times q}$ is the observations' error covariance matrix. 
+ +Using the definitions for the ensemble covariance in (21), the ensemble mean of (22) is: + +$$ \bar{\mathbf{A}}^{\mathrm{a}} = \bar{\mathbf{A}}^{\mathrm{f}} + \mathbf{\Psi}^{\mathrm{f}} (\mathbf{M}\mathbf{\Psi}^{\mathrm{f}})^{\mathrm{T}} \left[ (m-1)\mathbf{C}_{\epsilon\epsilon} + \mathbf{M}\mathbf{\Psi}^{\mathrm{f}} (\mathbf{M}\mathbf{\Psi}^{\mathrm{f}})^{\mathrm{T}} \right]^{-1} (\mathbf{Y} - \mathbf{M}\bar{\mathbf{A}}^{\mathrm{f}}) \quad (23) $$ + +where $\bar{\mathbf{A}}$ is a $N \times m$ matrix of identical mean analysis states in each column. Introducing now the covariance expression into the analysis error update (see (10b)), yields the analysis covariance matrix + +$$ \mathbf{C}_{\psi\psi}^{\mathrm{a}} = \frac{\mathbf{\Psi}^{\mathrm{f}} \mathbf{\Psi}^{\mathrm{f}}^{\mathrm{T}}}{m-1} - \left( \mathbf{M} \frac{\mathbf{\Psi}^{\mathrm{f}} \mathbf{\Psi}^{\mathrm{f}}^{\mathrm{T}}}{m-1} \right)^{\mathrm{T}} \left[ \mathbf{C}_{\epsilon\epsilon} + \mathbf{M} \frac{\mathbf{\Psi}^{\mathrm{f}} \mathbf{\Psi}^{\mathrm{f}}^{\mathrm{T}}}{m-1} \mathbf{M}^{\mathrm{T}} \right]^{-1} \left( \mathbf{M} \frac{\mathbf{\Psi}^{\mathrm{f}} \mathbf{\Psi}^{\mathrm{f}}^{\mathrm{T}}}{m-1} \right) \quad (24) $$ + +Equation (23) and (24) can be simplified by introducing the following matrices + +$$ \mathbf{S} = \mathbf{M}\mathbf{\Psi}^{\mathrm{f}} \quad \text{and} \quad \mathbf{W} = \mathbf{S}\mathbf{S}^{\mathrm{T}} + (m-1)\mathbf{C}_{\epsilon\epsilon} \quad (25) $$ + +leading to + +$$ \bar{\mathbf{A}}^{\mathrm{a}} = \bar{\mathbf{A}}^{\mathrm{f}} + \mathbf{\Psi}^{\mathrm{f}} \mathbf{S}^{\mathrm{T}} \mathbf{W}^{-1} (\mathbf{Y} - \mathbf{M}\bar{\mathbf{A}}^{\mathrm{f}}) \quad (26) $$ + +$$ C_{\psi\psi}^{a} = \frac{1}{m-1} \Psi^f (\mathbb{I} - S^T W^{-1} S) \Psi^f T \quad \therefore \quad \Psi^\alpha \Psi^{\alpha T} = \Psi^f (\mathbb{I} - S^T W^{-1} S) \Psi^f T \quad (27) $$ + +The key idea of the EnSRKF is to find a matrix $\mathbf{\Psi}^\alpha$ with the covariance of (27), which is added 
to the mean ensemble in (26) to compute the full ensemble. First, the matrix $\mathbf{W}$ defined in (25) can be eigen-decomposed such that $\mathbf{W} = \mathbb{Z} \Lambda \mathbb{Z}^T$ because it is a symmetric square matrix, where $\Lambda$ and $\mathbb{Z}$ are the matrices of eigenvalues (diagonal) and eigenvectors (orthogonal), respectively. Substituting the eigen-decomposition into definition of the analysis perturbation matrix, (27) is re-written as + +$$ \mathbf{\Psi}^{\mathrm{a}} \mathbf{\Psi}^{\mathrm{aT}} = \mathbf{\Psi}^f (\mathbb{I} - \mathbf{S}^T \mathbb{Z} \Lambda^{-1} \mathbb{Z} \mathbf{S}) \mathbf{\Psi}^f T = \mathbf{\Psi}^f (\mathbb{I} - \mathbf{X}^T \mathbf{X}) \mathbf{\Psi}^f T \quad (28) $$ + +where $\mathbf{X} = \Lambda^{-1/2} \mathbb{Z}^T \mathbf{S}$. Similarly to the decomposition of $\mathbf{W}$, the symmetric matrix given by the product $\mathbf{X}^T \mathbf{X}$ can be expressed as: $\mathbf{X}^T \mathbf{X} = \mathbf{V}\boldsymbol{\Sigma}\mathbf{V}^\mathrm{T}$, where $\mathbf{V}$ is an orthogonal matrix of eigenvectors and $\boldsymbol{\Sigma}$ is a diagonal matrix of eigenvalues. 
Next, introducing this decomposition into (28) yields

$$
\begin{align}
\mathbf{\Psi}^{\mathrm{a}} \mathbf{\Psi}^{\mathrm{aT}} &= \mathbf{\Psi}^{\mathrm{f}} (\mathbb{I} - \mathbf{V}\boldsymbol{\Sigma}\mathbf{V}^{\mathrm{T}}) \mathbf{\Psi}^{\mathrm{fT}} \\
&= \mathbf{\Psi}^{\mathrm{f}} \mathbf{V} (\mathbb{I} - \boldsymbol{\Sigma}) \mathbf{V}^{\mathrm{T}} \mathbf{\Psi}^{\mathrm{fT}} \\
&= [\mathbf{\Psi}^{\mathrm{f}} \mathbf{V} (\mathbb{I} - \boldsymbol{\Sigma})^{1/2} \mathbf{V}^{\mathrm{T}}] [\mathbf{\Psi}^{\mathrm{f}} \mathbf{V} (\mathbb{I} - \boldsymbol{\Sigma})^{1/2} \mathbf{V}^{\mathrm{T}}]^{\mathrm{T}}
\end{align}
\tag{29}
$$
Note that, in the absence of observations, there would be no data assimilation and the initial conditions for the next forecast are the forecast states rather than the analysis states, hence + +$$ +\mathbf{A}^f(t + \Delta t) = \mathcal{F}(\mathbf{A}^f(t)) \tag{32} +$$ + +**B Nonlinear time series analysis** + +The reconstruction of the attractor in a *d*-dimensional phase space (phase portrait) is enabled by Takens’ delay embedding theorem, where the optimal time delay, $\zeta$, is calculated as the first local minimizer of the average mutual information (Kabiraj et al., 2012b). The embedding dimension, *d*, which should be sufficiently large in order to unfold the actual structure of the phase space to avoid false crossing of trajectories, is calculated with the false nearest neighbour algorithm. The first return map shows the local maximum of a signal with respect to the next, which approximates the Poincare’ section. Recurrence plots (Nair et al., 2014) are computed to examine the recurrences of thermoacoustic instabilities in time through the ($N_1 \times N_1$) binary matrix $R_{ij} = \Theta (\epsilon - ||x_i - x_j||)$, $i, j = 1, 2, ..., N_1$, where *n* is the length of the signal; $N_1 = n - (d-1)\zeta$ points; $\Theta$ is the Heaviside function ($\Theta(X < 0) = 0$ and $\Theta(X \ge 0) = 1$); and $\epsilon$ is a user-defined threshold. The threshold is set to 10% of the maximum of the distance, $||x_i - x_j||$. Therefore, the recurrence plot is a graphical representation of black and white points, where a coordinate is depicted in a black colour if the system at a state *i* is within a distance $\epsilon$ from the system state *j* at different time. + +Figure 20 includes the detailed characterization of the solutions in the bifurcation diagram in Figure 4. The first row illustrates a period–1 solution and the second row a period–2. 
The PSD shows that the frequency spectra of period–1 oscillations have a single dominant frequency, and its higher harmonics; however, period–2 limit cycles have two frequencies of the same order of magnitude, and their higher harmonics, with the smaller peak at half of the dominant frequency. The dynamics of an X-periodic limit cycle is characterised by forming a closed loop in the phase portrait, and by a discrete number of X points the first return map. The periodic recurrence plot +---PAGE_BREAK--- + +Figure 20: Time series (red), power spectral density (purple), phase space (green), first return map (blue) and recurrence plot (black and white) for the first route to chaos, regions B to D in the bifurcation diagram. From top to bottom row, a period-1 limit cycle with $\beta = 0.4$, a period-2 limit cycle with $\beta = 2.0$, a quasiperiodic solution with $\beta = 3.6$, and a chaotic motion with $\beta = 4.0$. +---PAGE_BREAK--- + +has equally spaced diagonals, where the distance between lines is the period of the solution, hence the line spacing in the period-2 limit cycle is smaller in Figure 20. In the frequency spectra, one can observe that quasiperiodic motions have several high density peaks and, in this case, there are 3 incommensurate frequencies (indicated with arrows in the figure), while the rest are combinations of them. Because the frequencies are not rationally related, the trajectories do not form a closed loop in the phase space, but they evolve on the surface of a 3-torus structure. The first return map for quasiperiodic solutions shows some closed loop of points and the recurrence plot illustrates this behaviour as diagonal lines with uneven vertical spacing (Kabiraj & Sujith, 2012). The chaotic PSD generally consist of a broadband spectra with peaks at frequencies close to the acoustic frequencies of the duct. 
Chaotic attractors do not have a smooth geometry, but instead they are fractal structures, which means that they have multiple loops of nearby trajectories seen in the phase space of Figure 20. Their first return map shows a set of scattered points with a seemingly-random pattern. The recurrence plot depicts short diagonals and single scattered points that create patches, because a chaotic system returns to an arbitrary small neighbourhood of the previous states and diverges after some short period due to the butterfly effect. + +The first two rows in Figure 21 shows the characterisation of these regions with the limit cycle shown as a discrete number of points in the return map and the chaotic as a strange attractor in the phase space. Although the spectra of this quasiperiodic solution and the frequency-locked case look similar, the main difference is that the frequencies of the dominant frequencies in the frequency-locked case is a rational number, while the quasiperiodic frequencies are incommensurate. The time series analysis in Figure 21 shows that FLs are high periodic oscillations with large time period because they are closed trajectories with several loop in the phase space, and a finite number of points in the first return map. In the recurrence plot, frequency-locked solutions show as parallel diagonals with small spacing. + +## C Computational time + +The simulations are performed in a 6-core machine (Intel(R) Core(TM) i7-8750H CPU @2.20GHz, 2201 Mhz). The computation time of the twin experiments shown in this section (including the computation of the true, the unfiltered and the filtered solutions) ranges from 3–4 seconds in the simpler cases with $m = 10$, up to 16–17 seconds with $m = 200$, approximately. The increase in computation time is linearly dependent on the ensemble size $m$, which is expected because the algorithm runs in parallel. Shortening the time between analysis steps also increases the computation cost. 
By reducing $\Delta t_{analysis}$ from 2 time units, to 1 (§ 4.2.2) the CPU time increases by ~10%; and by reducing this further to 0.5 time units during the chaotic assimilations (§ 5), the computation time increases by ~20%. There are no significant CPU cost variations due to the type of assimilation used or the introduction of parameter estimation / inflation to the algorithm. +---PAGE_BREAK--- + +Figure 21: Time series (red), power spectral density (purple), phase space (green), first return map (blue) and recurrence plot (black and white) for regions E to H in the bifurcation diagram. From top to bottom row, a period-2 limit cycle with $\beta = 5.2$, a chaotic solution with $\beta = 7.0$, a frequency-locked motion with $\beta = 7.7$, and a quasiperiodic oscillator with $\beta = 8.5$. +---PAGE_BREAK--- + +References + +BALASUBRAMANIAN, KOUSHIK & SUJITH, R. I. 2008 Thermoacoustic instability in a Rijke tube: Non-normality and nonlinearity. *Physics of Fluids* **20** (044103). + +BANNISTER, R. N. 2008 A review of forecast error covariance statistics in atmospheric variational data assimilation. i: Characteristics and measurements of forecast error covariances. *Quarterly Journal of the Royal Meteorological Society: A journal of the atmospheric sciences, applied meteorology and physical oceanography* **134** (637), 1951–1970. + +BANNISTER, R. N. 2017 A review of operational methods of variational and ensemble-variational data assimilation. *Quarterly Journal of the Royal Meteorological Society* **143** (703), 607–633. + +BOFFETTA, GUIDO, CENCINI, MASSIMO, FALCIONI, MASSIMO & VULPIANI, ANGELO 2002 Predictability: a way to characterize complexity. *Physics reports* **356** (6), 367–474. + +COLBURN, C. H., CESSNA, J. B. & BEWLEY, T. R. 2011 State estimation in wall-bounded flow systems. part 3. the ensemble kalman filter. *Journal of Fluid Mechanics* **682**, 289–303. + +CULICK, FRED E. C. 2006 Unsteady Motions in Combustion Chambers for Propulsion Systems. 
*North Atlantic Treaty Organization RTO AGAR-Dograph AG-AVT-039*. + +DE DOMENICO, FRANCESCA, ROLLAND, ERWAN O & HOCHGREB, SIMONE 2017 Detection of direct and indirect noise generated by synthetic hot spots in a duct. *Journal of Sound and Vibration* **394**, 220–236. + +DOWLING, ANN P 1995 The calculation of thermoacoustic oscillations. *Journal of sound and vibration* **180** (4), 557–581. + +DOWLING, ANN P. 1999 A kinematic model of a ducted flame. *Journal of fluid mechanics* **394**, 51–72. + +DOWLING, ANN P. & MORGANS, AIMEE S. 2005 Feedback Control of Combustion Oscillations. *Annual Review of Fluid Mechanics* **37** (1), 151–182. + +ECKART, CARL 1960 Hydrodynamics of ocean and atmosphere. *Pergamon Press* . + +EVENSEN, GEIR 2003 The ensemble kalman filter: Theoretical formulation and practical implementation. *Ocean dynamics* **53** (4), 343–367. + +EVENSEN, GEIR 2009 Data assimilation: the ensemble Kalman filter. Springer Science & Business Media. + +GARITA, FRANCESCO, YU, HANS & JUNIPER, MATTHEW P 2021 Assimilation of experimental data to create a quantitatively accurate reduced-order thermoacoustic model. *Journal of Engineering for Gas Turbines and Power* **143** (2), 021008. +---PAGE_BREAK--- + +GELB, ARTHUR 1974 *Applied optimal estimation*. MIT press. + +GOTODA, HIROSHI, IKAWA, TAKUYA, MAKI, KOSHIRO & MIYANO, TAKAYA 2012 Short-term prediction of dynamical behavior of flame front instability induced by radiative heat loss. *Chaos: An Interdisciplinary Journal of Nonlinear Science* **22** (3), 033106. + +GOTODA, HIROSHI, NIKIMOTO, HIROYUKI, MIYANO, TAKAYA & TACHIBANA, SHIGERU 2011 Dynamic properties of combustion instability in a lean premixed gas-turbine combustor. *Chaos: An Interdisciplinary Journal of Nonlinear Science* **21** (1), 013124. + +GUAN, YU, GUPTA, VIKRANT & LI, LARRY KB 2020 Intermittency route to self-excited chaotic thermoacoustic oscillations. *Journal of Fluid Mechanics* **894**. + +HECKL, MARIA A. 
1990 Non-linear Acoustic Effects in the Rijke Tube. *Acustica* **72**. + +HUHN, FRANCISCO & MAGRI, LUCA 2020 Stability, sensitivity and optimisation of chaotic acoustic oscillations. *Journal of Fluid Mechanics* **882**, A24. + +JAYNES, EDWIN T. 1957 Information theory and statistical mechanics. *Physical review* **106** (4), 620. + +JAYNES, EDWIN T. 2003 *Probability theory: The logic of science*. Cambridge University Press. + +JAZWINSKI, ANDREW H 2007 *Stochastic processes and filtering theory*. Courier Corporation. + +JUNIPER, MATTHEW P. 2011 Triggering in the horizontal Rijke tube: non-normality, transient growth and bypass transition. *Journal of Fluid Mechanics* **667**, 272–308. + +JUNIPER, MATTHEW P. & SUJITH, R. I. 2018 Sensitivity and Nonlinearity of Thermoacoustic Oscillations. *Annual Review of Fluid Mechanics* **50** (1), 661–689. + +KABIRAJ, LIPIKA, SAURABH, ADITYA, WAHI, PANKAJ & SUJITH, R. I. 2012a Route to chaos for combustion instability in ducted laminar premixed flames. *Chaos: An Interdisciplinary Journal of Nonlinear Science* **22** (2), 023129. + +KABIRAJ, LIPIKA & SUJITH, R. I. 2012 Nonlinear self-excited thermoacoustic oscillations: intermittency and flame blowout. *Journal of Fluid Mechanics* **713**, 376–397. + +KABIRAJ, LIPIKA, SUJITH, R. I. & WAHI, PANKAJ 2012b Bifurcations of Self-Excited Ducted Laminar Premixed Flames. *Journal of Engineering for Gas Turbines and Power* **134** (3), 031502–1–031502–7. + +KALMAN, R. E. 1960 A New Approach to Linear Filtering and Prediction Problems. *Journal of Basic Engineering* **82** (1), 35–45. + +KANTZ, HOLGER & SCHREIBER, THOMAS 2003 *Nonlinear Time Series Analysis*, 2nd edn. Cambridge: Cambridge University Press. +---PAGE_BREAK--- + +KASHINATH, KARTHIK, WAUGH, IAIN C. & JUNIPER, MATTHEW P. 2014 Nonlinear self-excited thermoacoustic oscillations of a ducted premixed flame: bifurcations and routes to chaos. *Journal of Fluid Mechanics* **761**, 399–430. 
+ +LABAHN, JEFFREY W, WU, HAO, CORITON, BRUNO, FRANK, JONATHAN H & IHME, MATTHIAS 2019 Data assimilation using high-speed measurements and LES to examine local extinction events in turbulent flames. *Proceedings of the Combustion Institute* **37** (2), 2259–2266. + +LANDAU, L. D. & LIFSHITZ, E. M. 1987 *Fluid Mechanics*, 2nd edn. Pergamon Press. + +LEWIS, JOHN M., LAKSHMIVARAHAN, S. & DHALL, SUDARSHAN 2006 *Dynamic Data Assimilation: A Least Squares Approach, Encyclopedia of Mathematics and its Applications*, vol. 13. Cambridge University Press. + +LI, RUOXIA, JAN, NABIL MAGBOOL, HUANG, BIAO & PRASAD, VINAY 2019 Constrained ensemble Kalman filter based on Kullback-Leibler divergence. *Journal of Process Control* **81**, 150–161. + +LIEUWEN, TIM C. 2012 *Unsteady Combustor Physics*. Cambridge: Cambridge University Press. + +LIEUWEN, TIM C. & YANG, VIGOR 2005 *Combustion instabilities in gas turbine engines: operational experience, fundamental mechanisms, and modelling*. American Institute of Aeronautics and Astronautics. + +LIVINGS, DAVID M., DANCE, SARAH L. & NICHOLS, NANCY K. 2008 Unbiased ensemble square root filters. *Physica D: Nonlinear Phenomena* **237** (8), 1021–1028. + +MAGRI, LUCA 2019 Adjoint methods as design tools in thermoacoustics. *Applied Mechanics Reviews* **71** (2). + +MAGRI, LUCA, BALASUBRAMANIAN, K., SUJITH, R. I. & JUNIPER, MATTHEW P. 2013 Non-normality in combustion-acoustic interaction in diffusion flames: a critical revision. *Journal of Fluid Mechanics* **719**, 183–202. + +MAGRI, LUCA & DOAN, NGUYEN ANH KHOA 2020 Physics-informed data-driven prediction of turbulent reacting flows with lyapunov analysis and sequential data assimilation. In *Data Analysis for Direct Numerical Simulations of Turbulent Combustion*, pp. 177–196. Springer. + +MAGRI, LUCA & JUNIPER, MATTHEW 2014 Global modes, receptivity, and sensitivity analysis of diffusion flames coupled with duct acoustics. *arXiv preprint arXiv:1408.1762*. 
+ +MENSAH, GEORG A, MAGRI, LUCA, SILVA, CAMILO F, BUSCHMANN, PHILIP E & MOECK, JONAS P 2018 Exceptional points in the thermoacoustic spectrum. *Journal of Sound and Vibration* **433**, 124–128. +---PAGE_BREAK--- + +NAIR, VINEETH & SUJITH, R. I. 2015 A reduced-order model for the onset of combustion instability: physical mechanisms for intermittency and precursors. *Proceedings of the combustion institute* **35** (3), 3193–3200. + +NAIR, VINEETH, THAMPI, GIREESHKUMARAN & SUJITH, R. I. 2014 Intermittency route to thermoacoustic instability in turbulent combustors. *Journal of Fluid Mechanics* **756**, 470–487. + +NICoud, Franck, Benoit, Laurent, Sensiau, Claude & Poinsot, Thierry 2007 Acoustic modes in combustors with complex impedances and multidimensional active flames. *AIAA journal* **45** (2), 426–441. + +Noiray, Nicolas 2017 Linear growth rate estimation from dynamics and statistics of acoustic signal envelope in turbulent combustors. *Journal of Engineering for Gas Turbines and Power* **139**(4). + +Noiray, NICOLAS, DUROX, DANIEL, SCHÜLLER, THIERRY & CANDEL, SÉBASTIEN 2008 A unified framework for nonlinear combustion instability analysis based on the flame describing function. *Journal of Fluid Mechanics* **615**, 139–167. + +Nóvoa, Andrea & Magri, Luca 2020 A bayesian approach for predicting and filtering linear and nonlinear thermoacoustic oscillations. *Bulletin of the American Physical Society* **65** (13). + +O’Connor, Jacqueline, Acharya, Vishal & Lieuwen, Timothy 2015 Transverse combustion instabilities: Acoustic, fluid mechanic, and flame processes. *Progress in Energy and Combustion Science* **49**, 1–39. + +Orchini, A., Illingworth, S. J. & Juniper, Matthew P. 2015 Frequency domain and time domain analysis of thermoacoustic oscillations with wave-based acoustics. *Journal of Fluid Mechanics* . 
+ +Orchini, Alessandro, Magri, Luca, Silva, Camilo F., MenSAH, Georg A & Moeck, Jonas P 2020 Degenerate perturbation theory in thermoacoustics: high-order sensitivities and exceptional points. *Journal of Fluid Mechanics* **903**. + +Pham, Dinh Tuan 2001 Stochastic methods for sequential data assimilation in strongly nonlinear systems. *Monthly weather review* **129** (5), 1194–1207. + +Pitsch, Heinz & De Lageneste, L. DUCHAMP 2002 Large-eddy simulation of premixed turbulent combustion using a level-set approach. *Proceedings of the Combustion Institute* **29** (2), 2001–2008. + +Poinsot, Thierry 2017 Prediction and control of combustion instabilities in real engines. Proceedings of the Combustion Institute **36** (1), 1–28. + +Rayleigh, Lord 1878 The Explanation of Certain Acoustical Phenomena. Nature **18**, 319-321. +---PAGE_BREAK--- + +REICH, SEBASTIAN & COTTER, COLIN 2015 *Probabilistic Forecasting and Bayesian Data Assimilation*. Cambridge: Cambridge University Press. + +SAKOV, PAVEL & OKE, PETER R. 2008 Implications of the form of the ensemble transformation in the ensemble square root filters. *Monthly Weather Review* **136** (3), 1042–1053. + +SASAKI, YOSHIKAZU 1955 A fundamental study of the numerical prediction based on the variational principle. *Journal of the Meteorological Society of Japan*. Ser. II **33** (6), 262–275. + +SHAMPINE, LAWRENCE F. & REICHELT, MARK W. 1997 The MATLAB ODE Suite. *SIAM journal on scientific computing* **18** (1), 1–22. + +DA SILVA, ANDRE F. C. & COLONIUS, TIM 2018 Ensemble-based state estimator for aerodynamic flows. *AIAA Journal* **56** (7), 2568–2578. + +SILVA, CAMILO FERNANDO, NICOOD, FRANCK, SCHULLER, THIERRY, DUROX, DANIEL & CANDEL, SÉBASTIEN 2013 Combining a helmholtz solver with the flame describing function to assess combustion instability in a premixed swirled combustor. *Combustion and Flame* **160** (9), 1743–1754. + +SUJITH, R. I. & UNNI, VISHNU R. 
2020 Complex system approach to investigate and mitigate thermoacoustic instability in turbulent combustors. *Physics of Fluids* **32** (6), 061401. + +TARANTOLA, ALBERT 2005 *Inverse problem theory and methods for model parameter estimation*. SIAM. + +TRAVERSO, TULLIO & MAGRI, LUCA 2019 Data Assimilation in a Nonlinear Time-Delayed Dynamical System with Lagrangian Optimization. *Computational Science – ICCS 2019* p. 156–168. + +TREFETHEN, LLOYD N. 2000 *Spectral methods in MATLAB*, vol. 10. SIAM. + +XIAO, HENG, WU, J-L, WANG, J-X, SUN, RUI & ROY, C. J. 2016 Quantifying and reducing model-form uncertainties in reynolds-averaged navier–stokes simulations: A data-driven, physics-informed bayesian approach. *Journal of Computational Physics* **324**, 115–136. + +YU, HANS, JUNIPER, MATTHEW P & MAGRI, LUCA 2019 Combined state and parameter estimation in level-set methods. *Journal of Computational Physics* **399**, 108950. + +ZINN, BEN T. & LORES, MANUEL E. 1971 Application of the galerkin method in the solution of non-linear axial combustion instability problems in liquid rockets. *Combustion Science and Technology* **4** (1), 269–278. \ No newline at end of file diff --git a/samples_new/texts_merged/393503.md b/samples_new/texts_merged/393503.md new file mode 100644 index 0000000000000000000000000000000000000000..a0f0a7e8b470120998a948c8f8f05f36ae9b03fa --- /dev/null +++ b/samples_new/texts_merged/393503.md @@ -0,0 +1,393 @@ + +---PAGE_BREAK--- + +# On the Choice of Multiple Flat Outputs for Fault Detection and Isolation of a Flat System + +Rim RAMMAL*, Tudor-Bogdan AIRIMITOAIE*, +Franck CAZAURANG*, Jean LÈVINE**, +Pierre MELCHIOR* + +* Univ. Bordeaux, Bordeaux INP, CNRS, IMS, 33405 Talence, France +ictional redundancy in which multiple sensors and actuators are used to measure and control a particular variable (Chen et al., 2015). The drawbacks of this method are the extra equipment, maintenance cost and additional space required to accommodate the equipment. 
This approach was improved later on by the introduction of the *model-based analytical redundancy method*, based on the notion of *generating residual signals*. These residues are defined as the difference between the measured variables and the estimated ones. In the case of no fault, and in the ideal case of noise free observations, the values of the residues are equal to zero. In the non-zero case, the estimation method must be specified, see e.g. the observer-based approach (Tousi and Khorasani, 2011), the parity-space approach (Diversi et al., 2002) or the Kalman-based approach (Izadian and Khayyer, 2010). However, in these approaches, a sensor may be wrongly declared faulty because of the lack of efficiency of the estimation algorithm, hence the importance of the notion of *detectability*. + +Recently, the flatness property has been introduced into the repertoire of FDI techniques (Suryawan et al., 2010; Martínez-Torres et al., 2014). Here, residues are calculated using the differential flatness property. Roughly speaking, let us recall that a system is said to be flat if all the + +state and input variables can be expressed as functions of a particular variable, called flat output, and a finite number of its successive derivatives. The method presented in Suryawan et al. (2010) is dedicated to linear flat systems and uses the properties of B-spline parameterisation to estimate the time derivatives of the flat output, which may not be defined because of the presence of noise. This derivative estimation can take time and cause a delay in the reconfiguration process. In order to overcome these issues, a high-gain observer has been proposed in Martínez-Torres et al. (2014) to evaluate the time derivative of the noisy signals. The observer may be complemented by a low-pass filter to improve its performance. Note that the latter method can be applied to both, linear and nonlinear flat systems. 
+ +In the present flatness-based FDI approach, an effort is made to dissociate the theoretical *isolability* property, based on residue computation, and the estimation process. For this purpose, we compute the residues between the measurements and their expression exactly obtained from the measured flat outputs and their derivatives estimated online. The treatment of these residues slightly differs from the ones of the previous approaches (Kóscielny et al., 2016): every sensor and actuator admits a *fault alarm signature*, i.e. a number of residues affected by a fault on this sensor/actuator and a fault on a sensor/actuator is isolable if its corresponding fault alarm signature is distinct. In practice, the treatment of these residues is adapted, in the presence of noise, by introducing a threshold and an estimation process as in the previous approaches (Martínez-Torres et al., 2013). Moreover, we show that it is possible to increase the isolability of faults by considering several flat outputs, at the condition that they are independent, + +**Abstract:** This paper presents a rigorous definition of the isolability of a fault in a flat system whose flat outputs are measured by sensors that are subject to faults. In particular, if only one sensor or actuator is faulty at a time, we show that the isolation of faults can be achieved if a pair of flat outputs satisfies some independence condition. A detailed characterization of this condition is presented. Finally, the pertinence of the isolability concept is demonstrated on the example of a three tank system. + +**Keywords:** nonlinear flat system, flat output, fault detection and isolation, three tank system. + +## 1. INTRODUCTION +---PAGE_BREAK--- + +thus completing in a rigorous way some heuristic results of Martínez-Torres et al. (2013). 
These results are applied to a three tank FDI problem where we compute two independent flat outputs that allow the isolation of all possible simple faults (only one faulty sensor or actuator at a time). + +The main contributions of this paper are the above mentioned rigorous definition of isolability of faults and the characterization of the flat outputs to be used in the fault isolation. + +This paper is organized as follows: section 2 introduces the basic concepts of FDI for nonlinear differentially flat systems and their definitions. Section 3 discusses the conditions for independence between flat outputs. Section 4 deals with the application of this FDI approach to the three tank system. Finally, section 5 concludes the paper. + +## 2. FLATNESS-BASED FDI + +### 2.1 Differentially Flat System + +Consider the following nonlinear system + +$$ \begin{cases} \dot{x} = f(x, u) \\ y = h(x, u) \end{cases} \quad (1) $$ + +where $x$, the vector of states, evolves in a $n$-dimensional manifold $X$, $u \in \mathbb{R}^m$ is the vector of inputs, $y \in \mathbb{R}^p$ is the measured output, $m \le n$, $\text{rank}(\frac{\partial f}{\partial u}) = m$ and $m \le p$. Let $(x, \bar{u}) \triangleq (x, u, \dot{u}, \ddot{u}, \ldots)$ be a prolongation of the coordinates $(x, u)$ to the manifold of jets of infinite order $\mathcal{X} \triangleq X \times \mathbb{R}_\infty^m$ (Fliess et al., 1999), (Levine, 2009, Chapter 5). + +In the sequel, we systematically denote by $\bar{\xi} \triangleq (\xi, \dot{\xi}, \ddot{\xi}, \ldots)$ the sequence of infinite order jets of a vector $\xi$ and $\tilde{\xi}^{(\alpha)} \triangleq (\xi, \dot{\xi}, \ddot{\xi}, \ldots, \xi^{(\alpha)})$ the truncation at the finite order $\alpha \in \mathbb{N}$ of the previous sequence. 
The system (1) is flat at a point $(x_0, \bar{u}_0) \in \mathcal{X}$ if and only if there exist a vector $z = (z_1, \ldots, z_m) \in \mathbb{R}^m$, two integers $\rho$ and $\nu$ and mappings $\psi$ defined on a neighbourhood $\mathcal{V}$ of $(x_0, \bar{u}_0)$ in $\mathcal{X}$ and $\varphi = (\varphi_0, \varphi_1, \ldots)$ defined on a neighbourhood $\mathcal{W}$ of $\bar{z} \triangleq (z, \dot{z}, \ddot{z}, \ldots) \triangleq \psi(x_0, \bar{u}_0)$ in $\mathbb{R}_\infty^m$ such that:

(1) $z = \psi(x, \bar{u}^{(\nu)}) \in \mathcal{W}$

(2) $z_1, \ldots, z_m$ and their successive derivatives are linearly independent in $\mathcal{W}$
+ +### 2.2 Fault Detection and Isolation + +For the flat system (1), we suppose that the vector $y^s = (y_1^s, \ldots, y_p^s)^T$ is measured by sensors $S_1, \ldots, S_p$ respectively. We also suppose that the flat output $z$ is part of these measurements according, without loss of generality, to + +$$ z^s = (y_1^s, \ldots, y_m^s)^T. \quad (3) $$ + +Moreover, the value of the input vector $u = (u_1, \ldots, u_m)^T$, corresponding to the actuators $A_1, \ldots, A_m$, is assumed to be available at every time. We now propose a new definition of the notion of residue that generalizes the one introduced by Martínez-Torres et al. (2014). + +According to (2), the state and input read: + +$$ x^z = \varphi_0(\overline{z^s^{(\rho)}}), \quad u^z = \varphi_1(\overline{z^s^{(\rho+1)}}) \quad (4) $$ + +where the superscript $z$ indicates that they are evaluated as functions of the measurements $z^s$ and, according to (1), + +$$ y_k^z \triangleq h_k(\varphi_0(\overline{z^s^{(\rho)}}), \varphi_1(\overline{z^s^{(\rho+1)}})) \quad (5) $$ + +is the virtual value of $y_k$ computed via the measured flat output $z^s$. + +Note that the first $m$ components of $y^z$ are equal to the corresponding components of $z^s$: + +$$ y^z = (\overline{z^s}, \tilde{h}(\varphi_0(\overline{z^s}), \varphi_1(\overline{z^s})))^T \quad (6) $$ + +with $\tilde{h} = (h_{m+1}(\varphi_0(\overline{z^s}), \varphi_1(\overline{z^s})), \dots, h_p(\varphi_0(\overline{z^s}), \varphi_1(\overline{z^s})))^T$. **Definition 1.** The $k$th-sensor residue $R_{S_k}$ and $l$th-input residue $R_{A_l}$, for $k=1,\dots,p$ and $l=1,\dots,m$, are given by: + +$$ R_{S_k} = y_k^s - y_k^{\tilde{z}}, \quad R_{A_l} = u_l - u_l^{\tilde{z}}. 
\quad (7) $$ + +In total, we have $p+m$ residues for a single flat output $z^s$ and we denote the full residue vector by: + +$$ r = (R_{S_1}, \dots, R_{S_m}, R_{S_{m+1}}, \dots, R_{S_p}, R_{A_1}, \dots, R_{A_m})^T \\ = (r_1, \dots, r_m, r_{m+1}, \dots, r_p, r_{p+1}, \dots, r_{p+m})^T \quad (8) $$ + +and according to (6) + +$$ r = (0, \dots, 0, R_{S_{m+1}}, \dots, R_{S_p}, R_{A_1}, \dots, R_{A_m})^T \\ = (0, \dots, 0, r_{m+1}, \dots, r_p, r_{p+1}, \dots, r_{p+m})^T. \quad (9) $$ + +Measured and calculated variables are illustrated in Fig. 1. + +A residue who is always equal to zero indicates that it cannot be affected by faults on one of the sensors or actuators. Then, we eliminate it and truncate the residue vector to keep the last $p$ components only. This truncated vector is denoted by $r_\tau$: + +$$ r_\tau = (R_{S_{m+1}}, \dots, R_{S_p}, R_{A_1}, \dots, R_{A_m})^T \\ = (r_{\tau_1}, r_{\tau_2}, \dots, r_{\tau_p})^T. \quad (10) $$ + +**Hypothesis:** From now on, we assume that there is only one fault at a time affecting the sensors or actuators. + +In practice, due to the presence of noises on sensors and actuators, the successive derivatives of $z^s$ may not be +---PAGE_BREAK--- + +Fig. 1. Flatness-based residual generation + +defined. We assume that they are computed via a high- +gain observer, possibly completed by a low-pass filter as in +Martínez-Torres et al. (2014) to improve its robustness. +Moreover, a threshold is associated to each residue. In +the non faulty case, the residues in (10) will not exceed +their thresholds. If, otherwise, at least one of the residues +exceeds its threshold then a fault alert is launched. If +several residues in (10) trigger an alert at the same time, a +fault alarm signature, defined below, is required to isolate +the fault. + +For this purpose, we introduce the so-called *signature matrix*: + +*Definition 2.* (Signature matrix). 
Given the vector of residues $r_{\tau}$ defined in (10) and $\zeta = (y_1^s, \dots, y_p^s, u_1, \dots, u_m)^T \in \mathbb{R}^{p+m}$ the vector of available measurements. We define by the *signature matrix* associated to $z^s$, the matrix **S** given by: + +$$ +\mathbf{S} = \begin{pmatrix} +\sigma_{1,1} & \sigma_{1,2} & \cdots & \sigma_{1,p+m} \\ +\vdots & \vdots & \ddots & \vdots \\ +\sigma_{p,1} & \sigma_{p,2} & \cdots & \sigma_{p,p+m} +\end{pmatrix} \quad (11) +$$ + +with + +$$ +\sigma_{i,j} \triangleq \begin{cases} 0 & \text{if } \frac{\partial r_{\tau_i}}{\partial \zeta_j^{(\varrho)}} = 0 \quad \forall \varrho \in \{0, 1, \dots\} \\ 1 & \text{if } \exists \varrho \in \{0, 1, \dots\} \text{ s.t. } \frac{\partial r_{\tau_i}}{\partial \zeta_j^{(\varrho)}} \neq 0 \end{cases} \tag{12} +$$ + +*Remark 1.* Each column $\Sigma_j$ of the signature matrix $\mathbf{S}$ indicates whether a residue $r_{\tau_i}$ is or is not functionally affected by a fault on the measurement $\zeta_j$. So in (12), $\sigma_{i,j} = 0$ means that the residue $r_{\tau_i}$ is not affected by a fault on the measurement $\zeta_j$ and $\sigma_{i,j} = 1$ means that the residue may be affected. + +*Definition 3.* A column $\Sigma_j$ of the signature matrix $\mathbf{S}$ is called *fault alarm signature* or simply *signature*, associated to the sensor/actuator $\zeta_j$. + +From the signature matrix **S** we propose the following +definitions of detectability and isolability in the flatness +context: + +*Definition 4.* (Detectability). A fault on a sensor/actuator $\zeta_j$ is detectable if, and only if there exists at least one $i \in \{1, \dots, p\}$ such that $\sigma_{i,j} = 1$. + +*Definition 5.* (Isolability). A fault on a sensor $S_k$, +$k = 1, \dots, p$, is said *isolable* if, and only if, its correspond- +ing fault alarm signature $\Sigma_k$ in the signature matrix $S$ is +distinct from the others, i.e. + +$$ +\Sigma_k \neq \Sigma_j, \quad \forall j = 1, \dots, p+m, \quad j \neq k. 
\tag{13} +$$ + +An isolable fault on the actuator $A_l$, for $l = 1, \dots, m$, is +defined analogously: + +$$ +\Sigma_{p+l} \neq \Sigma_j, \quad \forall j = 1, \dots, p+m, \quad j \neq p+l. \quad (14) +$$ + +We define $\mu$ as the number of distinct signatures of the +signature matrix $\mathbf{S}$ associated to $z^s$. Then, $\mu$ is the number +of isolable faults associated to $z^s$. + +A more general, but much more complicated, definition of isolability in the structured residual context of polynomial systems has been introduced in Staroswiecki and Comtet-Varga (2001), based on elimination techniques. + +Definition 5 means that if the signature matrix $\mathbf{S}$ has two identical signatures, i.e. $\Sigma_i = \Sigma_j$, for two different sensors/actuators $\zeta_i \neq \zeta_j$, then we cannot make a decision on the faulty device, hence the fault is detected but cannot be isolated. Thus, the number of isolated faults is equal to the number of distinct signatures in the matrix $\mathbf{S}$. + +## 2.3 The Example of the three tank System + +We consider a three tank system made up with three cylindrical tanks of cross-sectional area S, connected to each other by means of cylindrical pipes of section Sn, and two pumps P1 and P2 that supply tanks T1 and T2. These three tanks are also connected to a central reservoir through pipes (see Fig. 2). + +The model is given by: + +$$ +\dot{x}_1 = -Q_{10}(x_1) - Q_{13}(x_1, x_3) + u_1 \quad (15) +$$ + +$$ +\dot{x}_2 = -Q_{20}(x_2) + Q_{32}(x_2, x_3) + u_2 +$$ + +$$ +\dot{x}_3 = Q_{13}(x_1, x_3) - Q_{32}(x_2, x_3) - Q_{30}(x_3) \quad (17) +$$ + +where the state variables $x_i$, $i = 1, 2, 3$ represent the water level of each tank, $Q_{i0}$, $i = 1, 2, 3$ the outflow between each tank and the central reservoir, $Q_{13}$ is the outflow between tanks $T_1$ and $T_3$ and $Q_{32}$ the outflow between tanks $T_3$ and $T_2$, $u_1$ and $u_2$ are the incoming flows by unit of surface of each pump. 
+ +We assume the following inequalities to avoid singularities¹: + +$$ +x_1 > x_3 > x_2. +$$ + +We consider that the valves connecting tanks $T_1$ and $T_3$ +with the central reservoir are closed, i.e. $Q_{10} \equiv 0$ and +$Q_{30} \equiv 0$. The expressions of $Q_{13}$, $Q_{32}$ and $Q_{20}$ are given +by: + +$$ +Q_{13}(x_1, x_3) = a_{z1} \sqrt{2g(x_1 - x_3)} \quad (18) +$$ + +$$ +Q_{20}(x_2) = a_{z2} \sqrt{2g(x_2)} +$$ + +$$ +Q_{32}(x_2, x_3) = a_{z3} \sqrt{2g(x_3 - x_2)} \quad (20) +$$ + +¹ According to the *Remark 1*, the point $\bar{x} \in \mathcal{X}$ s.t. $x_1 = x_2 = x_3$ is an equilibrium point which is not first order controllable, then it is a point of intrinsic flatness singularity. +---PAGE_BREAK--- + +Fig. 2. *Three Tank System*, Source: (Noura et al., 2009) + +where $a_{zr}$, $r = 1, 2, 3$, is the flow coefficient and $g$ the gravitational force. Each tank $T_i$ is equipped with a sensor $\mathbf{S}_i$ to measure its level $x_i$. Hence, the measured output is: + +$$y^s = (y_1^s, y_2^s, y_3^s)^T = (x_1^s, x_2^s, x_3^s)^T \quad (21)$$ + +The system (15)-(16)-(17) is flat with $z = (x_1, x_3)^T = (z_1, z_2)^T$ as flat output. The measured flat output is then given by $z^s = (y_1^s, y_3^s)^T = (z_1^s, z_2^s)^T$. In order to construct the vector of residues, using (4) and (5), we set: + +$$\begin{aligned} +y_1^z &= z_1^s \\ +y_2^z &= z_2^s - \frac{1}{2g} \left( \frac{a_{z1} \sqrt{2g(z_1^s - z_2^s) - \dot{z}_2^s}}{a_{z3}} \right)^2 \\ +y_3^z &= z_2^s \\ +u_1^z &= \dot{z}_1^s + a_{z1} \sqrt{2g(z_1^s - z_2^s)} \\ +u_2^z &= \dot{y}_2^z - a_{z3} \sqrt{2g(z_2^s - y_2^z)} + a_{z2} \sqrt{2gy_2^z}. +\end{aligned}$$ + +According to (7), the vector of residues, associated to $z^s$, is then given by: + +$$r = \begin{pmatrix} R_{S_1} \\ R_{S_2} \\ R_{S_3} \\ R_{A_1} \\ R_{A_2} \end{pmatrix} = \begin{pmatrix} y_1^s \\ y_2^s \\ y_3^s \\ u_1 \\ u_2 \end{pmatrix} - \begin{pmatrix} y_1^z \\ y_2^z \\ y_3^z \\ u_1^z \\ u_2^z \end{pmatrix}. 
\quad (22)$$ + +However, residues $R_{S_1}$ and $R_{S_3}$ are identically zero: + +$$\begin{aligned} +R_{S_1} &= y_1^s - y_1^z = z_1^s - z_1^s = 0 \\ +R_{S_3} &= y_3^s - y_3^z = z_2^s - z_2^s = 0 +\end{aligned} \quad (23)$$ + +hence, according to (10), the vector $r$ is truncated to: + +$$r_\tau = (R_{S_2}, R_{A_1}, R_{A_2})^T = (r_{\tau_1}, r_{\tau_2}, r_{\tau_3})^T. \quad (24)$$ + +Therefore, the signature matrix $\mathbf{S}$, associated to $z^s$, is constructed as follows: + +- All the residues in (24) depend on the measurement of $z^s = (y_1^s, y_3^s)^T$ then the first and the third columns of the signature matrix contain only ones: + +$$\sigma_{i,1} = \sigma_{i,3} = 1, \forall i = 1, 2, 3$$ + +- Only residue $r_{\tau_1}$ depends on $y_2^s$ and its successive derivatives, then the second column will be such that: + +$$\sigma_{1,2} = 1 \text{ and } \sigma_{i,2} = 0, i = 2, 3$$ + +- Since $r_{\tau_2}$ depends only on $u_1$ and $r_{\tau_3}$ depends only on $u_2$, then column 4 and column 5 of $\mathbf{S}$ are such that: + +$$\sigma_{2,4} = 1 \text{ and } \sigma_{i,4} = 0 \forall i = 1, \dots, 3, i \neq 2$$ + +and + +$\sigma_{3,5} = 1$ and $\sigma_{i,5} = 0 \forall i = 1, \dots, 3, i \neq 3$ + +respectively. + +Hence, the signature matrix, associated to $r_\tau$, is given by: + +$$\mathbf{S} = \begin{pmatrix} 1 & 1 & 1 & 0 & 0 \\ 1 & 0 & 1 & 1 & 0 \\ 1 & 0 & 1 & 0 & 1 \end{pmatrix}. \quad (25)$$ + +According to definition 4, all faults on the three tank system's sensors and actuators are detectable. Since fault alarm signatures $\Sigma_2$, $\Sigma_4$ and $\Sigma_5$ are distinct, then, according to definition 5, faults on sensor $\mathbf{S}_2$ and actuators $\mathbf{A}_1$ and $\mathbf{A}_2$ are isolable. This reflects the fact that if, at some point during system operation, a fault alarm is launched with the signature $\Sigma_2$ then we conclude that the sensor $\mathbf{S}_2$ is faulty. 
However, if we obtain a signature like $\Sigma_1$, the fault could be on the sensor $\mathbf{S}_1$ or $\mathbf{S}_3$, since signatures $\Sigma_1$ and $\Sigma_3$ are identical. Then, a fault on $\mathbf{S}_1$ or $\mathbf{S}_3$ cannot be isolated. To conclude, this example shows that the isolability property is strongly conditioned by the dependence of the flat output with respect to the measured variables. This motivates the study of the choice of flat outputs in the next section.
+
+**Remark 2.** In Nagy et al. (2009), it has been shown that system (15)-(16)-(17) is observable through $x_1$ only and that $x_2$ and $x_3$ can be estimated using $x_1$ given the measurements of $u_1$ and $u_2$, leading to different isolability results. The reader may refer to this article for more details. Note that, here, the measurements of $u_1$ and $u_2$ are not necessary to guarantee the $x_2$-isolability.
+
+### 3. FLAT OUTPUT SELECTION
+
+In order to get more isolability of the system's sensors and actuators, the authors in Martínez-Torres et al. (2014) propose to increase the number of residues by using several flat outputs. These flat outputs must be *independent* in the sense that when we use them together we gain more isolability of faults. In this section, we propose a characterization of the relation between different flat outputs using a so-called *augmented signature matrix*. This characterization leads to a decision concerning the choice of flat outputs that are useful for the isolability.
+
+According to definition 5, the number $\mu$ of faults isolable by a flat output $z$ is equal to the number of distinct signatures $\Sigma_k$ of its signature matrix. Then, in order to get more isolability of faults, we need to increase the number of distinct signatures. This is possible when different projections of the system's output $y$ are available that are flat outputs. For this purpose, we introduce definitions 6 and 7. 
+ +In the following, we denote the $i^{th}$ element of the set of q flat output vectors $Z_i$ by $Z_i = (z_{i1}, \dots, z_{im})^T$. + +*Definition 6.* (Augmented signature matrix). Let $Z_1, \dots, Z_q$ be q different flat output vectors of the flat system (1), such that $Z_i = \text{pr}_{\mathbb{R}^m}(y)$. The *augmented signature matrix* $\tilde{\mathbf{S}}$ associated to $Z_1, \dots, Z_q$ is defined by: + +$$\tilde{\mathbf{S}} = \begin{pmatrix} \mathbf{S}_1 \\ \mathbf{S}_2 \\ \vdots \\ \mathbf{S}_q \end{pmatrix} \quad (26)$$ +---PAGE_BREAK--- + +where $\mathbf{S}_i$ is the signature matrix associated to the flat output vector $Z_i$. + +The choice of flat output vectors is not arbitrary. They must be independent in the sense given by the following definition: + +*Definition 7.* (Independence). Let $\tilde{\mathbf{S}}$ be the augmented signature matrix associated to $Z_1$ and $Z_2$: + +$$ \tilde{\mathbf{S}} = \begin{pmatrix} \mathbf{S}_1 \\ \mathbf{S}_2 \end{pmatrix}, $$ + +$\mu_i, i = 1, 2$, the number of distinct signatures of the matrix $\mathbf{S}_i$ and $\tilde{\mu}$ the number of distinct signatures of the augmented matrix $\tilde{\mathbf{S}}$. We say that $Z_1$ and $Z_2$ are *independent* if, and only if + +$$ \tilde{\mu} > \mu_1 \quad \text{and} \quad \tilde{\mu} > \mu_2. \tag{27} $$ + +Definition 7 means that two flat outputs are independent if, by using them together, the number of distinct signatures increases which corresponds to the number of isolated faults. If the condition (27) is not satisfied then the combination of $Z_1$ and $Z_2$ is not helpful for the isolability, and we have to find another combination by calculating more flat outputs. To conclude, the condition of full isolability is given by the following proposition: + +*Proposition 2.* Let $Z_1, \dots, Z_q$ be $q$ different flat output vectors of the system (1). 
A full isolability of faults on sensors and actuators is achieved if the augmented matrix
+
+$$ \tilde{\mathbf{S}} = \begin{pmatrix} \mathbf{S}_1 \\ \mathbf{S}_2 \\ \vdots \\ \mathbf{S}_q \end{pmatrix} $$
+
+has $p+m$ distinct signatures, i.e. $\tilde{\mu} = p+m$.
+
+# 4. APPLICATION TO THE THREE TANK SYSTEM
+
+Back to the three tank system presented in section 2.3, we denote by $Z_1$ the flat output vector $Z_1 = (z_{11}, z_{12})^T = (x_1, x_3)^T$. The corresponding vector of residues is given by (24). We recall the signature matrix associated to $Z_1$, and we denote it by $\mathbf{S}_1$:
+
+$$ \mathbf{S}_1 = \begin{pmatrix} 1 & 1 & 1 & 0 & 0 \\ 1 & 0 & 1 & 1 & 0 \\ 1 & 0 & 1 & 0 & 1 \end{pmatrix} \tag{28} $$
+
+We also recall that, according to definition 5, faults on sensors $\mathbf{S}_1$ and $\mathbf{S}_3$ cannot be isolated. The number of distinct signatures of $\mathbf{S}_1$ is $\mu_1 = 3$.
+
+In order to increase the number of isolable faults, we consider $Z_2 = (z_{21}, z_{22})^T = (x_2, x_3)^T$ another flat output vector of the three tank system. It is measured by sensors $\mathbf{S}_2$ and $\mathbf{S}_3$, i.e. $Z_2^s = (z_{21}^s, z_{22}^s)^T = (y_2^s, y_3^s)^T$. To construct the vector of residues associated to $Z_2^s$ and its signature matrix, we set, using (4) and (5):
+
+$$
+\begin{align*}
+y_1^{Z_2} &= z_{22}^s + \frac{1}{2g} \left( \frac{a_{z3} \sqrt{2g(z_{22}^s - z_{21}^s)} + \dot{z}_{22}^s}{a_{z1}} \right)^2 \\
+y_2^{Z_2} &= z_{21}^s \\
+y_3^{Z_2} &= z_{22}^s \\
+u_1^{Z_2} &= \dot{y}_{1}^{Z_2} + a_{z1} \sqrt{2g(y_{1}^{Z_2} - z_{22}^s)} \\
+u_2^{Z_2} &= \dot{y}_{2}^{Z_2} - a_{z3} \sqrt{2g(z_{22}^s - y_{2}^{Z_2})} + a_{z2} \sqrt{2gy_{2}^{Z_2}}. 
+
\end{align*}
$$
+
+Therefore, as shown for the flat output $Z_1$, residues $R_{S_2}^{Z_2}$ and $R_{S_3}^{Z_2}$ are identically zero and the truncated vector of residues (10) reads:
+
+$$ r_{\tau}^{Z_2} = \begin{pmatrix} R_{\mathbf{S}_1}^{Z_2} \\ R_{A_1}^{Z_2} \\ R_{A_2}^{Z_2} \end{pmatrix} = \begin{pmatrix} y_1^s \\ u_1 \\ u_2 \end{pmatrix} - \begin{pmatrix} y_1^{Z_2} \\ u_1^{Z_2} \\ u_2^{Z_2} \end{pmatrix}. \tag{29} $$
+
+Hence, the signature matrix associated to $Z_2$ is given by:
+
+$$ \mathbf{S}_2 = \begin{pmatrix} 1 & 1 & 1 & 0 & 0 \\ 0 & 1 & 1 & 1 & 0 \\ 0 & 1 & 1 & 0 & 1 \end{pmatrix}. \tag{30} $$
+
+Signatures $\Sigma_1, \Sigma_4$ and $\Sigma_5$ in the matrix $\mathbf{S}_2$ are distinct, then, according to definition 5, faults on sensor $\mathbf{S}_1$ and actuators $\mathbf{A}_1$ and $\mathbf{A}_2$ are isolable by the flat output $Z_2$. Moreover, the number of distinct signatures of $\mathbf{S}_2$ is $\mu_2 = 3$. However, since signatures $\Sigma_2$ and $\Sigma_3$ are identical, then faults on sensors $\mathbf{S}_2$ and $\mathbf{S}_3$ cannot be isolated.
+
+It remains to be verified whether the two flat outputs $Z_1$ and $Z_2$ are independent.
+
+The augmented signature matrix associated to $Z_1$ and $Z_2$ is given by:
+
+$$ \tilde{\mathbf{S}} = \begin{pmatrix} 1 & 1 & 1 & 0 & 0 \\ 1 & 0 & 1 & 1 & 0 \\ 1 & 0 & 1 & 0 & 1 \\ 1 & 1 & 1 & 0 & 0 \\ 0 & 1 & 1 & 1 & 0 \\ 0 & 1 & 1 & 0 & 1 \end{pmatrix}. \tag{31} $$
+
+The number of distinct fault alarm signatures of $\tilde{\mathbf{S}}$ is $\tilde{\mu} = 5$, and we have
+
+$$ \tilde{\mu} > \mu_1 \quad \text{and} \quad \tilde{\mu} > \mu_2. $$
+
+Then, according to definition 7, the flat output vectors $Z_1$ and $Z_2$ are independent. Moreover, since $\tilde{\mu} = p+m$, the flat output vectors $Z_1$ and $Z_2$ ensure full isolability of faults on the three tank system.
+
+Simulation results that confirm the effectiveness of this approach can be found in Martínez-Torres et al. (2013).
+
+# 5. 
CONCLUSION + +The current paper introduces a novel and rigorous definition of the isolability of faults affecting a system's sensors and actuators, using the flatness-based FDI approach. The described condition of isolability provides an efficient way to select flat outputs that are useful for fault isolation. Our results are tested and validated using the three tank system. Future work should focus on the development of a method that calculates independent flat outputs directly. +---PAGE_BREAK--- + +REFERENCES + +Chen, J., Li, H., Sheng, D., and Li, W. (2015). A hybrid data-driven modeling method on sensor condition monitoring and fault diagnosis for power plants. *International Journal of Electrical Power & Energy Systems*, 71, 274-284. + +Diversi, R., Simani, S., and Soverini, U. (2002). Robust residual generation for dynamic processes using decoupling technique. In *Proceedings of the International Conference on Control Applications*, volume 2, 1270–1275. IEEE. + +Flies, M., Lévine, J., Martin, P., and Rouchon, P. (1999). A lie-backlund approach to equivalence and flatness of nonlinear systems. *IEEE Transactions on automatic control*, 44(5), 922–937. + +Izadian, A. and Khayyer, P. (2010). Application of kalman filters in model-based fault diagnosis of a dc-dc boost converter. In *IECON 2010-36th Annual Conference on IEEE Industrial Electronics Society*, 369–372. IEEE. + +Kaminski, Y.J., Lévine, J., and Ollivier, F. (2018). Intrinsic and apparent singularities in differentially flat systems, and application to global motion planning. *Systems & Control Letters*, 113, 117–124. + +Kóscielny, J.M., Syfert, M., Rostek, K., and Sztyber, A. (2016). Fault isolability with different forms of the faults-symptoms relation. *International Journal of Applied Mathematics and Computer Science*, 26(4), 815–826. + +Levine, J. (2009). *Analysis and control of nonlinear systems: A flatness-based approach*. Springer Science & Business Media. 
+ +Martínez-Torres, C., Lavigne, L., Cazaurang, F., Alcorta-García, E., and Díaz-Romero, D.A. (2013). Fault detection and isolation on a three tank system using differential flatness. In *2013 European Control Conference (ECC)*, 2433–2438. IEEE. + +Martínez-Torres, C., Lavigne, L., Cazaurang, F., Alcorta-García, E., and Díaz-Romero, D.A. (2014). Flatness-based fault tolerant control. *Dyna*, 81(188), 131–138. + +Nagy, A.M., Marx, B., Mourot, G., Schutz, G., and Ragot, J. (2009). State estimation of the three-tank system using a multiple model. In *Proceedings of the 48th IEEE Conference on Decision and Control (CDC) held jointly with 2009 28th Chinese Control Conference*, 7795–7800. IEEE. + +Noura, H., Theilliol, D., Ponsart, J.C., and Chamseddine, A. (2009). *Fault-tolerant control systems: Design and practical applications*. Springer Science & Business Media. + +Staroswiecki, M. and Comtet-Varga, G. (2001). Analytical redundancy relations for fault detection and isolation in algebraic dynamic systems. *Automatica*, 37(5), 687–699. + +Suryawan, F., De Doná, J., and Seron, M. (2010). Fault detection, isolation, and recovery using spline tools and differential flatness with application to a magnetic levitation system. In *2010 Conference on Control and Fault-Tolerant Systems (SysTol)*, 293–298. IEEE. + +Thirumarimurugan, M., Bagyalakshmi, N., and Paarkavi, P. (2016). Comparison of fault detection and isolation methods: A review. In *2016 10th International Conference on Intelligent Systems and Control (ISCO)*, 1–6. IEEE. + +Tousi, M. and Khorasani, K. (2011). Robust observer-based fault diagnosis for an unmanned aerial vehicle. In *2011 IEEE International Systems Conference*, 428–434. IEEE. + +Zhou, Y., Xu, G., and Zhang, Q. (2014). Overview of fault detection and identification for non-linear dynamic systems. In *2014 IEEE International Conference on Information and Automation (ICIA)*, 1040–1045. IEEE. 
\ No newline at end of file diff --git a/samples_new/texts_merged/4239587.md b/samples_new/texts_merged/4239587.md new file mode 100644 index 0000000000000000000000000000000000000000..593a83e6ad0c1e10a20d61b7dc2322189442cf9b --- /dev/null +++ b/samples_new/texts_merged/4239587.md @@ -0,0 +1,872 @@ + +---PAGE_BREAK--- + +RECOVERING CONDUCTIVITY AT THE BOUNDARY IN +THREE-DIMENSIONAL ELECTRICAL IMPEDANCE +TOMOGRAPHY + +GEN NAKAMURA + +Graduate school of Science, Hokkaido University +Sapporo 060-0810, Japan + +PÄIVI RONKANEN + +Department of Physics and Mathematics +University of Eastern Finland +FIN-70211 Kuopio, Finland + +SAMULI SILTANEN + +Department of Mathematics and Statistics +FI-00014 University of Helsinki, Finland + +KAZUMI TANUMA + +Department of Mathematics, Graduate School of Engineering +Gunma University +Kiryu 376-8515, Japan + +(Communicated by Matti Lassas) + +**ABSTRACT.** The aim of electrical impedance tomography (EIT) is to reconstruct the conductivity values inside a conductive object from electric measurements performed at the boundary of the object. EIT has applications in medical imaging, nondestructive testing, geological remote sensing and subsurface monitoring. Recovering the conductivity and its normal derivative at the boundary is a preliminary step in many EIT algorithms; Nakamura and Tanuma introduced formulae for recovering them approximately from localized voltage-to-current measurements in [Recent Development in Theories & Numerics, International Conference on Inverse Problems 2003]. The present study extends that work both theoretically and computationally. As a theoretical contribution, reconstruction formulas are proved in a more general setting. On the computational side, numerical implementation of the reconstruction formulae is presented in three-dimensional cylindrical geometry. 
These experiments, based on simulated noisy EIT data, suggest that the conductivity at the boundary can be recovered with reasonable accuracy using practically realizable measurements. Further, the normal derivative of the conductivity can also be recovered in a similar fashion if measurements from a homogeneous conductor (dummy load) are available for use in a calibration step. + +2000 Mathematics Subject Classification. Primary: 35R30; Secondary: 65N21. +Key words and phrases. Electrical impedance tomography, boundary determination, localized Dirichlet to Neumann map, inverse conductivity problem. +---PAGE_BREAK--- + +1. **Introduction.** The aim of Electrical Impedance Tomography (EIT) is imaging the conductivity distribution inside an unknown body from electrical measurements at the boundary. Applications of EIT include medical imaging, nondestructive testing and subsurface monitoring, see [6, 7, 13]. We introduce a new practical solution method for the subproblem of recovering the conductivity and its normal derivative at the boundary of a three-dimensional target from localized measurements. This is required in several EIT algorithms as the first step before full reconstruction. + +Assume given a bounded domain $\Omega \subset \mathbb{R}^3$ with Lipschitz boundary $\partial\Omega$ and a real-valued conductivity $\gamma \in L^\infty(\Omega)$ satisfying $\gamma(x) \ge c > 0$ almost everywhere in $\Omega$. We consider applying a voltage potential $f$ on the boundary and solving the Dirichlet problem + +$$ (1) \qquad \left\{ \begin{array}{ll} \nabla \cdot (\gamma \nabla u) = 0 & \text{in } \Omega, \\ u = f & \text{on } \partial\Omega, \end{array} \right. $$ + +where $u = u(x)$ is electric potential. The resulting distribution of current through the boundary is + +$$ (2) \qquad \Lambda_{\gamma} f = \gamma \frac{\partial u}{\partial \nu} |_{\partial \Omega} $$ + +where $\Lambda_{\gamma}$ is the Dirichlet-to-Neumann (DN) map and $\nu$ is the outward unit normal. 
The problem is to determine $\gamma$ from the knowledge of $\Lambda_{\gamma}$. This mathematical formulation was introduced by Calderón in [12]. + +Practical measurements are typically done using a finite number of electrodes on the surface of the body, and various data models including electrodes are discussed in [14, 44]. In this study we use the continuum model (1) for simplicity. However, the effect of electrodes is taken into account by considering the maximum frequency of spatial oscillations in Dirichlet data $f$ in (1) that can be approximated with reasonable accuracy using a given number of electrodes. + +The following cylindrical geometry is frequently used in our discussion. Take $\ell > 0$ and $R > 0$ and define + +$$ (3) \qquad \Omega := \{(x_1^2 + x_2^2)^{1/2} < R, |x_3| < \ell\} = \Omega' \times [-\ell, \ell] \subset \mathbb{R}^3, $$ + +where $\Omega' = D(0, R) \subset \mathbb{R}^2$. Denote the lateral boundary surface of $\Omega$ by + +$$ \Gamma := \{(x_1^2 + x_2^2)^{1/2} = R, |x_3| < \ell\} = (\partial\Omega') \times [-\ell, \ell] \subset \partial\Omega. $$ + +Parametrize a neighborhood of $\Gamma$ by boundary normal coordinates $(\tau, s, r)$: + +$$ (4) \qquad x_1 = (R - r) \cos(s/R), \quad x_2 = (R - r) \sin(s/R), \quad x_3 = \tau. $$ + +Then $\Omega$ and $\Gamma$ are given by $0 < r \le R$ and $r = 0$, respectively. + +Given a point $x_0 \in \Gamma$, we wish to recover $\gamma(x_0)$ and $\partial\gamma/\partial\nu(x_0)$ approximately from the (local) knowledge of $\Lambda_\gamma$. Without loss of generality we may put $x_0 = (0, 0, 0)$ in the coordinate system $(\tau, s, r)$. We assume that the conductivity $\gamma$ is once continuously differentiable in a neighborhood of the boundary. 
Let $\eta(\tau, s)$ be any function in $C_0^1(\Gamma)$, choose a unit vector $(t_1, t_2) \in \mathbb{R}^2$ and define + +$$ (5) \qquad \phi_N(\tau, s) = e^{iN(\tau t_1 + st_2)} \eta(\tau, s), $$ + +$$ (6) \qquad \psi_N(\tau, s) = e^{i\frac{N}{2}(\tau t_1 + st_2)} \eta(\tau, s). $$ + +The following formulas can be derived from Theorem 1 in Section 2: +---PAGE_BREAK--- + +$$ (7) \quad \int_{\mathbb{R}^2} \gamma(\tau, s, 0) \eta(\tau, s)^2 d\tau ds = \lim_{N \to \infty} N^{-1} \langle \Lambda_\gamma \phi_N, \overline{\phi_N} \rangle, $$ + +$$ (8) \quad \int_{\mathbb{R}^2} \frac{\partial \gamma}{\partial \nu}(\tau, s, 0) \eta(\tau, s)^2 d\tau ds = \lim_{N \to \infty} \left[ \begin{aligned} & \left(2 - \frac{1+t_1^2-t_2^2}{2RN}\right) \langle \Lambda_\gamma \phi_N, \overline{\phi_N} \rangle \\ & -4 \langle \Lambda_\gamma \psi_N, \overline{\psi_N} \rangle \end{aligned} \right]. $$ + +Practical implementation of formulas (7) and (8) in dimension two is reported in [40]; according to those results, formula (7) can be used reliably for approximate recovery of the conductivity at the boundary, while practical use of formula (8) seems to require an unrealistic number of electrodes. + +This paper has two goals. The theoretical goal is to prove generalizations of formulas (7) and (8) in a more general setting, where the forthcoming formulae (17) and (18) which generalize (7) and (8) contain geometric information about the boundary $\partial\Omega$. Such information may be used for estimating the shape of the boundary from EIT data, but we do not discuss such possibilities further in this work. Uncertainty in domain shape is a significant source of error in EIT reconstructions [30, 20, 1]. + +The computational goal is to implement (7) and (8) numerically in the three-dimensional cylindrical geometry (3) and study the possibilities of using them in practical EIT. We recover the trace of conductivity approximately at the boundary using formula (7) with a finite value of $N$. 
The results suggest that the right hand side of (7) converges as $N \to \infty$ quickly enough for the Dirichlet data (5) to remain only mildly oscillatory. It seems that the frequency of those oscillations is low enough for $\phi_N(\tau, s)$ to be represented reasonably accurately using 64 electrodes.
+
+Practical use of formula (8) to recover the normal derivative of the conductivity seems to be more problematic, as is the case in the two-dimensional situation [40]. The convergence of the right hand side of (8) is too slow for acceptable reconstructions from realistic voltage-to-current measurements. However, our numerical experiments suggest that the difference between the right hand sides of (8) corresponding to a nontrivial conductivity and to a constant conductivity does converge rather quickly as $N \to \infty$, allowing reasonable reconstructions of the normal derivative from realistic data after a calibration step.
+
+Most three-dimensional EIT algorithms for recovering conductivity inside $\Omega$ are iterative methods where the direct problem (1) needs to be solved repeatedly using a numerical algorithm, typically the finite element method (FEM). This is computationally demanding since 3D FEM involves representing the conductivity with a large number of parameters. Out of the few published implementations of 3D EIT we mention the work of Barber, Brown, Metherall and Smallwood [31, 32, 33]; Blue, Goble, Cheney, Isaacson, Newell, Ross and Saulnier [5, 22, 41]; Morucci, Granie, Lei, Chabert and Marsili [34]; Kaipio, Savolainen, P.J. Vauhkonen and M. Vauhkonen [49, 50, 51]; and Wexler [52]. In all these works the reconstruction algorithms need a good initial guess for conductivity inside $\Omega$ in order to converge to the global minimum; at present $\gamma$ is often assumed to be constant near the boundary. 
We believe that the knowledge of conductivity $\gamma$ and its normal derivative at the boundary $\partial\Omega$ helps to design better initial guesses for full reconstruction algorithms. + +Non-iterative 3D EIT algorithms have been suggested as well, see [4, 8, 16], and numerical inclusion detection algorithms are presented in [24, 19, 25]. In such +---PAGE_BREAK--- + +methods it may be important to continue the conductivity artificially outside $\Omega$ in a regular fashion; this involves recovering $\gamma$ and $\partial\gamma/\partial\nu$ at the boundary first. + +The mathematical formulation of the inverse conductivity problem was originally given by Calderón, who solved in [12] a linearized version of the problem. Unique determination of piecewise real-analytic conductivities from the DN map was proved for $\mathbb{R}^n$ with $n \ge 2$ in [29]. The possibility of uniquely determining an infinitely smooth conductivity with $n \ge 3$ was shown in [47]. Later, unique determination in the case $n \ge 3$ has been shown for conductivities having $\frac{3}{2}$ derivatives in [42] (see also the refinement [10]), and in [23] for conductivities allowed to have certain conormal singularities on submanifolds. In dimension $n=2$, uniqueness was proven in [46] for radially symmetric conductivities and in [36] for nonsymmetric, twice weakly differentiable conductivities. Generalization to one derivative was provided in [11], and Calderón's original $L^\infty$ question was solved in [3]. + +Previous theoretical results on the recovery of conductivity and its derivatives at the boundary include [2, 28, 29, 35, 36, 48] starting from infinite precision data measured on the whole boundary, and [9, 27, 37, 38, 39] starting from infinite precision data measured on a part of the boundary. The present work is the first numerical boundary reconstruction result in dimension three. + +This paper is organized as follows. 
In Section 2 we give a proof of formulas (7) and (8) using our main Theorem 1, which in turn is proved in Section 3. In Section 4 we explain how we simulate noisy voltage-to-current data, and in Section 5 we substitute the data to formulas (7) and (8) to study their convergence in practice as $N$ grows. Based on those numerical experiments we introduce and demonstrate a calibrated reconstruction method in Section 6. Finally, we conclude our findings in Section 7. + +**2. Basic theorem and derivation of formulas (7) and (8).** Let $\Omega$ be a bounded domain in $\mathbb{R}^n$ with $n \ge 2$. We assume that the boundary $\partial\Omega$ is Lipschitz and, in addition, locally $C^2$ near a recovery point $x_0 \in \partial\Omega$. Then there exists a $C^2$ diffeomorphism $y = \Psi(x)$ which induces a curvilinear coordinate system $y = (y', y_n) = (y_1, \dots, y_{n-1}, y_n)$ around $x_0$ such that $\Psi(x_0) = 0$ and $\Omega, \partial\Omega$ are given by + +$$ (9) \qquad \Omega = \{y_n > 0\}, \quad \partial\Omega = \{y_n = 0\} $$ + +locally around $y=0$. Let $G = (g_{ij})_{1\le i,j\le n}$ be the metric tensor associated with the diffeomorphism $y = \Psi(x)$, whose components are given by + +$$ (10) \qquad g_{ij} = e_i \cdot e_j. $$ + +Here the natural base related to the curvilinear coordinate system $y$ is formed by + +$$ (11) \qquad e_i = \left[ \frac{\partial x_k}{\partial y_i} \right]_{k \downarrow 1, 2, \dots, n} (i = 1, 2, \dots, n). $$ + +We assume that $y = (y', y_n)$ forms the boundary normal coordinates so that + +$$ (12) \qquad g_{nn} = 1, \quad g_{\alpha n} = g_{n\alpha} = 0 \quad (\alpha = 1, 2, \dots, n-1) $$ + +in a neighborhood of $x_0$ in $\bar{\Omega}$. The contravariant components $g^{ij}$ ($1 \le i,j \le n$) of $G^{-1}$ are defined matrixwise as + +$$ (g^{ij}) = G^{-1}. $$ +---PAGE_BREAK--- + +Furthermore, it is easy to see that + +$$ (13) \qquad g^{ij} = \sum_{k=1}^{n} \frac{\partial y_i}{\partial x_k} \frac{\partial y_j}{\partial x_k}. 
$$ + +It follows from (12) that in a neighborhood of $x_0$ in $\bar{\Omega}$ we have + +$$ (14) \qquad g^{nn} = 1, \quad g^{\alpha n} = g^{n\alpha} = 0 \quad (\alpha = 1, 2, \dots, n-1). $$ + +Let $d$ and $\delta$ be sufficiently small positive numbers so that the expressions (9) for $\Omega$ and $\partial\Omega$ are valid in a relatively open neighborhood $\mathcal{N}$ of $x_0 \in \partial\Omega$, where + +$$ (15) \qquad \mathcal{N} = \{ |y'| < d, 0 \le y_n < 2\delta \} \subset \bar{\Omega}. $$ + +Let $\phi = \phi(y')$ be a phase function which satisfies the following eikonal equation: + +$$ (16) \qquad \sum_{i,j=1}^{n-1} g^{ij}(y', 0) \frac{\partial \phi}{\partial y_i} \frac{\partial \phi}{\partial y_j} = 1. $$ + +We remark that equation (16) can be solved in $\{|y'| < d\}$ by the method of characteristic curves ([17]). When $g^{ij}(y', 0) = \delta_{ij}$ ($i, j = 1, 2, \dots, n-1$), we immediately have $\phi = y' \cdot t'$, where $t' = (t_1, \dots, t_{n-1})$ is any unit vector in $\mathbb{R}^{n-1}$. We shall use this specific phase function below in our numerical experiments. + +**Theorem 1.** Assume that $\gamma \in L^\infty(\Omega)$ is strictly positive: $\gamma \ge c > 0$ (a.e. $x \in \Omega$). Also, suppose that $\gamma = \gamma(y', y_n)$, as a function of $y_n \in [0, 2\delta]$ with values in the space of $L^2(\{|y'| \le d\})$, is right continuous at $y_n = 0$, and that $\nabla_{y'}\gamma(y', 0) \in L^2(\{|y'| \le d\})$. Let $\eta(y')$ be any function in $C_0^1(\mathbb{R}^{n-1})$ compactly supported in $\{|y'| \le d\}$ and define the Dirichlet data $f_N$ and $g_N$ for $N = 1, 2, 3, \dots$ by + +$$ f_N = e^{i N \phi(y')} \eta(y') \Big|_{y=\Psi(x), x \in \partial \Omega}, \qquad g_N = e^{i \frac{N}{2} \phi(y')} \eta(y') \Big|_{y=\Psi(x), x \in \partial \Omega}. 
$$ + +Then (i) and (ii) below hold: + +(i) We have the equality + +$$ (17) \qquad \lim_{N \to \infty} N^{-1} \langle \Lambda_\gamma f_N, \overline{f_N} \rangle = \int_{\mathbb{R}^{n-1}} \gamma(y', 0) \frac{\eta(y')^2}{\sqrt{\det(g^{ij}(y', 0))}} dy'. $$ + +(ii) Suppose that $\gamma = \gamma(y', y_n)$, as a function of $y_n \in [0, 2\delta]$ with values in the space of $L^2(\{|y'| \le d\})$, is right differentiable at $y_n = 0$, and that $\nabla_{y'}\gamma(y', 0) \in L^2(\{|y'| \le d\})$. Then + +$$ \lim_{N \to \infty} \left[ 4\langle \Lambda_\gamma g_N, \overline{g_N} \rangle - 2\langle \Lambda_\gamma f_N, \overline{f_N} \rangle \right] = \int_{\mathbb{R}^{n-1}} \frac{\partial \gamma}{\partial y_n}(y', 0) \frac{\eta(y')^2}{\sqrt{\det(g^{ij}(y', 0))}} dy' $$ + +$$ (18) \qquad + \frac{1}{2} \int_{\mathbb{R}^{n-1}} \gamma(y', 0) \left. \frac{\partial}{\partial y_n} \frac{\sum_{i,j=1}^{n-1} g^{ij} \phi_{y_i} \phi_{y_j} + 1}{\sqrt{\det(g^{ij})}} \right|_{y_n=0} \eta(y')^2 \, dy'. $$ + +*Derivation of formulas (7) and (8) from Theorem 1.* + +Putting $n=3$ and $(y_1, y_2, y_3) = (y', y_3) = (\tau, s, r)$, from (11), (4) and (10) we get + +$$ G = (g_{ij}) = \begin{bmatrix} 1 & 0 & 0 \\ 0 & (1-r/R)^2 & 0 \\ 0 & 0 & 1 \end{bmatrix}, \qquad (g^{ij}) = G^{-1} = \begin{bmatrix} 1 & 0 & 0 \\ 0 & (1-r/R)^{-2} & 0 \\ 0 & 0 & 1 \end{bmatrix} $$ + +and $\det(g^{ij}) = (1 - r/R)^{-2}$. Thus we see that $(\tau, s, r)$ form boundary normal coordinates. Moreover, since $g^{ij}(y', 0) = \delta_{ij}$ ($i, j = 1, 2$), as a solution to the eikonal +---PAGE_BREAK--- + +equation (16) we may choose $\phi = y' \cdot t' = \tau t_1 + s t_2$, where $t' = (t_1, t_2)$ is any unit vector in $\mathbb{R}^2$. Then formula (7) is an immediate consequence of formula (17). 
+ +Noting that $-\frac{\partial \gamma}{\partial r}(\tau, s, 0) = \frac{\partial \gamma}{\partial \nu}(\tau, s, 0)$, where $\nu$ is the outward unit normal vector to the boundary $\partial\Omega$, we obtain + +$$ +\int_{\mathbb{R}^2} \frac{\partial \gamma}{\partial \nu}(\tau, s, 0) \eta(\tau, s)^2 d\tau ds = \lim_{N \to \infty} \left[ 2\langle \Lambda_\gamma \phi_N, \overline{\phi_N} \rangle - 4\langle \Lambda_\gamma \psi_N, \overline{\psi_N} \rangle \right] \\ +- \frac{1+t_1^2-t_2^2}{2R} \int_{\mathbb{R}^2} \gamma(\tau, s, 0) \eta(\tau, s)^2 d\tau ds, +$$ + +which combined with (7) yields (8). + +3. **Proof of Theorem 1.** Let $\zeta(y_n) \in C^\infty([0, \infty))$ satisfy $0 \le \zeta \le 1$, $\zeta(y_n) = 1$ for $0 \le y_n \le \delta$, and $\zeta(y_n) = 0$ for $2\delta \le y_n$. Then from the weak formulation of $\Lambda_\gamma$ it follows that + +$$ +(19) \qquad \langle \Lambda_{\gamma} f_{N}, \overline{f}_{N} \rangle = \int_{\Omega} \gamma \nabla u_{N} \cdot \nabla (\zeta \overline{F}_{N}) \, dx, +$$ + +where $u_N \in H^1(\Omega)$ is the solution to + +$$ +(20) \qquad \nabla \cdot (\gamma \nabla u_N) = 0 \quad \text{in } \Omega, \quad u_N|_{\partial\Omega} = f_N, +$$ + +and $F_N(x)$ is an $H^1(\Omega)$ extension of $f_N$, for which we take + +$$ +(21) \qquad F_N(x) = e^{iN\phi(y')} e^{-Ny_n} \eta(y') \Big|_{y=\Psi(x)}. +$$ + +Put $r_N = u_N - \zeta F_N$. Then we get from (19) + +$$ +\langle \Lambda_{\gamma} f_{N}, \bar{f}_{N} \rangle = \int_{\Omega} \gamma \nabla(\zeta F_{N}) \cdot \nabla(\zeta \bar{F}_{N}) dx + \int_{\Omega} \gamma \nabla r_{N} \cdot \nabla(\zeta \bar{F}_{N}) dx = I_{1} + I_{2}. +$$ + +It suffices to show that + +$$ +(22) \quad \lim_{N \to \infty} N^{-1} I_1 = \int_{\mathbb{R}^{n-1}} \gamma(y', 0) \frac{\eta(y')^2}{\sqrt{\det(g^{ij}(y', 0))}} dy', +$$ + +$$ +(23) \quad \lim_{N \to \infty} N^{-1} I_2 = 0. 
+$$ + +We denote the Jacobian of the diffeomorphism $y = \Psi(x)$ by $\nabla\Psi$, which is given by + +$$ +(24) \qquad \nabla\Psi = \left( \frac{\partial y_i}{\partial x_j} \right)_{i,j=1,2,\dots,n}. +$$ + +Then $\nabla = \nabla_x = {}^t\nabla\Psi\nabla_y$, where the superscript $t$ denotes transposition. By the change of the coordinate systems between $x$ and $y$, integral $I_1$ becomes + +$$ +I_1 = \int_{\mathcal{N}} \gamma(y) {}^t\nabla\Psi\nabla_y(\zeta F_N) \cdot {}^t\nabla\Psi\nabla_y(\zeta \overline{F_N}) |\det\nabla\Psi|^{-1} dy +$$ + +$$ +(25) \qquad = \int_{\mathcal{N}} \gamma(y) \left( \frac{\nabla\Psi{}^t\nabla\Psi}{|\det\nabla\Psi|} \nabla_y(\zeta F_N) \right) \cdot \nabla_y(\zeta \overline{F_N}) dy, +$$ + +where $\mathcal{N}$ is a relatively open neighborhood of $x_0 \in \partial\Omega$ defined by (15). Equations (13) and (24) imply that $|\det\nabla\Psi|^{-1}\nabla\Psi{}^t\nabla\Psi = (\det(g^{ij}))^{-1/2}(g^{ij})$. Henceforth, we use the $n \times n$ symmetric matrix $\tilde{\gamma}$ + +$$ +(26) \qquad \tilde{\gamma}(y) = \gamma(y) (\det(g^{ij}))^{-1/2} (g^{ij}). +$$
---PAGE_BREAK--- + +Then $I_1 = \int_{\mathcal{N}} \tilde{\gamma}(y) \nabla_y (\zeta F_N) \cdot \nabla_y (\zeta \overline{F_N}) dy$. Since $\zeta = 1$ for $0 \le y_n \le \delta$, it is convenient to put $D = \{|y'| \le d, 0 \le y_n \le \delta\}, D' = \{|y'| \le d, \delta \le y_n\}$ and decompose $I_1$ as + +$$ (27) \qquad I_1 = \int_D \tilde{\gamma}(y) \nabla_y F_N \cdot \nabla_y \overline{F_N} dy + \int_{D'} \tilde{\gamma}(y) \nabla_y (\zeta F_N) \cdot \nabla_y (\zeta \overline{F_N}) dy = I_3 + I_4. $$ + +Now (21) implies the following two equations: + +$$ (28) \qquad \nabla_y F_N = \left( N \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} \eta(y') + \begin{bmatrix} \nabla_{y'} \eta \\ 0 \end{bmatrix} \right) e^{i N \phi(y')} e^{-N y_n}, $$ + +$$ (29) \qquad \nabla_y(\zeta F_N) = \left( N \begin{bmatrix} i \nabla_{y'} \phi \\ -1 \end{bmatrix} \zeta(y_n)\eta(y') + \nabla_y(\zeta\eta) \right) e^{i N\phi(y')} e^{-Ny_n}. 
$$ + +Thus we see that + +$$ I_3 = N^2 \int_D \tilde{\gamma}(y) \begin{bmatrix} i\nabla_{y'}\phi \\ -1 \end{bmatrix} \cdot \begin{bmatrix} -i\nabla_{y'}\phi \\ -1 \end{bmatrix} \eta(y')^2 e^{-2Ny_n} dy $$ + +$$ (30) \qquad + \int_D \tilde{\gamma}(y) \begin{bmatrix} \nabla_{y'}\eta \\ 0 \end{bmatrix} \cdot \begin{bmatrix} \nabla_{y'}\eta \\ 0 \end{bmatrix} e^{-2Ny_n} dy. $$ + +In deriving (30), we have used the fact that the term of the order $O(N)$ in the integrand of $I_3$ vanishes, because + +$$ \tilde{\gamma}(y) \begin{bmatrix} \nabla_{y'}\eta \\ 0 \end{bmatrix} \cdot \begin{bmatrix} -i\nabla_{y'}\phi \\ -1 \end{bmatrix} + \tilde{\gamma}(y) \begin{bmatrix} i\nabla_{y'}\phi \\ -1 \end{bmatrix} \cdot \begin{bmatrix} \nabla_{y'}\eta \\ 0 \end{bmatrix} = \tilde{\gamma}(y) \begin{bmatrix} \nabla_{y'}\eta \\ 0 \end{bmatrix} \cdot \begin{bmatrix} 0 \\ -2 \end{bmatrix} = 0, $$ + +the last equality of which follows from (14) and (26). + +After the scaling transformation $z_n = N y_n$ we get for large $N$ + +$$ I_3 = N \int_0^{N\delta} \left( \int_{|y'| 0$ is a constant used to tune the noise amplitude. + +5. **Numerical experiments.** In this section we test the reconstruction formulas (7) and (8) numerically with a sequence of three-dimensional conductivity distributions with increasing complexity. As difficulties arise, we design corrective steps to +---PAGE_BREAK--- + +overcome them. This process leads to a novel noise-robust reconstruction algorithm +that is presented in detail in Section 6. + +For a given conductivity $\gamma$ we define + +$$ (61) \qquad \tilde{g}_N := \frac{1}{N} \int_{\mathbb{R}^2} \overline{\phi_N} \Lambda_\gamma \phi_N d\tau ds. $$ + +As the surface measure on the lateral boundary $\Gamma$ is $d\tau ds$, by formula (7) we have + +$$ \tilde{g}_N \approx \int_{\mathbb{R}^2} \gamma(\tau, s, 0) \eta(\tau, s)^2 d\tau ds . 
$$ + +Furthermore, set + +$$ (62) \qquad \tilde{h}_N := \left(2 + \frac{t_2^2 - t_1^2 - 1}{2NR}\right) \int_{\mathbb{R}^2} \overline{\phi_N} \Lambda_\gamma \phi_N d\tau ds - 4 \int_{\mathbb{R}^2} \overline{\psi_N} \Lambda_\gamma \psi_N d\tau ds $$ + +for any unit vector $(t_1, t_2)$. Then by formula (8) we have + +$$ \tilde{h}_N \approx \int_{\mathbb{R}^2} \frac{\partial \gamma}{\partial \nu}(\tau, s, 0) \eta(\tau, s)^2 d\tau ds. $$ + +We work in the cylindrical geometry (3) with $R = 1$ and $h = 1.7671$. We use standard deviation $\sigma = 0.0001$ in (60), giving relative noise level 0.01 %. + +**5.1. Homogeneous conductivity.** Our first experiment uses simply the homogeneous conductivity distribution $\gamma_0 \equiv 1$. We substitute $\Lambda_{\gamma_0}$ into formula (7) and call the result $\tilde{g}_N^{(0)}$. Then we have $\tilde{g}_N^{(0)} \to 1$ as $N$ grows, and we can study numerically the speed of convergence using various values of the related parameters. + +Let us first get an idea of how large an $N$ is practically useful. Figure 3 shows the Dirichlet data for several values of $N$. Apparently there is hope of representing the data with $N = 20$ with 64 electrodes in an 8 × 8 configuration, but the data with $N = 50$ seems to need way too many electrodes to be practically feasible. This rough estimate is based on the simple idea that each minimum and maximum of the Dirichlet data needs to be evaluated on at least one electrode. Consequently we will restrict our experiments to $0 < N \le 20$. + +Next we examine the convergence rate $\tilde{g}_N^{(0)} \to 1$ as $N$ grows. Figure 4 shows $\tilde{g}_N^{(0)}$ as a function of $N$ computed with finite element meshes with varying numbers of elements. We conclude that the mesh comprising 16285 nodes gives acceptable accuracy in the range $14 \le N \le 20$, and we will use that mesh in the sequel. 
+ +We study the effect of the cut-off function on the speed of convergence by choosing different values for $\epsilon_1$ and $\epsilon_2$ in (57) and (58), respectively. Figure 5 illustrates that using a wider cut-off function leads to faster convergence. Thus there is a trade-off between (a) more accurate reconstruction using a narrow cut-off function that better approximates Dirac's delta, and (b) a higher rate of convergence. + +We proceed to test the reconstruction of the normal derivative. We substitute $\Lambda_{\gamma_0}$ into formula (8) and call the result $\tilde{h}_N^{(0)}$. Then by formula (8) we have $\tilde{h}_N^{(0)} \to 0$ as $N \to \infty$. Line " *" in Figure 6 shows $\tilde{h}_N^{(0)}$ as a function of $N$. We see that $\tilde{h}_N^{(0)}$ converges slowly and the apparent limit value is $-5$ instead of the value 0 predicted by theory. We conclude that more experimenting is needed to find out what's going on. +---PAGE_BREAK--- + +FIGURE 3. Plot of the Dirichlet data $\phi_N$ with four different values of $N$. $t_1 = t_2 = \frac{1}{\sqrt{2}}$ and $\epsilon_1 = \epsilon_2 = 4$. First row: $N = 10$. Second row: $N = 14$. Third row: $N = 20$. Fourth row: $N = 50$. + +**5.2. Radial conductivities with unit trace.** We define a collection of radially varying conductivity distributions when $0 \le r \le \frac{4R}{5}$ (constant distribution when $\frac{4R}{5} < r \le R$) for further testing of formula (62): + +$$ +\begin{aligned} +\gamma_1(\tau, s, r) &:= R - r, && \gamma_1|_{\partial\Omega} = 1, && \frac{\partial\gamma_1}{\partial\nu}|_{\partial\Omega} = 1, \\ +\gamma_2(\tau, s, r) &:= (R - r)^2, && \gamma_2|_{\partial\Omega} = 1, && \frac{\partial\gamma_2}{\partial\nu}|_{\partial\Omega} = 2, \\ +\gamma_3(\tau, s, r) &:= (R - r)^3, && \gamma_3|_{\partial\Omega} = 1, && \frac{\partial\gamma_3}{\partial\nu}|_{\partial\Omega} = 3. 
+\end{aligned} + $$ + +Figure 6 shows the convergence of $\tilde{h}_N$ as function of $N$ computed using formula (62) for $\gamma_1$ and $\gamma_2$ and $\gamma_3$. We see that the various $\tilde{h}_N$ converge slowly to limit values with systematic error of -5. +---PAGE_BREAK--- + +FIGURE 4. Estimated convolution $\tilde{g}_N$ defined in (61) as a function of N corresponding to one node on the boundary with different computational grids, $\epsilon_1 = \epsilon_2 = 4$. Correct value of conductivity is one. Line "* * *": 950 nodes in grid. Line "++": 2946 nodes in grid. Line "- -": 9324 nodes in grid. Line "o o": 16285 nodes in grid. Line "x x": 21385 nodes in grid. + +FIGURE 5. Estimated convolution $\tilde{g}_N$ computed with formula (61) as a function of N corresponding to one node on the boundary with computational grid of 16285 nodes and various $\epsilon_1$ and $\epsilon_2$. Correct value of conductivity is one. Line "* * *": $\epsilon_1 = \epsilon_2 = 2$. Line "++": $\epsilon_1 = \epsilon_2 = 4$. Line "- -": $\epsilon_1 = \epsilon_2 = 6$. Line "o o": $\epsilon_1 = \epsilon_2 = 8$. + +However, the evidence in Figure 6 suggests the relative values of $\tilde{h}_N$ are roughly correct throughout the computational interval $2 \le N \le 20!$ This surprising observation can be used to calibrate the results as follows. Suppose we have available +---PAGE_BREAK--- + +FIGURE 6. Estimated normal derivative $\tilde{h}_N$ computed with formula (62) as a function of N corresponding to one node on the boundary: computational grid of 16285 nodes, $R = 1$ and $\epsilon_1 = \epsilon_2 = 4$. Line “**”: $\gamma_0(\tau, s, r) = 1$ and $\frac{\partial\gamma_0}{\partial\nu}|_{\partial\Omega} = 0$. Line “+-”: $\gamma_1(\tau, s, r) = R-r$ and $\frac{\partial\gamma_1}{\partial\nu}|_{\partial\Omega} = 1$. Line “-”: $\gamma_2(\tau, s, r) = (R-r)^2$ and $\frac{\partial\gamma_2}{\partial\nu}|_{\partial\Omega} = 2$. 
Line “o o”: $\gamma_3(\tau, s, r) = (R-r)^3$ and $\frac{\partial\gamma_3}{\partial\nu}|_{\partial\Omega} = 3$. + +measurements $\Lambda_{\gamma_0}$ from the “dummy load” conductivity $\gamma_0 \equiv 1$. We can compute $\tilde{h}_{20}^{(0)} \approx -5$ corresponding to $\gamma_0$. Since the relative values of $\tilde{h}_N$ are close to correct for $\gamma_1$ and $\gamma_2$ and $\gamma_3$, we suggest that the formula + +$$ (63) \qquad \frac{\partial \gamma}{\partial \nu}(x_0) \approx \tilde{h}_{20}(x_0) - \tilde{h}_{20}^{(0)}(x_0) $$ + +serves as a calibrated reconstruction method of the normal derivative for any conductivity $\gamma$ with trace 1. + +5.3. **Radial conductivities with varying traces.** It remains to study the numerical properties of formula (62) in the case of conductivities whose trace is not 1. To this end, we define a collection of conductivities with varying traces as follows: + +$$ \begin{align*} \gamma_4(\tau, s, r) &:= R - r + 1, & \gamma_4|_{\partial\Omega} &= 2, & \frac{\partial\gamma_4}{\partial\nu}|_{\partial\Omega} &= 1, \\ \gamma_5(\tau, s, r) &:= R - r + 2, & \gamma_5|_{\partial\Omega} &= 3, & \frac{\partial\gamma_5}{\partial\nu}|_{\partial\Omega} &= 1, \\ \gamma_6(\tau, s, r) &:= R - r + 3, & \gamma_6|_{\partial\Omega} &= 4, & \frac{\partial\gamma_6}{\partial\nu}|_{\partial\Omega} &= 1. \end{align*} $$ + +The integral in formula (61) as function of N is shown in Figure 7. The slope of the curve increases when conductivity value on the boundary node increases; the values of the integral actually seem to depend linearly on the conductivity value at the boundary. For instance, for fixed N, the integral corresponding to $\gamma_4$ satisfying $\gamma_4(\tau, s, 0) = 2$ is twice as large as the integral corresponding to $\gamma_1$ satisfying $\gamma_1(\tau, s, 0) = 1$. + +Same phenomenon can be found when normal derivatives are estimated, see Fig. 8. 
Hence we suggest that $\tilde{h}_{20}^{(0)}$ in the calibrated algorithm (63) should be multiplied +---PAGE_BREAK--- + +FIGURE 7. The values of the integral in formula (61) as a function of N corresponding to one node on the boundary with computational grid of 16285 nodes, $R = 1$ and $\epsilon_1 = \epsilon_2 = 4$. Line "* * *": conductivity distribution $\gamma_1(\tau, s, r) = R - r$, on the boundary $\gamma_1(\tau, s, 0) = 1$. Line " + +": conductivity distribution $\gamma_4(\tau, s, r) = (R - r) + 1$, on the boundary $\gamma_4(\tau, s, 0) = 2$. Line "-": conductivity distribution $\gamma_5(\tau, s, r) = (R - r) + 2$, on the boundary $\gamma_5(\tau, s, 0) = 3$. Line "o-o": conductivity distribution $\gamma_6(\tau, s, r) = (R - r) + 3$, on the boundary $\gamma_6(\tau, s, 0) = 4$. + +by estimated conductivity $\tilde{g}_{20}(x_0)$. + +$$ (64) \qquad \frac{\partial \gamma}{\partial \nu}(\tau, s) \approx \tilde{h}_{20}(x_0) - \tilde{h}_{20}^{(0)}(x_0) \tilde{g}_{20}(x_0). $$ + +**6. The calibrated reconstruction algorithm.** The numerical experiments presented in Section 5 inspire us to suggest the following calibrated method for recovering the trace and normal derivative of a given conductivity $\gamma$ based on localized boundary measurements. + +1. Use the dummy load $\gamma_0 \equiv 1$ to find a big enough $N_0 > 0$ for $\tilde{g}_{N_0}^{(0)}$ computed by formula (7) to be reasonably close to 1. + +2. Use formula (7) to recover $\gamma|_{\Gamma}$ approximately as $\gamma(\tau, s, 0) \approx \tilde{g}_{N_0}(\tau, s)$. + +3. Substitute the dummy load to formula (8) and denote the result by $\tilde{h}_{N_0}^{(0)}$. + +4. Use formula (8) to recover the normal derivative of $\gamma$ approximately as + +$$ \frac{\partial \gamma}{\partial \nu}(\tau, s) \approx \tilde{h}_{N_0}(\tau, s) - \tilde{h}_{N_0}^{(0)}(\tau, s) \tilde{g}_{N_0}(\tau, s). $$ + +We tested the calibrated reconstruction algorithm with the simple cases discussed in Section 5. 
Figure 9 shows the calibrated normal derivatives for the radial conductivities with unit trace as a function of $N$. The bigger the change in the conductivity in the normal direction, the smaller the current density on the boundary becomes in the case of finite $N$. Hence the integral in formula (61) gets smaller values even if the trace of the conductivity on the boundary is the same in all cases. Therefore the calibrated algorithm (64) underestimates the normal derivatives of the conductivity. +---PAGE_BREAK--- + +FIGURE 8. Estimated normal derivative $\tilde{h}_N$ computed with formula (62) as a function of N corresponding to one node on the boundary for the conductivity distributions: computational grid of 16285 nodes, $R=1$ and $\epsilon_1 = \epsilon_2 = 4$. Line "**": $\gamma_1(\tau, s, r) = R-r$ and $\frac{\partial\gamma_1}{\partial\nu}|_{\partial\Omega} = 1$. Line "++": $\gamma_4(\tau, s, r) = (R-r)+1$ and $\frac{\partial\gamma_4}{\partial\nu}|_{\partial\Omega} = 1$. Line "--": $\gamma_5(\tau, s, r) = (R-r)+2$ and $\frac{\partial\gamma_5}{\partial\nu}|_{\partial\Omega} = 1$. Line "o": $\gamma_6(\tau, s, r) = (R-r)+3$ and $\frac{\partial\gamma_6}{\partial\nu}|_{\partial\Omega} = 1$. + +Figure 10 shows the calibrated normal derivatives for the radial conductivities with varying trace as a function of $N$. As can be seen from Table 1, the absolute error between the true conductivity and its estimated convolution increases when the conductivity value increases. Therefore the error in the calibrated normal derivative of the conductivity also increases with the conductivity value. See Table 2 for reconstruction errors between true normal derivatives and calibrated normal derivatives. + +TABLE 1. Absolute errors and relative errors for the reconstruction of $\gamma|_{\partial\Omega}$ when $\frac{\partial\gamma_j}{\partial\nu}|_{\partial\Omega} = 1$. + +
|γ - g̃N|N=20|γ - g̃N|N=20 / γ(τ, s, 0)
γ1(τ, s, 0) = 10.09150.0915
γ4(τ, s, 0) = 20.18440.0922
γ5(τ, s, 0) = 30.26610.0887
γ6(τ, s, 0) = 40.34790.0870
+ +At the moment the approximation properties of the above method are not well understood. However, we can gather intuition about the method by testing it also with a more demanding example. We test our calibrated reconstruction algorithm with a fairly complicated non-homogeneous distribution with three inclusions. Two of the inclusions touch the boundary and one is located in the middle of the target, see Figure 11. + +We estimate the conductivity and normal derivatives on 1216 boundary points (64 equidistantly placed points on 19 layers). A nonuniform finite element mesh +---PAGE_BREAK--- + +FIGURE 9. Calibrated normal derivative computed with formula (64) as a function of N corresponding to one node on the boundary: computational grid of 16285 nodes, $R = 1$ and $\epsilon_1 = \epsilon_2 = 4$. Line "*": $\gamma_0(\tau, s, r) = 1$ and $\frac{\partial\gamma_0}{\partial\nu}|_{\partial\Omega} = 0$. Line "+ +": $\gamma_1(\tau, s, r) = R-r$ and $\frac{\partial\gamma_1}{\partial\nu}|_{\partial\Omega} = 1$. Line "- -": $\gamma_2(\tau, s, r) = (R-r)^2$ and $\frac{\partial\gamma_2}{\partial\nu}|_{\partial\Omega} = 2$. Line "o o": $\gamma_3(\tau, s, r) = (R-r)^3$ and $\frac{\partial\gamma_3}{\partial\nu}|_{\partial\Omega} = 3$. + +TABLE 2. Absolute errors and relative errors for the reconstruction of $\frac{\partial \gamma}{\partial \nu} |_{\partial \Omega}$. + +
$|\frac{\partial \gamma}{\partial \nu} - \tilde{h}_N|_{N=20}$$\frac{|\frac{\partial \gamma}{\partial \nu} - \tilde{h}_N|_{N=20}}{|\frac{\partial \gamma}{\partial \nu}|}$
γ₀(τ, s, 0) = 1,$\frac{\partial \gamma_0}{\partial \nu} = 0$0.1842
γ₁(τ, s, 0) = 1,$\frac{\partial \gamma_1}{\partial \nu} = 1$0.3002
γ₂(τ, s, 0) = 1,$\frac{\partial \gamma_2}{\partial \nu} = 2$0.5078
γ₃(τ, s, 0) = 1,$\frac{\partial \gamma_3}{\partial \nu} = 3$0.7990
γ₄(τ, s, 0) = 2,$\frac{\partial \gamma_4}{\partial \nu} = 1$0.5069
γ₅(τ, s, 0) = 3,$\frac{\partial \gamma_5}{\partial \nu} = 1$0.6978
γ₆(τ, s, 0) = 4,$\frac{\partial \gamma_6}{\partial \nu} = 1$0.8852
+ +is constructed corresponding to each boundary point; the number of nodes in the meshes is on the average 9000. See Figure 12 for the recovered trace and Figure 13 for the approximate normal derivative reconstructed using the above calibration. + +We computed the relative $L^2(\partial\Omega)$ and $L^\infty(\partial\Omega)$ errors between true conductivity distribution $\gamma|_{\partial\Omega}$ and its convolution $\gamma * \eta^2$. Errors were computed also between $\gamma * \eta^2$ and its approximation $\tilde{g}_N$. + +$$ (65) \quad E_\gamma^2 = \frac{\|\gamma - \gamma * \eta^2\|_{L^2(\partial\Omega)}}{\|\gamma\|_{L^2(\partial\Omega)}} $$ + +$$ E_\gamma^\infty = \frac{\max_{\partial\Omega} |\gamma - \gamma * \eta^2|}{\max_{\partial\Omega} |\gamma|} $$ + +$$ (66) \quad E_g^2(N) = \frac{\|\tilde{g}_N - \gamma * \eta^2\|_{L^2(\partial\Omega)}}{\|\gamma\|_{L^2(\partial\Omega)}} $$ + +$$ E_g^\infty(N) = \frac{\max_{\partial\Omega} |\tilde{g}_N - \gamma * \eta^2|}{\max_{\partial\Omega} |\gamma|} $$ +---PAGE_BREAK--- + +FIGURE 10. Calibrated normal derivative computed with formula (64) as a function of N corresponding to one node on the boundary for the conductivity distributions: computational grid of 16285 nodes, $R = 1$ and $\epsilon_1 = \epsilon_2 = 4$. Line "* * *": $\gamma_1(\tau, s, r) = R - r$ and $\frac{\partial\gamma_1}{\partial\nu}|_{\partial\Omega} = 1$. Line "++": $\gamma_4(\tau, s, r) = (R-r)+1$ and $\frac{\partial\gamma_4}{\partial\nu}|_{\partial\Omega} = 1$. Line "--": $\gamma_5(\tau, s, r) = (R-r)+2$ and $\frac{\partial\gamma_5}{\partial\nu}|_{\partial\Omega} = 1$. Line "o o": $\gamma_6(\tau, s, r) = (R-r)+3$ and $\frac{\partial\gamma_6}{\partial\nu}|_{\partial\Omega} = 1$. + +FIGURE 11. Left: True conductivity distribution on five cross-sectional planes. Right: True conductivity distribution on the lateral boundary. + +See Table 3 for reconstruction errors. + +TABLE 3. 
Relative errors (65) and (66) for the convolution and reconstruction of $\gamma|_{\partial\Omega}$. Three lowest and three highest boundary node layers have been removed. + +
$E_\gamma^2$$E_\gamma^\infty$$E_g^2(N)$$E_g^\infty(N)$
0.01490.10090.02420.0384
+ +7. **Conclusion.** Our study of recovering trace and normal derivative of conductivity from static electric boundary measurements is based on two aspects: theoretical +---PAGE_BREAK--- + +FIGURE 12. True conductivity distribution $\gamma(\tau, s, 0)$ (first row), convolution $(\gamma|_{\partial\Omega} * \eta^2)(\tau, s)$ (second row) and estimated conductivity distribution $\tilde{g}_N$ (third row) with same colormap. + +FIGURE 13. True normal derivative $\frac{\partial\gamma}{\partial\nu}(\tau, s, 0)$ (first row) and estimated normal derivative $\tilde{h}_N$ (second row) with same colormap. + +and numerical. Theoretically, we show that it is possible to recover convolved approximations to both trace and normal derivative from localized boundary measurements. Our Theorem 1 is proved under quite general geometric assumptions. + +Our numerical experiments suggest that the trace of conductivity can be approximately recovered using Theorem 1 with a finite value of $N$ and simulated data with realistic noise level. The recovery of the normal derivative seems to be more difficult, but we are able to introduce a calibration method allowing useful reconstructions at least for our simulated examples. + +The applicability of our method for real-world measured data needs a further study. However, the voltage distributions applied at the boundary in our simulations seem to be representable using a 8 × 8 electrode array covering the support of the localized excitation pattern. Since we included simulated data with realistic noise level (relative error of the same order than in the ACT3 impedance imager of Rensselaer Polytechnic Institute [15]), we have a reason to believe that our method is implementable with a 64-channel impedance tomography device. +---PAGE_BREAK--- + +**Acknowledgments.** We thank the referees for carefully reading the manuscript and for giving us helpful comments. The work of KT was partly supported by Grant-in-Aid for Scientific Research (C) (Nos. 
19540113 & 22540111), Society for the Promotion of Science, Japan. The work of SS was supported by Academy of Finland (Centre of Excellence in Inverse Problems Research (213476) and Computational Science Research Programme (134868)). During part of the preparation of this work, SS worked as professor at the Department of Mathematics of Tampere University of Technology. + +## REFERENCES + +[1] A. Adler, R. Guardo, and Y. Berthiaume, *Impedance imaging of lung ventilation: Do we need to account for chest expansion?*, IEEE Trans. Biomed. Eng., **43** (1996), 414–420. + +[2] G. Alessandrini, *Singular solutions of elliptic equations and the determination of conductivity by boundary measurements*, J. Diff. Eq., **84** (1990), 252–273. + +[3] K. Astala and L. Päivärinta, *Calderón's inverse conductivity problem in the plane*, Ann. of Math., **163** (2006), 265–299. + +[4] J. Bikowski, "Electrical Impedance Tomography Reconstructions in two and three Dimensions; From Calderón to Direct Methods," Ph.D thesis, Colorado State University, 2008. + +[5] R. Blue, "Real-time Three-dimensional Electrical Impedance Tomography," Ph.D thesis, R.P.I. in Troy, NY, 1997. + +[6] L. Borcea, *Electrical impedance tomography*, Inverse Problems, **18** (2002), R99-R136. + +[7] L. Borcea, *Addendum to "Electrical impedance tomography"*, Inverse Problems, **19** (2002), 997–998. + +[8] G. Boverman, D. Isaacson, T-J Kao, G. J. Saulnier and J. C. Newell, "Methods for Direct Image Reconstruction for EIT in Two and Three Dimensions," in "Electrical Impedance Tomography Conf.," Hanover, New Hampshire, USA, (2008). + +[9] R. M. Brown, Recovering the conductivity at the boundary from the Dirichlet to Neumann map: a pointwise result, J. Inverse and Ill-posed Prob., **9** (2001), 567–574. + +[10] R. Brown and R. Torres, Uniqueness in the inverse conductivity problem for conductivities with $3/2$ derivatives in $L^p, p > 2n$, J. Fourier Analysis Appl., **9** (2003), 1049–1056. + +[11] R. M. Brown and G. 
Uhlmann, Uniqueness in the inverse conductivity problem for nonsmooth conductivities in two dimensions, Comm. Partial Differential Equations, **22** (1997), 1009–1027. + +[12] A. P. Calderón, On an inverse boundary value problem, Seminar on Numerical Analysis and its Applications to Continuum Physics, Soc. Brasileira de Matemática, (1980), 65–73. + +[13] M. Cheney, D. Isaacson and J. C. Newell, Electrical impedance tomography, SIAM Review, **41** (1999), 85–101. + +[14] K-S Cheng, D. Isaacson, J. C. Newell and D. G. Gisser, Electrode models for electric current computed tomography, IEEE Transactions on Biomedical Imaging, (1989), 918–924. + +[15] R. D. Cook, G. J. Saulnier and J. C. Goble, A phase sensitive voltmeter for a high-speed, high-precision electrical impedance tomograph, in "Proc. Annu. Int. Conf. IEEE Engineering in Medicine and Biology Soc.," (1991), 22–23. + +[16] H. Cornean, K. Knudsen and S. Siltanen, Towards a d-bar reconstruction method for three-dimensional EIT, Journal of Inverse and Ill-Posed Problems, **14** (2006), 111–134. + +[17] R. Courant and D. Hilbert, "Methods of Mathematical Physics," Interscience Publishers, Vol. II 1962. + +[18] E. B. Davies, "Heat Kernels and Spectral Theory," Cambridge University Press, Cambridge, 1989. + +[19] B. Gebauer and N. Hyvönen, Factorization method and inclusions of mixed type in an inverse elliptic boundary value problem, Inverse Probl. Imaging, **2** (2008), 355–372. + +[20] E. Gersing, B. Hoffman, and M. Osypka, Influence of changing peripheral geometry on electrical impedance tomography measurements, Medical & Biological Engineering & Computing, **34** (1996), 359–361. + +[21] D. Gilbarg and N. S. Trudinger, "Elliptic Partial Differential Equations of Second Order," Grundlehren der Mathematischen Wissenschaften, Springer, Berlin, **224**, 1989. + +[22] J. Goble, M. Cheney and D. Isaacson, Electrical impedance tomography in three dimensions Appl. Comput. Electromagn. Soc. J., **7** (1992), 128–147. 
+---PAGE_BREAK--- + +[23] A. Greenleaf, M. Lassas and G. Uhlmann, *The Calderón problem for conormal potentials, I: Global uniqueness and reconstruction*, Comm. Pure Appl. Math., **56** (2003), 328–352. + +[24] M. Hanke and B. Schappel, *The factorization method for electrical impedance tomography in the half-space*, SIAM J. Appl. Math., **68** (2008), 907–924. + +[25] T. Ide, H. Isozaki, S. Nakata and S. Siltanen, *Local detection of three-dimensional inclusions in electrical impedance tomography*, Inverse Problems, **26** (2010), 35001–35017. + +[26] D. Isaacson, J. L. Mueller, J. C. Newell and S. Siltanen, *Reconstructions of chest phantoms by the d-bar method for electrical impedance tomography*, Physiol Meas., **27** (2006), 43–50. + +[27] H. Kang and K. Yun, *Boundary determination of conductivities and Riemannian metrics via local Dirichlet-to-Neumann operator*, SIAM J. Math. Anal., **34** (2003), 719–735. + +[28] R. V. Kohn and M. Vogelius, *Determining conductivity by boundary measurements*, Commun. Pure Appl. Math., **37** (1984), 289–298. + +[29] R. V. Kohn and M. Vogelius, *Determining conductivity by boundary measurements II. Interior results*, Commun. Pure Appl. Math., **38** (1985), 643–667. + +[30] V. Kolehmainen, M. Vauhkonen, P. A. Karjalainen and J. P. Kaipio, *Assessment of errors in static electrical impedance tomography with adjacent and trigonometric current patterns*, Physiological Measurement, **18** (1997), 289–303. + +[31] P. Metherall, D. C. Barber and R. H. Smallwood, *Three dimensional electrical impedance tomography*, in "IX Int. Conf. Electrical Bio-Impedance," Heidelberg, Germany, (1995), 510–511. + +[32] P. Metherall, D. C. Barber, R. H. Smallwood and B. H. Brown, *Three-dimensional electrical impedance tomography*, Nature, **380** (1996), 509–512. + +[33] P. Metherall, R. H. Smallwood and D. C. Barber, *Three dimensional electrical impedance tomography of the human thorax*, in "18th Int. Conf. IEEE Eng. Med. Biol. 
Society," (1996). + +[34] J. P. Morucci, M. Granie, M. Lei, M. Chabert and P. M. Marsili, *3D reconstruction in electrical impedance imaging using a direct sensitivity matrix approach*, Physiol. Meas., **16** (1995), A123–A128. + +[35] A. I. Nachman, *Reconstructions from boundary measurements*, Ann. of Math., **128** (1988), 531–576. + +[36] A. I. Nachman, *Global uniqueness for a two-dimensional inverse boundary value problem*, Ann. of Math., **143** (1996), 71–96. + +[37] G. Nakamura and K. Tanuma, *Local determination of conductivity at the boundary from the Dirichlet-to-Neumann map*, Inverse Problems, **17** (2001), 405–419. + +[38] G. Nakamura and K. Tanuma, *Direct determination of the derivatives of conductivity at the boundary from the localized Dirichlet to Neumann map*, Comm. Korean Math. Soc., **16** (2001), 415–425. + +[39] G. Nakamura and K. Tanuma, *Formulas for reconstructing conductivity and its normal derivative at the boundary from the localized Dirichlet to Neumann map*, in "Recent Development in Theories & Numerics, Int. Conf. on Inverse Problems" (eds. Yiu-Chung Hon, Masahiro Yamamoto, Jin Cheng and June-Yub Lee), World Scientific, (2003), 192–201. + +[40] G. Nakamura, K. Tanuma, S. Siltanen and S. Wang, *Numerical recovery of conductivity at the boundary from the localized Dirichlet to Neumann map*, Computing, **75** (2004), 197–213. + +[41] J. C. Newell, R. S. Blue, D. Isaacson, G. J. Saulnier and A. S. Ross, *Phasic three-dimensional impedance imaging of cardiac activity*, Physiol. Meas., **23** (2002), 203–209. + +[42] L. Päivärinta, A. Panchenko and G. Uhlmann, *Complex geometrical optics for Lipschitz conductivities*, Rev. Mat. Iberoam., **19** (2003), 57–72. + +[43] R. L. Robertson, *Boundary identifiability of residual stress via the Dirichlet to Neumann map*, Inverse Problems, **13** (1997), 1107–1119. + +[44] E. Somersalo, M. Cheney and D. 
Isaacson, *Existence and uniqueness for electrode models for electric current computed tomography*, SIAM J. Appl. Math., **52** (1992), 1023–1040. + +[45] G. Strang and G. Fix, "An Analysis of The Finite Element Method," Prentice Hall, 1973. + +[46] J. Sylvester, *A convergent layer stripping algorithm for the radially symmetric impedance tomography problem*, Comm. PDE, **17** (1992), 1955–1994. + +[47] J. Sylvester and G. Uhlmann, *A global uniqueness theorem for an inverse boundary value problem*, Ann. of Math., **125** (1987), 153–169. + +[48] J. Sylvester and G. Uhlmann, *Inverse boundary value problems at the boundary — continuous dependence*, Comm. Pure Appl. Math., **41** (1988), 197–221. + +[49] P. J. Vauhkonen, "Image Reconstruction in Three-Dimensional Electrical Impedance Tomography," Ph.D thesis, University of Kuopio, 2004. +---PAGE_BREAK--- + +[50] P. J. Vauhkonen, M. Vauhkonen, T. Savolainen and J. P. Kaipio, *Static three-dimensional electrical impedance tomography*, Ann. New York Acad. Sci., **873** (1999), 472–481. + +[51] P. J. Vauhkonen, M. Vauhkonen, T. Savolainen and J. P. Kaipio, *Three-dimensional electrical impedance tomography based on the complete electrode model*, IEEE Trans. Biomed. Eng., **46** (1999), 1150–1160. + +[52] A. Wexler, *Electrical impedance imaging in two and three dimensions*, Clin. Phys. Physiol. Meas., Suppl A, **9** (1988), 29–33. + +Received April 2010; revised August 2010. 
+ +*E-mail address:* gnaka@math.sci.hokudai.ac.jp + +*E-mail address:* Paivi.Ronkanen@uef.fi + +*E-mail address:* samuli.siltanen@helsinki.fi + +*E-mail address:* tanuma@gunma-u.ac.jp \ No newline at end of file diff --git a/samples_new/texts_merged/4409661.md b/samples_new/texts_merged/4409661.md new file mode 100644 index 0000000000000000000000000000000000000000..a2dea56c90bca768b81f6e77981ad5dd1faa100e --- /dev/null +++ b/samples_new/texts_merged/4409661.md @@ -0,0 +1,2319 @@ + +---PAGE_BREAK--- + +# Marginal triviality of the scaling limits of critical 4D Ising and $\phi_4^4$ models + +Michael Aizenman* and Hugo Duminil-Copin† + +25 January 2021 + +## Abstract + +We prove that the scaling limits of spin fluctuations in four-dimensional Ising-type models with nearest-neighbor ferromagnetic interaction at or near the critical point are Gaussian. A similar statement is proven for the $\lambda\phi^4$ fields over $\mathbb{R}^4$ with a lattice ultraviolet cutoff, in the limit of infinite volume and vanishing lattice spacing. The proofs are enabled by the models' random current representation, in which the correlation functions' deviation from Wick's law is expressed in terms of intersection probabilities of random currents with sources at distances which are large on the model's lattice scale. Guided by the analogy with random walk intersection amplitudes, the analysis focuses on the improvement of the so-called tree diagram bound by a logarithmic correction term, which is derived here through multi-scale analysis. + +# 1 Introduction + +The results presented below address questions pertaining to two distinct research agendas: one aims at Constructive Field Theory and the other at the understanding of the critical behavior in Statistical Mechanics. While these two goals are somewhat different the questions and the answers are related. We start with their brief presentation. 
+ +## 1.1 Constructive Quantum Field Theory and Functional Integration + +Quantum field theories with local interaction play an important role in the physics discourse, where they appear in subfields ranging from high energy to condensed matter physics. The mathematical challenge of proper formulation of this concept led to programs of Constructive Quantum Field Theory (CQFT). A path towards that goal was charted through the proposal to define quantum fields as operator valued distributions whose essential properties are formulated as the Wightman axioms [50]. Wightman's reconstruction theorem allows one to recover this structure from the collection of the corresponding correlation functions, defined over the Minkowski space-time. By the Osterwalder-Schrader theorem [39, 40], correlation functions with the required properties may potentially be obtained through analytic continuation from those of random distributions defined over the corresponding Euclidean space that meet a number of conditions: suitable analyticity, permutation symmetry, Euclidean covariance, and reflection-positivity. + +* aizenman@princeton.edu Departments of Physics and Mathematics, Princeton University +† duminil@ihes.fr Institut des Hautes Études Scientifiques and Université de Genève +---PAGE_BREAK--- + +Seeking natural candidates for such *Euclidean fields*, one ends up with the task of constructing probability averages over random distributions $\Phi(x)$, for which the expectation value of functionals $F(\Phi)$ would have properties fitting the formal expression + +$$ \langle F(\Phi) \rangle \approx \frac{1}{\text{norm}} \int F(\Phi) \exp[-H(\Phi)] \prod_{x \in \mathbb{R}^d} d\Phi(x), \quad (1.1) $$ + +where $H(\Phi)$ is the Hamiltonian. 
In this context, it seems natural to consider expressions of the form + +$$ H(\Phi) \coloneqq (\Phi, A\Phi) + \int_{\mathbb{R}^d} P(\Phi(x)) dx \quad (1.2) $$ + +with $(\Phi, A\Phi)$ a positive definite and reflection-positive quadratic form, and $P(\Phi(x))$ a polynomial (or a more general function) whose terms of order $\Phi(x)^{2k}$ are interpreted heuristically as representing $k$-particle interactions. An example of a quadratic form with the above properties (at $K, b > 0$) and also rotation invariance is + +$$ (\Phi, A\Phi) := \int_{\mathbb{R}^d} (K |\nabla\Phi|^2(x) + b |\Phi(x)|^2) dx. \quad (1.3) $$ + +The functionals $F(\Phi)$ to which (1.1) is intended to apply include the smeared averages + +$$ T_f(\Phi) := \int_{\mathbb{R}^d} f(x)\Phi(x)dx \quad (1.4) $$ + +associated with continuous functions of compact support $f \in C_0(\mathbb{R}^d)$. By linearity, the expectation values of products of such variables take the form + +$$ \left\langle \prod_{j=1}^{n} T_{f_j}(\Phi) \right\rangle := \int_{(\mathbb{R}^d)^n} dx_1 \dots dx_n S_n(x_1, \dots, x_n) \prod_{j=1}^{n} f(x_j), \quad (1.5) $$ + +with $S_n(x_1, \dots, x_n)$ characterizing the probability measure on the space of distribution +which corresponds to the expectation value $\langle - - \rangle$. This is summarized by saying that in a +distributional sense + +$$ \left\langle \prod_{j=1}^{n} \Phi(x_j) \right\rangle = S_n(x_1, \dots, x_n), \qquad (1.6) $$ + +with $S_n$ referred to as the *Schwinger functions* of the corresponding euclidean field theory. + +A relatively simple class of Euclidean fields are the Gaussian fields, for which $H$ con- +tains only quadratic terms. 
Gaussian fields (whether reflection-positive or not) are alter- +natively characterized by having their structure determined by just the two-point function, +with the $2n$-point Schwinger functions computable through Wick's law: + +$$ S_{2n}(x_1, \ldots, x_{2n}) = \sum_{\pi} \prod_{j=1}^{n} S_2(x_{\pi(2j-1)}, x_{\pi(2j)}) := \mathcal{G}_n[S_2](x_1, \ldots, x_{2n}), \quad (1.7) $$ + +where $\pi$ ranges over pairing permutations of $\{1, \ldots, 2n\}$. The field theoretic interpretation +of (1.7) is the absence of interaction. Due to that, and to their algebraically simple +structure, such fields have been referred to as *trivial*. + +When interpreting (1.1), one quickly encounters a number of problems. Even in the +generally understood case of the Gaussian free field, with $H$ consisting of just the quadratic +term (1.3), Equation (1.1) is not to be taken literally as the measure is supported by non- +differentiable functions for which the integral in the exponential is almost surely divergent. + +A natural step to tackle next seems to be the addition of the lowest order even term, +i.e. $\lambda\Phi^4$. However, in dimensions $d > 1$, the free field is no longer a random function but a +---PAGE_BREAK--- + +random distribution which even locally is unbounded. Thus such simple looking proposals +lead to additional divergences, whose severity increases with the dimension. + +The heuristic “renormalization group” approach to the problem by K. Wilson [51] indi- +cates that in low enough dimensions, specifically $d < 4$ for $\lambda\Phi^4$, the problem could be tack- +led through cutoff-dependent counter-terms. Partially successful attempts to carry such +a project rigorously have been the focus of a substantial body of works. The means em- +ployed have included: counter-terms, which are allowed to depend on regularizing cutoffs, +scale decomposition, renormalization group flows, the theory or regularity structures [27], +etc. 
+ +A natural starting point towards such a construction of a $\Phi_d^4$ functional integral (1.1) is to regularize it with a pair of cutoffs: at the short distance (ultraviolet) scale and the large distance (infrared) scale. A lattice version of that is the restriction of $\Phi(\cdot)$ to the vertices of a finite graph with the vertex set + +$$ \mathcal{V}_{a,R} = (a\mathbb{Z})^d \cap \Lambda_R, \quad \Lambda_R := [-R, R]^d. \tag{1.8} $$ + +For the corresponding finite collection of variables $\{\Phi(x)\}_{x \in \mathcal{V}_{a,R}}$ the Hamiltonian (1.2) is initially interpreted in terms of the Riemann-sum style discrete analog of the integral expressions. Moments of $\Phi(x)$ are to be accompanied by lower order counter-terms. In particular, the fourth power addition takes the form + +$$ P(\Phi(x)) = \lambda\Phi^4 - c(\lambda, a, R)\Phi^2. \tag{1.9} $$ + +The cutoffs are removed, through the limit $R \nearrow \infty$ followed by $a \searrow 0$. Parameters such as $c(\lambda, a, R)$ are allowed to be adjusted in the process, so as to stabilize the Schwinger functions $S_n(x_1, \dots, x_n)$ on the continuum limit scale. + +The constructive field theory program has yielded non-trivial scalar field theories over $\mathbb{R}^2$ and $\mathbb{R}^3$ [11, 21, 26, 40]. (We do not discuss gauge field theories here, cf. [31].) However, the progression of constructive results was halted when it was proved that for dimensions $d > 4$ the attempt to construct $\Phi_d^4$ with + +$$ \lim_{|x-y| \to \infty} S_2(x,y) = 0 \tag{1.10} $$ + +by the method outlined above (in essence: taking the scaling limit of the lattice models +at $\beta \le \beta_c$) yields only Gaussian fields [1, 17]. + +Various partial results have indicated that the same may hold true for the critical +dimension $d = 4$ (cf. [7, 8, 9, 20, 29]), however a sweeping statement such as proved for +$d > 4$ has remained open. In this work we address this case. 
+ +For clarity let us note that, like the no-go statements of [1, 17], the results presented +here do not involve explicit computations of the counterterms along the above scheme. +Instead, they are based on dimension-dependent relations among the Schwinger functions +which may emerge in any such limit. + +## 1.2 Statement of the main result + +The probability measures which correspond to (1.1) with the lattice and finite volume +cutoffs (1.8) take the form of a statistical-mechanics Gibbs equilibrium state average + +$$ \langle F(\phi) \rangle = \frac{1}{\text{norm}} \int F(\phi) \exp[-H(\phi)] \prod_{x \in \Lambda_R} \rho(d\phi_x), \tag{1.11} $$ +---PAGE_BREAK--- + +with a Hamiltonian $H(\phi)$ and an a-priori measure $\rho(d\phi)$ of the form + +$$ H(\phi) = - \sum_{\{x,y\} \in \Lambda_R} J_{x,y} \phi_x \phi_y, \quad \rho(d\phi_x) = e^{-\lambda \phi_x^4 + b \phi_x^2} d\phi_x, \qquad (1.12) $$ + +where $d\phi_x$ is the Lebesgue measure on $\mathbb{R}$ and $J_{x,y}$ is zero for non-nearest neighbour vertices, and $J \ge 0$ otherwise. To keep the notation simple, the basic variables are written here as they appear from the perspective of the lattice but our attention is focused on the correlations at distances of the order of $L$, with + +$$ 1 \ll L \ll R. \qquad (1.13) $$ + +In terms of the scaling limit discussed above, $a$ is equal to $1/L$. + +A point of fundamental importance is that since the interaction through which the field variables are correlated is local (nearest neighbor on the lattice scale), for the field correlations functions to exhibit non-singular variation on the scales $L \gg 1$, the system's parameters $(J, \lambda, b)$ need to be very close to the critical manifold, along which the correlation length of the lattice system diverges¹. 
+ +Quantities whose joint distribution we track in the scaling limit are based on the collections of random variables of the form + +$$ T_{f,L}(\phi) := \frac{1}{\sqrt{\Sigma_L}} \sum_{x \in \mathbb{Z}^d} f(x/L) \phi_x, \qquad (1.14) $$ + +where $f$ ranges over compactly supported continuous functions, whose collection is denoted $C_0(\mathbb{R}^d)$, and $\Sigma_L$ denotes the variance of the sum of spins over the box of size $L$, i.e. + +$$ \Sigma_L := \left\langle \left( \sum_{x \in \Lambda_L} \phi_x \right)^2 \right\rangle. \qquad (1.15) $$ + +**Definition 1.1** A discrete system as described above, parametrized by $(J, \lambda, b, R, L)$, converges in distribution, in the double limit $\lim_{L \to \infty} \lim_{R/L \to \infty}$ (with a possible restriction to a subsequence along which also the other parameters are allowed to vary) if for any finite collection of test functions $f \in C_0(\mathbb{R}^d)$ the joint distributions of the random variables $\{T_{f,L}(\phi)\}$ converge. + +Through a standard probabilistic construction, the limit can be presented as a random field $\Phi$, to whose weighted averages $T_f(\Phi)$ the above variables converge in distribution. We omit here the detailed discussion of this point², but remark that for the models considered here the construction is simplified by i) the exclusion of delta functions $\delta(x)$ and their derivatives from the family of considered test functions, and ii) the uniform local integrability of the rescaled correlation functions (before and at the limit). This important condition is implied in the present case by the *infrared bound*, which is presented below in Section 5.3. + +Our main result concerning the Euclidean field theory is the following. + +¹The scaling limit of a correlation function with exponential decay which on the lattice scale is of a fixed correlation length results in a white noise distribution in the limit. 
+ +²By the Kolmogorov extension theorem, one may start by selecting sequences of the parameter values so as to establish convergence in distribution for a countable collection of test functions $f$, which is dense in $C_0(\mathbb{R}^d)$, and then use the uniform local integrability of the rescaled correlation function and of the limiting Schwinger functions, to extend the statement by continuity arguments to all $f \in C_0(\mathbb{R}^d)$. One may then recast the limiting variables as associated with a single random $\Phi$, as in (1.4). +---PAGE_BREAK--- + +**Theorem 1.2 (Gaussianity of $\Phi_4^4$)** For dimension $d=4$, any random field reachable by the above constructions, and satisfying (1.10), is a generalized Gaussian process. + +Let us mention that the precise asymptotic behaviour of scaling limits of lattice models which start from sufficiently small perturbations of the Gaussian free field, i.e. small enough $\lambda$, has been obtained through rigorous renormalization techniques [9, 16, 20, 29]. In comparison, our result also covers arbitrarily “hard” $\phi^4$ fields. However, we do not currently provide comparable analysis of the convergence in terms of the exact scale of the logarithmic corrections, and the exact expression for the covariance of the limiting Gaussian field. + +Let us also note that what from the perspective of constructive field theory may be regarded as disappointment is a positive and constructive result from the perspective of statistical mechanics. The theoreticians' goal there is to understand the critical behavior in models which lie beyond the reach of exact solutions. The proven gaussianity of the limit is therefore also a constructive result. + +## 1.3 The statistical mechanics perspective + +Statistical mechanics provides a general approach for studying the behaviour of extensive systems of a divergent number of degrees of freedom. 
Among the theoretically gratifying observations in this field has been the discovery of “universality”. The term means that some of the key features of phase diagrams, and critical behavior (including the critical exponents), appear to be the same across broad classes of systems of rather different microscopic structure. This has accorded relevance to studies of the phase transitions in drastically streamlined mathematical models. The ferromagnetic Ising spin model to which we turn next is among the earliest, and most studied such systems. + +An intuitive explanation of universality is that the large scale behavior of models of rich short scale structure is described by statistical field theories for which there are far fewer options. A heuristic perspective on this phenomenon is provided by the renormalization group theory, e.g. [51]. In particular, the mechanism underlying the simplicity of the scaling limit is related to simplicity of the critical exponents, which means that for $d \ge 4$ they assume their mean field values. Rigorous results for the latter (though still partial, in terms of logarithmic corrections) were presented in [46, 6]. + +The Ising spin model on $\Lambda \subset \mathbb{Z}^d$ has as its basic variables a collection of $\pm 1$ valued variables $\{\sigma_x\}_{x \in \Lambda}$, and a Hamiltonian (the energy function) of the form + +$$ H_{\Lambda,J,h}(\sigma) := - \sum_{\{x,y\} \subset \Lambda} J_{x,y} \sigma_x \sigma_y - \sum_{x \in \Lambda} h \sigma_x. 
\quad (1.16) $$ + +The model's finite volume Gibbs equilibrium state $\langle \cdot \rangle_{\Lambda,J,h,\beta}$ at inverse temperature $\beta \ge 0$ is the probability measure under which the expectation value of any function $F: \{\pm 1\}^\Lambda \to \mathbb{R}$ is given by + +$$ \langle F \rangle_{\Lambda, J, h, \beta} := \frac{1}{Z(\Lambda, J, h, \beta)} \sum_{\sigma \in \{\pm 1\}^\Lambda} F(\sigma) \exp[-\beta H_{\Lambda, J, h}(\sigma)], \quad (1.17) $$ + +where the normalizing factor $Z(\Lambda, J, h, \beta)$ is the model's partition function. Infinite volume Gibbs states on $\mathbb{Z}^d$, which we shall denote by $\langle \cdot \rangle_{J,h,\beta}$, are defined through suitable limits (over sequences $\Lambda_n \nearrow \mathbb{Z}^d$) of the above. + +We focus here on the nearest neighbor ferromagnetic interaction (n.n.f.) + +$$ J_{x,y} = \begin{cases} J & \|x-y\| = 1 \\ 0 & \text{otherwise} \end{cases} \quad (1.18) $$ +---PAGE_BREAK--- + +with $J > 0$. In dimensions $d > 1$, this model exhibits a line of first-order phase transitions (in the plane of the model's thermodynamics parameters $(\beta, h)$) along the line $h = 0$, $\beta \in (\beta_c(d), \infty)$. The line terminates at the critical point $(\beta_c, 0)$ at which the model's correlation length diverges. Our discussion concerns the scaling limits at, or near, this point. Since the phase transition occurs at zero magnetic field, we restrict the discussion to $h = 0$ and will omit $h$ from the notation. + +Away from the critical point the model's truncated correlation functions decay exponentially fast [3, 14]. This leads to the definition of the *correlation length* $\xi(\beta)$ as: + +$$ \xi(\beta) := \lim_{n \to \infty} -n / \log \langle \sigma_0; \sigma_{ne_1} \rangle_\beta \quad (\text{with } \mathbf{e}_1 = (1, 0, \dots, 0)). \qquad (1.19) $$ + +The correlation length is proven to be finite for any $\beta < \beta_c$ [3] and divergent in the limit $\beta \to \beta_c$ [44]. 
At the critical point $\xi(\beta_c) = +\infty$ as the decay of the 2-point function slows to a power-law (see [44] and the discussion around Corollary 5.8). + +At this point, one may notice the similarity between the Ising model’s Gibbs equilibrium distribution (1.17) and the discretized functional integral (1.11). Furthermore, in view of the probability measures’ relation + +$$ \frac{1}{2} [\delta(\sigma - 1) + \delta(\sigma + 1)] d\sigma = 2 \lim_{\lambda \to \infty} e^{-\lambda(\phi^2-1)^2} d\phi / \text{Norm}(\lambda) \quad (1.20) $$ + +the Ising spin’s a-priori (binary) distribution can be viewed as the “hard” limit of the $\phi^4$ measure. Hence included in Theorem 1.2 is the statement that for $d=4$ any scaling limit of the critical Ising model is Gaussian. + +However, our analysis flows in the opposite direction. In essence, the argument is structured as follows: + +1. deploying methods which take advantage of the Ising systems' structure, the stated results are first proven for the n.n.f. Ising model (in four dimensions); + +2. the analysis is adapted to the model's extension, in which each spin is replaced by a block average of 'elemental' Ising spins with an intrablock ferromagnetic coupling; + +3. through weak limits the statement is extended to systems of variables whose a-priori single spin distribution belongs to the Griffiths-Simon (G-S) class. + +Included in the G-S class (defined below) are the $\Phi^4$ measure $\rho(d\varphi)$ of (1.12). + +To reduce the repetition, some of the relevant relations are presented below in a form which may not be the simplest for n.n.f. but is suitable for the model's generalized version. However in the rest of this section we focus on the n.n.f. case. + +As it is known, and made explicit in Section 6.3, for Ising models a bellwether for Gaussian behaviour at large distances is the asymptotic validity of Wick's law at the level of the four-point function [1, 38]. 
The deviation is expressed in the *Ursell function* + +$$ U_4^\beta(x, y, z, t) := \langle \sigma_x \sigma_y \sigma_z \sigma_t \rangle_\beta - \left[ \langle \sigma_x \sigma_y \rangle_\beta \langle \sigma_z \sigma_t \rangle_\beta + \langle \sigma_x \sigma_z \rangle_\beta \langle \sigma_y \sigma_t \rangle_\beta + \langle \sigma_x \sigma_t \rangle_\beta \langle \sigma_y \sigma_z \rangle_\beta \right] (1.21) $$ + +the relevant question being whether $U_4(x,y,z,t)/\langle\sigma_x\sigma_y\sigma_z\sigma_t\rangle_\beta$ vanishes asymptotically for quadruples of sites at large distances, of comparable order between the pairs. + +Gaussianity of the scaling limits for $d > 4$ was previously established through the combination of the *tree diagram bound* of [1]: + +$$ |U_4^\beta(x, y, z, t)| \le 2 \sum_{u \in \mathbb{Z}^d} \langle \sigma_u \sigma_x \rangle_\beta \langle \sigma_u \sigma_y \rangle_\beta \langle \sigma_u \sigma_z \rangle_\beta \langle \sigma_u \sigma_t \rangle_\beta \qquad (1.22) $$ +---PAGE_BREAK--- + +and the *Infrared Bound* of [19, 21] + +$$ +\langle \sigma_x \sigma_y \rangle_{\beta_c} \leq \frac{C}{|x-y|^{d-2}}. \tag{1.23} +$$ + +At the heuristic level, the triviality of the scaling limit for $d > 4$ is indicated by the +following dimension counting. Assume that at $\beta_c$ the two-point function is of comparable +values for pairs of sites at similar distances (which is false for $\beta \neq \beta_c$ at distances much +larger than $\xi(\beta)$). Then, for quadruples of points at mutual distances of order $L$, the +sum in the tree diagram bound (1.22) contributes a factor $L^d$ while the summand has two +extra correlation function factors, in comparison to $\langle \sigma_x \sigma_y \sigma_z \sigma_t \rangle_\beta$, each factor dominated by +$1/L^{d-2}$. This suggests that $U_4(x,y,z,t)$ in comparison to the full correlation functions may +be of the order $O(L^{4-d})$, which for $d > 4$ vanishes in the limit $L \to \infty$. 
Up to numerous +technical details this is the essence of the argument presented in [1, 17]. However, the +above estimate is clearly inconclusive for $d = 4$. + +The key advance presented here is the following improvement of the tree diagram +bound. The multiplicative factor by which it improves (1.22) is derived through a multi +scale analysis which is of relevance at the marginal dimension $d = 4$. + +**Theorem 1.3 (Improved tree diagram bound inequality)** For the n.n.f. Ising model in dimension $d=4$, there exist $c, C > 0$ such that for every $\beta \le \beta_c$, every $L \le \xi(\beta)$ and every $x, y, z, t \in \mathbb{Z}^d$ at a distance larger than $L$ of each other, + +$$ +|U_4^\beta(x, y, z, t)| \leq \frac{C}{B_L(\beta)^c} \sum_{u \in \mathbb{Z}^4} \langle \sigma_u \sigma_x \rangle_\beta \langle \sigma_u \sigma_y \rangle_\beta \langle \sigma_u \sigma_z \rangle_\beta \langle \sigma_u \sigma_t \rangle_\beta, \quad (1.24) +$$ + +where $B_L(\beta)$ is the bubble diagram truncated at a distance $L$ defined by the formula + +$$ +B_L(\beta) := \sum_{x \in \Lambda_L} (\sigma_0 \sigma_x)_\beta^2 . \tag{1.25} +$$ + +For a heuristic insight on the implications of this improvement for $d = 4$, one may consider separately the two following scenarios: the two-point function $\langle \sigma_0 \sigma_x \rangle_\beta$ may be roughly of the order $L^{2-d}$ (meaning that the Infrared Bound is saturated up to constant), or it may be much smaller. In the first case (which is conjectured to hold when $d = 4$), $B_L(\beta)$ is of order $\log L$, so that the improved tree diagram bound indicates that $|U_4|/S_4 = O(\log L)^{-c}$, and thus is asymptotically negligible. In the second case (which is not the one expected to hold), already the unadulterated tree diagram bound (1.22) suffices. + +We derive (1.24) making extensive use of the Ising model's random current representation that is presented in Section 3. 
It enables combinatorial identities through which the deviations from Wick's law can be expressed in terms of intersection probabilities of the random clusters which link pairwise the specified source points. + +Beyond the four point function, the full statement of the scaling limit’s gaussianity is established here through the following estimate of the characteristic function of smeared averages of spins. + +**Proposition 1.4** There exist $c, C > 0$ such that for the n.n.f. Ising model on $\mathbb{Z}^4$, every $\beta \le \beta_c$, every $L \le \xi(\beta)$, and test function $f \in C_0(\mathbb{R}^4)$, + +$$ +\left| \left\langle \exp[z T_{f,L}(\sigma) - \frac{z^2}{2} \langle T_{f,L}(\sigma)^2 \rangle_{\beta}] \right\rangle_{\beta} - 1 \right| \leq \frac{C \|f\|_{\infty}^4 r_f^{12}}{(\log L)^c} z^4, \quad (1.26) +$$ + +with $\|f\|_{\infty} := \max\{|f(x)| : x \in \mathbb{R}^4\}$ and $r_f$ the diameter of the function's support. +---PAGE_BREAK--- + +The claimed gaussianity follows since (by the Infrared Bound, applied on the left-hand side) for any non-negative continuous function $f \neq 0$ with bounded support, + +$$Cr_f^2 \|f\|_\infty^2 \geq \langle T_{f,L}(\sigma)^2 \rangle_\beta \geq c_f > 0, \quad (1.27)$$ + +uniformly in $\beta \leq \beta_c$ and $L$, we get that for $L \gg 1$ the distribution of $T_{f,L}(\sigma)$ is approximately Gaussian of variance $\langle T_{f,L}(\sigma)^2 \rangle_\beta$. + +**Organization of the proof:** The result proven here is unconditional. However, to better convey the argument's structure, we first establish the claimed result for the scaling limits of critical models ($\beta = \beta_c$) under the auxiliary assumption that the two-point function behaves regularly on all scales, in a sense defined below. 
We then present an unconditional proof for $\beta \leq \beta_c$ in which we add to the above analysis the proof that the two-point function is regular on a sufficiently large collection of distance scales, up to the correlation length $\xi(\beta)$. + +**Organization of the article:** In the next section, we present the Griffiths-Simon construction of random variables which can be obtained as local aggregates of ferromagnetically coupled Ising spins. It yields a useful link between the $\phi^4$ and Ising variables. Following that, in Section 3 we present the basics of Ising models' random current representation, and the intuition based on random walk intersection probabilities. Section 4 contains a conditional proof of the improved tree diagram bound at criticality, derived under a power-law decay assumption on the two-point function. Next, as a preparation for the unconditional proof, in Section 5 we present some relevant properties of Ising model's two-point function. These estimates are stated and proved in the context of systems of real valued variables with the single-spin distribution in the aforementioned Griffiths-Simon class. Included there are mostly known but also some new results. Section 6 contains the unconditional proof of our main results for the Ising model. Section 7 is devoted to its extension to the Griffiths-Simon class. The appendix contains some auxiliary technical statements that are of independent interest. + +# 2 The Griffiths-Simon class of measures + +The discrete approximations of the $\varphi^4$ functional integral and the Gibbs states of an Ising model are not only analogous, as explained above, but are actually related. + +In one direction one has (1.20) and the implications mentioned next to it. However, in this work we shall make use of another relation, which permits us to apply tools which are initially developed for general Ising models to the study of the $\varphi^4$ functional integral. 
This relation is based on a construction which was initiated by Griffiths [23], and advanced further by Simon-Griffiths [45]. + +**Definition 2.1** A probability measure $\rho(d\varphi)$ on $\mathbb{R}$ is said to belong to the Griffiths-Simon (GS) class if either of the following conditions is satisfied + +1) the expectation values with respect to $\rho$ can be presented as + +$$\int F(\varphi) \rho(d\varphi) = \sum_{\sigma \in \{-1,1\}^N} F\left(\alpha \sum_{n=1}^{N} b_n \sigma_n\right) e^{\sum_{n,m=1}^{N} K_{n,m} \sigma_n \sigma_m} / \text{Norm}. \quad (2.1)$$ + +with some $\{b_n\} \subset \mathbb{R}$, and $K_{n,m} \geq 0$. + +2) $\rho$ can be presented as a (weak) limit of probability measures of the above type, and +---PAGE_BREAK--- + +Figure 1: The decorated graph, in which the sites $x \in \Lambda$ of a graph of interest are re- +placed by “blocks” $\mathcal{B}_x$ of sites indexed as $(x, n)$. The Ising “constituent spins” $\sigma_{x,n}$ are +coupled pairwise through intra-block couplings $\delta_{x,y} K_{n,m}$ and inter-block couplings $J_{x,y}$. +The depicted lines indicate a possible realization of the corresponding random current. + +is of sub-gaussian growth: + +$$ +\int e^{|\varphi|^\alpha} \rho(d\varphi) < \infty \quad \text{for some } \alpha > 2. \qquad (2.2) +$$ + +A random variable is said to be of Griffiths-Simon type if its probability distribution is in +the GS class. + +The construction (1) was employed by Griffiths [23] for a proof that the Ising model’s +Lee-Yang property as well as the Griffiths correlation inequalities hold also for a broader +class of similar models with other notable spin variables. Subsequently, Simon and Grif- +fiths [45] pointed out that upon taking weak limits this can be extended to cover also the +$\phi^4$ a-priori measures, spelled in (1.12). 
 + +More specifically, a finite collection of the variables $\{\varphi_x\}_{x\in\Lambda}$ with the a-priori measure
$\rho(d\varphi) = e^{-\lambda\varphi^4+b\varphi^2}\,d\varphi/\text{norm}$ can be produced as the $N\to\infty$ limit (in distribution) of the
collection of the block averages of elemental Ising spins $\{\sigma_{x,n}\}$ (the dots in Fig. 1) + +$$
+\varphi_x^{(N)} = \alpha_N(\lambda, b) \sum_{n=1}^{N} \sigma_{x,n} \qquad (2.3)
+$$ + +under the “ultra-local” coupling (which is to be added to the intersite interaction $H$ of
(1.12)) + +$$
+H_{\text{inner}} = - \frac{g_N(\lambda, b)}{N} \sum_{x \in \Lambda} \sum_{n,m} \sigma_{x,n} \sigma_{x,m} \quad (2.4)
+$$ + +with suitably adjusted $(\alpha_N, g_N)$. Their exact values are not important for our discussion,
but let us note that $H_{\text{inner}}$ is a mean field interaction and thus it is easy to see that for
each $(\lambda, b)$ with $\lambda \neq 0$: $g_N(\lambda, b)$ tends to 1 as $N$ tends to infinity, at a $(\lambda, b)$ dependent
rate. + +In this representation, any system of $\phi^4$ variables associated with the sites of a graph $\mathcal{V}$, and coupled through the graph's edges, is presentable as the limit ($N \to \infty$) of a system of constituent Ising spins associated with the Cartesian graph product $\mathcal{V} \times \mathcal{K}_N$, with $\mathcal{K}_N$ denoting the complete graph of $N$ vertices. +---PAGE_BREAK--- + +# 3 Random current intersection probabilities + +## 3.1 Definition and switching lemma + +Starting with the Ising model, in this section we briefly introduce its random current representation, which allows one to express the model’s subtle correlation effects in more tangible stochastic geometric terms. 
The utility of the random current representation is enhanced by the combinatorial symmetry expressed in its *switching lemma*, which enables one to structure some of the essential truncated correlations in terms guided by the analysis of the intersection properties of the traces of random walks. + +**Definition 3.1** A current configuration **n** on Λ is an integer-valued function defined over unordered pairs $\{x, y\} \subset \Lambda$. The current's set of sources is defined as the set + +$$ \partial \mathbf{n} := \{ x \in \Lambda : (-1)^{\sum_{y \in \Lambda} \mathbf{n}(x,y)} = -1 \}. \quad (3.1) $$ + +For a given Ising model on $\Lambda$, we associate to a current configuration the weight + +$$ w(\mathbf{n}) = w_{\Lambda, J, \beta}(\mathbf{n}) := \prod_{\{x,y\} \subset \Lambda} \frac{(\beta J_{x,y})^{\mathbf{n}(x,y)}}{\mathbf{n}(x,y)!}. \quad (3.2) $$ + +Starting from Taylor's expansion + +$$ \exp(\beta J_{x,y} \sigma_x \sigma_y) = \sum_{\mathbf{n}(x,y) \ge 0} \frac{(\beta J_{x,y} \sigma_x \sigma_y)^{\mathbf{n}(x,y)}}{\mathbf{n}(x,y)!}, \quad (3.3) $$ + +one can see that the Ising model’s partition function (defined below (1.17)) can be expressed in terms of the corresponding random current: + +$$ Z(\Lambda, \beta) = 2^{|\Lambda|} \sum_{\mathbf{n}:\partial\mathbf{n}=\emptyset} w(\mathbf{n}). \quad (3.4) $$ + +Furthermore, the spin-spin correlation functions can be represented as + +$$ \langle \prod_{x \in A} \sigma_x \rangle_{\Lambda, \beta} = \frac{\sum_{\mathbf{n}: \partial\mathbf{n}=A} w(\mathbf{n})}{\sum_{\mathbf{n}: \partial\mathbf{n}=\emptyset} w(\mathbf{n})}. \quad (3.5) $$ + +At this point, it helps to note that any configuration with $\partial\mathbf{n} = \emptyset$, i.e. without sources, can be viewed as the edge count of a multigraph which is decomposable into a union of loops. 
In contrast, any configuration with $\partial\mathbf{n} = A$, such as the one appearing in the numerator of (3.5), can be viewed as describing the edge count of a multigraph which is decomposable into a collection of loops and of paths connecting pairwise the sources, i.e. sites of $A$. In particular, a configuration with $\partial\mathbf{n} = \{u, v\}$ can be viewed as giving the “flux numbers” of a family of loops together with a path from $u$ to $v$. Thus, the random current representation allows to present the spin-spin correlation as the effect on the partition function of a loop system with the addition of a path linking the two sources. In these terms, the spin-spin correlation $\langle \sigma_{x_1} \cdots \sigma_{x_{2n}} \rangle_\beta$ represents the sum of the multiplicative effect of the introduction of $n$ paths pairing the sources. + +Connectivity properties of currents play a significant role in our analysis. To express those we shall employ the following terminology and notation. +---PAGE_BREAK--- + +**Definition 3.2** i) We say that $x$ is connected to $y$ (in $\mathbf{n}$), and denote the event by $x \xrightarrow{\mathbf{n}} y$, if there exists a path of vertices $x = u_0, u_1, \dots, u_k = y$ with $\mathbf{n}(u_i, u_{i+1}) > 0$ for every $0 \le i < k$. We say that $x$ is connected to a set $S$ if it is connected to a vertex in $S$. + +ii) The cluster of $x$, denoted by $C_n(x)$, is the set of vertices connected to $x$ in $\mathbf{n}$. + +iii) For a set of vertices $B$, we denote by $\mathcal{F}_B$ the set of $\mathbf{n}$ satisfying that there exists a sub-current $\mathbf{m} \le \mathbf{n}$ such that $\partial\mathbf{m} = B$. + +Some of the most powerful properties of the random current representation are best seen when considering pairs of random currents and using the following lemma. 
+ +**Lemma 3.3 (Switching lemma)** For any $A, B \subset \Lambda$ and any function $F$ from the set of currents into $\mathbb{R}$, + +$$ \sum_{\substack{\mathbf{n}_1:\partial\mathbf{n}_1=A \\ \mathbf{n}_2:\partial\mathbf{n}_2=B}} F(\mathbf{n}_1+\mathbf{n}_2)w(\mathbf{n}_1)w(\mathbf{n}_2) = \sum_{\substack{\mathbf{n}_1:\partial\mathbf{n}_1=A\Delta B \\ \mathbf{n}_2:\partial\mathbf{n}_2=\emptyset}} F(\mathbf{n}_1+\mathbf{n}_2)w(\mathbf{n}_1)w(\mathbf{n}_2)\mathbf{1}_{\mathbf{n}_1+\mathbf{n}_2 \in \mathcal{F}_B}. \quad (3.6) $$ + +where $A\Delta B$ denotes the symmetric difference of sets, $A\Delta B := (A \setminus B) \cup (B \setminus A)$. + +The switching lemma appeared as a combinatorial identity in Griffiths-Hurst-Sherman's derivation of the GHS inequality [24]. Its greater potential for the geometrization of the correlation functions was developed in [1], and works which followed. In this paper, we employ two generalizations of this useful identity. In the first, the two currents **n**₁ and **n**₂ need not be defined on the same graph (see [4, Lemma 2.2] for details). The second will involve a slightly more general switching statement, which was used in several occasions in the past (cf. [5, Lemma 2.1] and reference therein). + +It should be recognized that other stochastic geometric representations of spin correlations and/or interactions can be found (e.g. the Symanzik representation of the $\phi^4$ action [48], and the BFS random walk representation of the correlation functions [11]). It is conceivable that the overall strategy could be applied also through other means. However we find the random current representation particularly useful for our purpose. + +## 3.2 Representation of Ursell's four-point function + +The switching lemma enables one to rewrite spin-spin correlation ratios in terms of probabilities of events expressed in terms of the random currents. 
The first of these is the relation + +$$ \frac{\langle \sigma_A \rangle_{\Lambda, \beta} \langle \sigma_B \rangle_{\Lambda, \beta}}{\langle \sigma_A \sigma_B \rangle_{\Lambda, \beta}} = \mathbf{P}_{\Lambda, \beta}^{A\Delta B, \emptyset} [\mathbf{n}_1 + \mathbf{n}_2 \in \mathcal{F}_B], \qquad (3.7) $$ + +where we denote by $\mathbf{P}_{\Lambda,\beta}^A (\mathbf{n})$ the probability distribution on random currents constrained by the source condition $\partial\mathbf{n} = A$, or more explicitly + +$$ \mathbf{P}_{\Lambda, \beta}^{A}(\mathbf{n}) := \frac{2^{|\Lambda|} w(\mathbf{n})}{\langle \prod_{x \in A} \sigma_x \rangle_{\Lambda, \beta} Z(\Lambda, \beta)} \mathbb{I}[\partial\mathbf{n} = A], \quad (3.8) $$ + +and by $\mathbf{P}_{\Lambda,\beta}^{A_1, \dots, A_i}$ we denote the law of an independent family of currents $(\mathbf{n}_1, \dots, \mathbf{n}_i)$ + +$$ \mathbf{P}_{\Lambda, \beta}^{A_1, \dots, A_i} := \mathbf{P}_{\Lambda, \beta}^{A_1} \otimes \cdots \otimes \mathbf{P}_{\Lambda, \beta}^{A_i}. \quad (3.9) $$ + +For two-point sets we may write $A = xy$ instead of $\{x,y\}$. +---PAGE_BREAK--- + +As we will also work with the infinite volume Gibbs measures, let us note that random currents and the switching lemma admit a generalization to infinite volume³. Existing continuity results [4] permit us to extend (3.7) to the infinite volume, expressed in terms of the weak limits of the random current measures $\mathbf{P}_{\Lambda_n, \beta}^A$ and $\mathbf{P}_{\Lambda_n, \beta}^{A_1, \dots, A_i}$, in the limit $\Lambda_n \nearrow \mathbb{Z}^d$. The limiting statement is similar to (3.7) but without the finite volume subscript $\Lambda$: + +$$ \frac{\langle \sigma_A \rangle_\beta \langle \sigma_B \rangle_\beta}{\langle \sigma_A \sigma_B \rangle_\beta} = \mathbf{P}_\beta^{A\Delta B, \emptyset}[\mathbf{n}_1 + \mathbf{n}_2 \in \mathcal{F}_B]. 
\quad (3.10) $$ + +Combining (3.10) for the different values of the product of spin-spin correlations leads to + +$$ U_4^\beta(x, y, z, t) = -2 \langle \sigma_x \sigma_y \rangle_\beta \langle \sigma_z \sigma_t \rangle_\beta \mathbf{P}_\beta^{xy,zt} [\mathbf{C}_{n_1+n_2}(x) \cap \mathbf{C}_{n_1+n_2}(z) \neq \emptyset]. \quad (3.11) $$ + +This equality is of fundamental importance to the question discussed here. It was the basis of the analysis of [1], and is the starting point for our discussion. + +By (3.11), the relative magnitude of the deviation of the four-point function $\langle \sigma_x \sigma_y \sigma_z \sigma_t \rangle_\beta$ from the Gaussian law (i.e. the discrepancy in Wick's formula) is bounded in terms of intersection properties of the two clusters that link the indicated sources pairwise: + +$$ \frac{|U_4^\beta(x, y, z, t)|}{\langle \sigma_x \sigma_y \sigma_z \sigma_t \rangle_\beta} \le 2 \mathbf{P}_\beta^{xy,zt} [\mathbf{C}_{n_1+n_2}(x) \cap \mathbf{C}_{n_1+n_2}(z) \neq \emptyset]. \quad (3.12) $$ + +The random sets $\mathbf{C}_{n_1+n_2}(x)$ and $\mathbf{C}_{n_1+n_2}(z)$ are not independently distributed. However (3.12) can be further simplified through a monotonicity property of random currents. As proved in [1], and recalled here in the Appendix, the probability of an intersection can only increase upon the two sets' replacement by a pair of independently distributed clusters defined through the addition of two sourceless currents: + +$$ \mathbf{P}_\beta^{xy,zt} [\mathbf{C}_{n_1+n_2}(x) \cap \mathbf{C}_{n_1+n_2}(z) \neq \emptyset] \leq \mathbf{P}_\beta^{xy,zt,\emptyset,\emptyset} [\mathbf{C}_{n_1+n_3}(x) \cap \mathbf{C}_{n_2+n_4}(z) \neq \emptyset]. 
\quad (3.13) $$ + +This leads to the simpler upper bound in which the two random sets are independent: + +$$ |U_4^\beta(x, y, z, t)| \le 2 \langle \sigma_x \sigma_y \rangle_\beta \langle \sigma_z \sigma_t \rangle_\beta \mathbf{P}_\beta^{xy,zt,\emptyset,\emptyset} [\mathbf{C}_{n_1+n_3}(x) \cap \mathbf{C}_{n_2+n_4}(z) \neq \emptyset]. \quad (3.14) $$ + +Bounding the intersection probability by the expected number of intersection sites and applying the switching lemma leads directly to the tree diagram bound (1.22). However, as was explained above, to tackle the marginal dimension $d=4$ one needs to improve on that. + +While $\mathbf{C}_{n_1+n_3}(x)$ and $\mathbf{C}_{n_2+n_4}(z)$ are bulkier and exhibit less independence than simple random walks linking the sources $\{x,y\}$ and $\{z,t\}$, the analogy is of help in guiding the intuition towards useful estimate strategies. In particular, it is classical that in dimension $d=4$ the probability that the traces of two random walks starting at distance $L$ of each other intersect, tends to 0 (as $1/\log L$, see [2, (2.8)] and [34]), but nevertheless the expected number of points of intersection remains of order $\Omega(1)$. The discrepancy is explained by the fact that although the intersections occur rarely, the conditional expectation of the number of intersection sites, conditioned on there being at least one, diverges logarithmically in $L$. The thrust of our analysis will be to establish similar behaviour in the system considered here. More explicitly, we will prove that the conditional expectation of the clusters' intersection size, conditioned on it being non-empty, grows at least as $(\log L)^c$. 
 + +The analysis of clusters’ intersection properties is more difficult than that of the paths of simple random walks for at least two reasons: + +³The extension of the switching lemma to $\mathbb{Z}^d$ is straightforward for $\beta \le \beta_c$ since then $\mathbf{n}_1 + \mathbf{n}_2$ does not contain infinite paths of positive currents, almost surely under $\mathbf{P}_\beta^{A,B}$. For $\beta < \beta_c$ this is implied by the discussion of [1], and for $\beta = \beta_c$ it follows from the continuity result of [4]. +---PAGE_BREAK--- + +* Missing information on the two-point function: Most analyses of intersection properties of random walks involve estimates on the Green function. In our system its role is to some extent taken by the two-point spin-spin correlation function. However, unlike the former case we do not a priori know the two-point function's exact order of magnitude (though a good one-sided inequality is provided by the Infrared Bound). This raises a difficulty that we address by studying the regularity properties of the two-point function in Section 5. + +* The lack of a simple Markov property: in one way or another, the analysis of intersections for random walks involves the random walk's Markov property. Among its other applications, the walk's renewal property facilitates de-correlating the walks' behaviour at different places. In comparison, the random current clusters exhibit only a multidimensional domain Markov property. One of the main contributions of this paper will be to show a mixing property of random currents which will enable us to bypass the difficulty raised by the lack of a renewal property. + +We expect that both the regularity estimates and the mixing properties established here are of independent interest, and may be of help in studies of the model also in three dimensions. 
 + +# 4 A conditional improvement of the tree diagram bound for $\beta = \beta_c$ + +To better convey the strategy by which the tree diagram bound is improved, we start with a conditional proof of (1.24) for the Ising model on $\mathbb{Z}^4$ at criticality (i.e. when $\beta = \beta_c$), under the following assumption on the model’s two-point function. The removal of this assumption will raise substantial problems which are presented in the sections that follow. Below, $|\cdot|$ denotes the infinity-norm + +$$ |x| := \max\{|x_i|, 1 \le i \le d\}. \quad (4.1) $$ + +**Assumption 4.1 (Power-law decay)** There exist $\eta$ and $c, C \in (0, \infty)$ such that for every $x \in \mathbb{Z}^d$, + +$$ \frac{c}{|x|^{d-2+\eta}} \le \langle \sigma_0 \sigma_x \rangle_{\beta_c} \le \frac{C}{|x|^{d-2+\eta}}. \quad (4.2) $$ + +The Infrared Bound (5.37) guarantees that $\eta \ge 0$ in any dimension $d > 2$. Note that if $\eta > 0$ for $d = 4$, then $B_L(\beta_c)$ is bounded uniformly in $L$ in which case the tree diagram bound implies the improved one. Thus, under this assumption the case requiring attention is just $\eta = 0$ (which is the generally expected value). + +## 4.1 Intersection clusters + +Our starting point is (3.14) in which $U_4^{\beta_c}$ is bounded by the probability of intersection of two independently distributed clusters $\mathbf{C}_{\mathbf{n}_1+\mathbf{n}_3}(x)$ and $\mathbf{C}_{\mathbf{n}_2+\mathbf{n}_4}(z)$, of which $\mathbf{n}_1$ and $\mathbf{n}_2$ include paths linking pairwise widely separated sources, $\partial \mathbf{n}_1 = \{x,y\}$ and $\partial \mathbf{n}_2 = \{z,t\}$. Introduce the notation + +$$ \mathcal{T} := \mathbf{C}_{\mathbf{n}_1+\mathbf{n}_3}(x) \cap \mathbf{C}_{\mathbf{n}_2+\mathbf{n}_4}(z), \quad (4.3) $$ +---PAGE_BREAK--- + +and let $|\mathcal{T}|$ be the set's cardinality. 
The tree diagram bound corresponds to the first moment estimate: + +$$ \mathbf{P}_{\beta_c}^{xy,zt,\emptyset,\emptyset} [|\mathcal{T}| > 0] \leq \mathbf{E}_{\beta_c}^{xy,zt,\emptyset,\emptyset} [|\mathcal{T}|], \quad (4.4) $$ + +in which the intersection probability is bounded by the intersection set's expected size. + +Although the set $\mathcal{T}$ is less tractable than the intersection of a pair of Markovian random walks, their intuitive example provides a useful guide. The intersection of the traces of two simple random walks in dimension $d = 4$ has a Cantor-set like structure. Guided by this analogy, and taking advantage of the switching lemma, we show that conditioned on the event that $u$ belongs to $\mathcal{T}$, the intersection $|\mathcal{T}|$ is typically very large. This is in line with our expectation that the vertices in the intersection set occur in large (disconnected) clusters, causing the expected size of $|\mathcal{T}|$ to be much larger than the probability of it being non-zero. + +Below and in the rest of this article, we introduce the annulus of sizes $k \le n$ and the boundary of a box as follows: + +$$ \mathrm{Ann}(k,n) := \Lambda_n \setminus \Lambda_{k-1} \quad \text{and} \quad \partial\Lambda_n := \mathrm{Ann}(n,n) \tag{4.5} $$ + +(cf. Fig. 2). + +In the proof, we apply the following deterministic covering lemma, which links the number of points in a set $\mathcal{X} \subset \mathbb{Z}^d$ with the number of concentric annuli of the form $u + \mathrm{Ann}(\ell_k, \ell_{k+1})$, with $u \in \mathcal{X}$, which it takes to cover $\mathcal{X}$. To state it we denote, for any (possibly finite) increasing sequence of lengths $\mathcal{L} = (\ell_k)$, every $u \in \mathbb{Z}^d$, and every integer $K$, + +$$ \mathbf{M}_u(\mathcal{X}; \mathcal{L}, K) = \mathrm{card}\{k \le K : \mathcal{X} \cap [u + \mathrm{Ann}(\ell_k, \ell_{k+1})] \ne \emptyset\} \tag{4.6} $$ + +(cf. Fig. 2). 
+ +**Lemma 4.2** (Annular covering) In the above notation, for any sequence $\mathcal{L} = (\ell_k)$ with $\ell_1 \ge 1$ and $\ell_{k+1} \ge 2\ell_k$ + +$$ |\mathcal{X}| \ge 2^{\min\{\mathbf{M}_u(\mathcal{X};\mathcal{L},K)/5: u \in \mathcal{X}\}}. \tag{4.7} $$ + +**Proof** It suffices to show that if $|\mathcal{X}| < 2^r$ for some $r$, then there exists a site $u \in \mathcal{X}$ for which $\mathbf{M}_u(\mathcal{X};\mathcal{L},K) < 5r$. + +We prove the following stronger statement: For every set $\mathcal{X}$ containing the origin and every $K$, if $|\mathcal{X} \cap \Lambda_{\ell_K}| < 2^r$, then there exists $u \in \mathcal{X} \cap \Lambda_{\ell_K}$ with $M_u(\mathcal{X};\mathcal{L},K) < 5r$. + +The assertion is obviously true for $r=1$ as one can pick $u$ to be the origin. Next, consider the case of $r > 1$ assuming the statement holds for all smaller values. If the intersection of $\mathcal{X}$ and $\Lambda_{\ell_{K-1}}$ is reduced to the origin, then $M_0(\mathcal{X};\mathcal{L},K) \le 2$ (only the annuli $\mathrm{Ann}(\ell_l, \ell_{l+1})$ with $l$ equal to $K-1$ or $K$ can intersect $\mathcal{X}$) as required so we now assume that this is not the case. Consider $0 \le k \le K-2$ maximal such that there exists $u \in \mathcal{X}$ with $\ell_k < |u| \le \ell_{k+1}$. + +Since $\mathcal{X} \cap \Lambda_{\ell_{k-1}}$ and $\mathcal{X} \cap (u + \Lambda_{\ell_{k-1}})$ are disjoint (we use that $\ell_k \ge 2\ell_{k-1}$), one of the two sets has cardinality strictly smaller than $2^{r-1}$. Assume first that it is $\mathcal{X} \cap \Lambda_{\ell_{k-1}}$. The induction hypothesis implies the existence of $v \in \mathcal{X} \cap \Lambda_{\ell_{k-1}}$ such that + +$$ \mathbf{M}_v(\mathcal{X};\mathcal{L},k-1) < 5(r-1). \tag{4.8} $$ + +By our choice of $k$, every site in $\mathcal{X}$ is either in $\Lambda_{\ell_{k+1}}$ or outside of $\Lambda_{\ell_{K-1}}$. 
This implies that only the annuli $\mathrm{Ann}(\ell_l, \ell_{l+1})$ with $l$ equal to $k, k+1, K-2, K-1$ or $K$ can intersect $\mathcal{X}$, so that + +$$ \mathbf{M}_v(\mathcal{X};\mathcal{L},K) \leq \mathbf{M}_v(\mathcal{X};\mathcal{L},k-1) + 5 < 5r. \tag{4.9} $$ +---PAGE_BREAK--- + +Figure 2: The two (duplicated)-currents $\mathbf{n}_1+\mathbf{n}_3$ and $\mathbf{n}_2+\mathbf{n}_4$ in blue and black respectively. The clusters of $x$ (or equivalently $y$) in $\mathbf{n}_1+\mathbf{n}_3$ and $z$ (or equivalently $t$) in $\mathbf{n}_2+\mathbf{n}_4$ are depicted in bold. The red vertices are the elements of the intersection $\mathcal{T}$. We illustrated the annuli around one element, denoted $u$, of $\mathcal{T}$ and drew them in gray when an intersection occurs. Here, we therefore have $M_u(\mathcal{T}; \mathcal{L}, 5) = 3$ since three annuli contain an intersection. + +If it is $\mathcal{X} \cap (u + \Lambda_{\ell_{k-1}})$ which has small cardinality, simply translate the set by $u$ and apply
the same reasoning. The distance between the vertex $v$ obtained by the procedure and 0
is at most $\ell_{k-1} + \ell_k \le \ell_K$, so that the claim follows in this case as well. □ + +In the following conditional statement, we denote by $\mathcal{L}_\alpha$ a sequence of integers defined
recursively so that $\ell_{k+1} = \ell_k^\alpha$ with a specified $\alpha > 1$ and $\ell_0$ a large enough integer. + +**Proposition 4.3 (Conditional intersection-clustering bound)** *Under the assumption that the Ising model on $\mathbb{Z}^4$ satisfies (4.2) with $\eta = 0$ and restricting to $\alpha > 3^8$: there exist $\ell_0 = \ell_0(\alpha)$ and $\delta = \delta(\alpha) > 0$ such that for every $K > 2$ and every $u, x, y, z, t \in \mathbb{Z}^4$ with mutual distance between $x, y, z, t$ larger than $2\ell_K$,* + +$$
+\mathbf{P}_{\beta_c}^{ux,uz,uy,ut}[\mathbf{M}_u(\mathcal{T}; \mathcal{L}_\alpha, K) < \delta K] \le 2^{-\delta K}. 
\quad (4.10) +$$ + +Before deriving this estimate, which is proven in the next section, let us show how it +leads to the improved tree diagram bound. + +**Proof of Theorem 1.3 under the assumption** (4.2). As the discussion is limited here to $\beta = \beta_c$, we omit it from the notation. If $\eta > 0$ the bubble diagram is finite and hence the desired statement is already contained in the tree diagram bound (1.22). Focus then on the case $\eta = 0$, for which the bubble diagram diverges logarithmically. Fix $\alpha > 3^8$ and let $\ell_0$ and $\delta$ be given by Proposition 4.3. Since $x, y, z, t$ are at mutual distances at least $L$, there exists $c = c(\alpha) > 0$ such that one may pick + +$$ +K = K(L) \geq c \log \log L \tag{4.11} +$$ + +in such a way that $L \ge 2\ell_K$. +---PAGE_BREAK--- + +Using Lemma 4.2, then the switching lemma, and finally Proposition 4.3, we get + +$$ +\begin{align*} +\mathbf{P}^{xy,zt,\emptyset,\emptyset}[0 < |\mathcal{T}| < 2^{\delta K/5}] & \le \sum_{u \in \mathbb{Z}^4} \mathbf{P}^{xy,zt,\emptyset,\emptyset}[u \in \mathcal{T}, \mathbf{M}_u(\mathcal{T}; \mathcal{L}_\alpha, K) < \delta K] \\ +& = \sum_{u \in \mathbb{Z}^4} \frac{\langle \sigma_u \sigma_x \rangle \langle \sigma_u \sigma_y \rangle \langle \sigma_u \sigma_z \rangle \langle \sigma_u \sigma_t \rangle}{\langle \sigma_x \sigma_y \rangle \langle \sigma_z \sigma_t \rangle} \mathbf{P}^{ux,uz,uy,ut}[\mathbf{M}_u(\mathcal{T}; \mathcal{L}_\alpha, K) < \delta K] \\ +& \le 2^{-\delta K} \sum_{u \in \mathbb{Z}^4} \frac{\langle \sigma_u \sigma_x \rangle \langle \sigma_u \sigma_y \rangle \langle \sigma_u \sigma_z \rangle \langle \sigma_u \sigma_t \rangle}{\langle \sigma_x \sigma_y \rangle \langle \sigma_z \sigma_t \rangle}. 
\tag{4.12} +\end{align*} +$$ + +For the larger values of $|\mathcal{T}|$, the Markov inequality and the switching lemma give + +$$ +\begin{align} +\mathbf{P}^{xy,zt,\emptyset,\emptyset}[|\mathcal{T}| \ge 2^{\delta K/5}] &\le 2^{-\delta K/5} \mathbf{E}^{xy,zt,\emptyset,\emptyset}[|\mathcal{T}|] \\ +&= 2^{-\delta K/5} \sum_{u \in \mathbb{Z}^4} \frac{\langle \sigma_u \sigma_x \rangle \langle \sigma_u \sigma_y \rangle \langle \sigma_u \sigma_z \rangle \langle \sigma_u \sigma_t \rangle}{\langle \sigma_x \sigma_y \rangle \langle \sigma_z \sigma_t \rangle}. \tag{4.13} +\end{align} +$$ + +Adding (4.12) and (4.13) gives an improved tree diagram bound which, in view of (4.11) +and of the logarithmic divergence of $B_L(\beta_c)$ implied by $\eta = 0$, yields (1.24). $\square$ + +## 4.2 Derivation of the conditional intersection-clustering bound (Proposition 4.3) + +The intuition underlying the conditional intersection-clustering bound and the choice of $\ell_k$ is guided by the aforementioned example of simple random walks. In dimension 4, the traces of two independent random walks starting at the origin intersect in an annulus of the form $\text{Ann}(n, n^\alpha)$ with probability at least $c(\alpha) > 0$ uniformly in $n$. Since the paths traced by these random walks within different annuli are roughly independent, one may expect the number of annuli among the $K$ first ones in which the paths intersect to be, with large probability, of the order of $\delta K$. + +However, in the case considered here, the clusters of $u$ in $\mathbf{n}_1 + \mathbf{n}_3$ and $\mathbf{n}_2 + \mathbf{n}_4$ do not have the renewal structure of Markovian random walks. We shall compensate for that in two steps: + +(i) *reformulate the intersection property*, + +(ii) *derive an asymptotic mixing statement*. 
 + +For the first step, let $I_k$ be the event (with $I$ standing for intersection) that there exist unique clusters of $\text{Ann}(\ell_k, \ell_{k+1})$ in $\mathbf{n}_1 + \mathbf{n}_3$ and $\mathbf{n}_2 + \mathbf{n}_4$ crossing the annulus from the inner boundary to the outer boundary and that these two clusters are intersecting. Lemma 4.4 presents the statement that the probability that the event occurs and that these clusters intersect is bounded away from 0 uniformly in $k$. + +Note that the annuli $\text{Ann}(\ell_k, \ell_{k+1})$ are wide enough so that sourceless currents will typically have no radial crossing, and when such crossings are forced by the placement of sources (for instance when one source is at the common center of a family of nested annuli and the other at a distant site outside), in each annulus there will most likely be only one crossing cluster. It then follows that all the crossing clusters of $\mathbf{n}_1 + \mathbf{n}_3$ belong to the $\mathbf{n}_1 + \mathbf{n}_3$ cluster of the sources, and a similar property holds for the crossing clusters of $\mathbf{n}_2 + \mathbf{n}_4$. + +For the second step, we prove that events observed within sufficiently separated annuli are roughly independent. The exact assertion is presented below in Proposition 4.6 and will be the crux of the whole paper. + +Following is the first of these two statements. +---PAGE_BREAK--- + +**Lemma 4.4 (Conditional intersection-clustering property)** Assume (4.2) holds for the Ising model on $\mathbb{Z}^4$ with $\eta = 0$. For $\alpha > 3^4$, there exist $\ell_0 = \ell_0(\alpha)$ and $c = c(\alpha, \ell_0) > 0$ such that for every $x, z \notin \Lambda_{2\ell_{k+1}}$, + +$$
+\mathbf{P}_{\beta_c}^{0x,0z,\emptyset,\emptyset}[I_k] \geq c. 
\tag{4.14}
+$$ + +The main ingredient in the proof is a second moment method on the number of inter-
sections in $\text{Ann}(\ell_k, \ell_{k+1})$ of the clusters of the origin in $\mathbf{n}_1 + \mathbf{n}_3$ and $\mathbf{n}_2 + \mathbf{n}_4$. A second part
of the proof is devoted to the uniqueness of the clusters crossing the annulus. This makes
the event under consideration measurable in terms of the currents within just the specified
annulus, allowing us to apply the mixing property for the proof of Proposition 4.3, which
follows further below. + +**Proof** Drop $\beta_c$ from the notation. Fix $\alpha > 3^4$ and set $\varepsilon > 0$ so that $\alpha > (1+\varepsilon)(3+\varepsilon)^4$. The constants $c_i$ below depend on $\varepsilon$ only. Introduce the intermediary integers $n \le m \le M \le N$ satisfying + +$$
+n \ge \ell_k^{3+\epsilon}, \quad m \ge n^{3+\epsilon}, \quad M \ge m^{1+\epsilon}, \quad N \ge M^{3+\epsilon}, \quad \ell_{k+1} \ge N^{3+\epsilon}. \tag{4.15}
+$$ + +We start by proving that $\mathcal{M} := \mathbf{C}_{n_1+n_3}(0) \cap \mathbf{C}_{n_2+n_4}(0) \cap \text{Ann}(m, M)$ is non-empty with positive probability by applying a second-moment method on $|\mathcal{M}|$. Namely, the switching lemma (more precisely (A.10)) and (4.2) imply that + +$$
+\begin{align*}
+\mathbf{E}^{0x,0z,\emptyset,\emptyset}[|\mathcal{M}|] &= \sum_{v \in \text{Ann}(m,M)} \mathbf{P}^{0x,\emptyset}[v \stackrel{\mathbf{n}_1+\mathbf{n}_2}{\longleftrightarrow} 0] \mathbf{P}^{0z,\emptyset}[v \stackrel{\mathbf{n}_1+\mathbf{n}_2}{\longleftrightarrow} 0] \\
+&= \sum_{v \in \text{Ann}(m,M)} \frac{\langle \sigma_0 \sigma_v \rangle \langle \sigma_v \sigma_x \rangle}{\langle \sigma_0 \sigma_x \rangle} \frac{\langle \sigma_0 \sigma_v \rangle \langle \sigma_v \sigma_z \rangle}{\langle \sigma_0 \sigma_z \rangle} \\
+&\ge c_1 (B_M - B_{m-1}) \ge c_2 \log(M/m). 
\tag{4.16}
+\end{align*}
+$$ + +On the other hand, we find that + +$$
+\mathbf{E}^{0x,0z,\emptyset,\emptyset}[|\mathcal{M}|^2] = \sum_{v,w \in \text{Ann}(m,M)} \mathbf{P}^{0x,\emptyset}[v,w \stackrel{\mathbf{n}_1+\mathbf{n}_2}{\longleftrightarrow} 0] \mathbf{P}^{0z,\emptyset}[v,w \stackrel{\mathbf{n}_1+\mathbf{n}_2}{\longleftrightarrow} 0]. \quad (4.17)
+$$ + +Now, by a delicate application of the switching lemma and a monotonicity argument we
have the following inequality (stated and proven as Proposition A.3 in the Appendix), + +$$
+\mathbf{P}^{0x,\emptyset}[v, w \stackrel{\mathbf{n}_1+\mathbf{n}_2}{\longleftrightarrow} 0] \leq \frac{\langle\sigma_0\sigma_v\rangle\langle\sigma_v\sigma_w\rangle\langle\sigma_w\sigma_x\rangle}{\langle\sigma_0\sigma_x\rangle} + \frac{\langle\sigma_0\sigma_w\rangle\langle\sigma_w\sigma_v\rangle\langle\sigma_v\sigma_x\rangle}{\langle\sigma_0\sigma_x\rangle}. \quad (4.18)
+$$ + +Together with (4.2), this gives + +$$
+\mathbf{E}^{0x,0z,\emptyset,\emptyset}[|\mathcal{M}|^2] \le C_3(B_M - B_{m-1}) B_{2M} \le C_4(\log M)^2. \quad (4.19)
+$$ + +The second moment (or Cauchy-Schwarz) inequality, and the bound $M \ge m^{1+\epsilon}$ thus imply + +$$
+\mathbf{P}^{0x,0z,\emptyset,\emptyset}[\mathcal{M} \neq \emptyset] \geq \frac{\mathbf{E}^{0x,0z,\emptyset,\emptyset}[|\mathcal{M}|]^2}{\mathbf{E}^{0x,0z,\emptyset,\emptyset}[|\mathcal{M}|^2]} \geq c_5 > 0. \quad (4.20)
+$$ + +At this stage, one may feel that the main point of the lemma was established: we showed
that with uniformly positive probability the clusters of 0 in **n**₁ + **n**₃ and **n**₂ + **n**₄ intersect
in Ann(*m*, *M*). However, to conclude the argument we need to establish the uniqueness,
with large probability, of the crossing cluster in **n**₁ + **n**₃ (the same then holds true for
+---PAGE_BREAK--- + +**n**₂ + **n**₄). This part of the proof is slightly more technical and may be omitted in a first reading. It is here that we shall need α to be large enough. 
+ +To prove the uniqueness of crossings, we employ the notion of the current's *backbone*⁴, on which more can be found in [1, 3, 12, 13, 15]. If the event {M ≠ Ø} occurs but not Iₖ, then one of the following four events must occur (see e.g. Fig. 3): + +$F_1 := \text{the backbone } \Gamma(\mathbf{n}_1) \text{ of } \mathbf{n}_1 \text{ does two successive crossings of } \mathrm{Ann}(\ell_k, n);$ + +$F_2 := \mathbf{n}_1 + \mathbf{n}_2 \text{ contains a cluster crossing } \mathrm{Ann}(n, m) \setminus \Gamma(\mathbf{n}_1);$ + +$F_3 := \mathbf{n}_1 + \mathbf{n}_2 \text{ contains a cluster crossing } \mathrm{Ann}(M, N) \setminus \Gamma(\mathbf{n}_1);$ + +$F_4 := \text{the backbone } \Gamma(\mathbf{n}_1) \text{ of } \mathbf{n}_1 \text{ does two successive crossings of } \mathrm{Ann}(N, \ell_{k+1}).$ + +We bound the probabilities of these events separately. For $F_1$ to occur, the backbone $\Gamma(\mathbf{n}_1)$ must do a zigzag: to go from 0 to a vertex $v \in \partial\Lambda_n$, then to a vertex $w \in \partial\Lambda_{\ell_k}$, and finally to $x$. The *chain rule* for backbones (see e.g. [3]) combined with the assumed condition (4.2), jointly imply that + +$$ +\mathbf{P}^{0x,\emptyset}[F_1] \leq \sum_{\substack{v \in \partial\Lambda_n \\ w \in \partial\Lambda_{\ell_k}}} \frac{\langle \sigma_0 \sigma_v \rangle \langle \sigma_v \sigma_w \rangle \langle \sigma_w \sigma_x \rangle}{\langle \sigma_0 \sigma_x \rangle} \leq C_6 n^3 \ell_k^3 n^{-4} \leq C_7 \ell_k^{-\epsilon}. \quad (4.21) +$$ + +To bound the probability of $F_2$, condition on $\Gamma(\mathbf{n}_1)$. The remaining current in $\mathbf{n}_1$ is a sourceless current with depleted coupling constants (see [3, 12, 13] for details on this type of reasoning). 
The probability that some $v \in \partial\Lambda_n$ and $w \in \partial\Lambda_m$ are connected in $Z^4 \setminus \Gamma(\mathbf{n}_1)$ to each other can then be bounded by $\langle\sigma_v\sigma_w\rangle\langle\sigma_v\sigma_w\rangle'$ where the $\langle\cdot\rangle'$ denotes an Ising measure with depleted coupling constants (the depletion depends on $\Gamma(\mathbf{n}_1)$ and the switching lemma concerns one current with depletion and one without; we refer to [4] for the statement and proof of the switching lemma in this context, and some applications). At the risk of repeating ourselves, we refer to [3] for an illustration of this line of reasoning. The Griffiths inequality [22] implies that this probability is bounded by $\langle\sigma_v\sigma_w\rangle^2$, which together with (4.2), immediately leads to the following sequence of inequalities: + +$$ +\mathbf{P}^{0x,\emptyset}[F_2] \leq \sum_{\substack{v \in \partial\Lambda_n \\ w \in \partial\Lambda_m}} (\sigma_v \sigma_w)^2 \leq C_8 n^{-\epsilon}. \tag{4.22} +$$ + +The event $F_4$ is bounded similarly to $F_1$, and $F_3$ similarly to $F_2$. For $\ell_0 = \ell_0(\epsilon)$ large enough the sum of the four probabilities does not exceed half of the constant $c_5$ in (4.20), and the main statement follows. $\square$ + +**Remark 4.5** The condition $\alpha > 3^4$ is used in the second part of the proof, where we need +the exponent connecting the inner and outer radii of annuli to be strictly larger than 3. +We did not try to improve on this exponent. + +The second of the above described statements is one of the main innovations of this +paper. It concerns a mixing property, which in Section 6.1 will be stated under a more +general form and derived unconditionally for every $d \ge 4$. + +⁴We mentioned that a current **n** with sources x and y can be seen as the superposition of one path from x to y and loops. The backbone Γ(**n**) is an appropriate choice of such a path induced by an ordering of the edges. 
Again, we refrain ourselves from providing more details here and refer to the relevant literature for details on this notion. +---PAGE_BREAK--- + +Figure 3: In this picture, $\Gamma(\mathbf{n}_1)$ does only one crossing of $\text{Ann}(\ell_k, n)$, and $\mathbf{n}_1 + \mathbf{n}_2 - \Gamma(\mathbf{n}_1)$ does not cross $\text{Ann}(n, m) \setminus \Gamma(\mathbf{n}_1)$. This prevents the fact that the cluster in red, made of loops in $\mathbf{n}_1+\mathbf{n}_2-\Gamma(\mathbf{n}_1)$ would connect an excursion of $\Gamma(\mathbf{n}_1)$ outside of $\Lambda_{\ell_k}$ but not reaching $\partial\Lambda_n$ to $\partial\Lambda_m$ (which would potentially create an additional cluster crossing $\text{Ann}(\ell_k, m)$). + +**Proposition 4.6 (Conditional mixing property)** Assume that the complementary pair of power law bounds (4.2) holds for the Ising model on $Z^4$ with $\eta = 0$, and fix $\alpha > 3^8$. Then there exists $C > 0$ such that for every $n^\alpha \le N$, every $x \notin \Lambda_N$, and every pair of events E and F depending on the restriction of **n** to edges within $\Lambda_n$ and outside of $\Lambda_N$ respectively, + +$$ +| \mathbf{P}_{\beta_c}^{0x}[E \cap F] - \mathbf{P}_{\beta_c}^{0x}[E] \mathbf{P}_{\beta_c}^{0x}[F] | \le \frac{C}{\sqrt{\log(N/n)}}. \quad (4.23) +$$ + +The heart of the proof will be the use of a (random) resolution of identity $\mathbb{N}$, meaning +a random variable which is concentrated around 1, given by a weighted sum of indicator +functions $\Pi[y \xleftarrow{n_1+n_2} 0]$ with $y \in \mathbb{Z}^d$, where $\partial \mathbf{n}_1 = \{0, x\}$ and $\partial \mathbf{n}_2 = \emptyset$, which will enable us +to write + +$$ +\mathbf{P}^{0x}[E \cap F] \approx \mathbf{E}^{0x,\emptyset}[\mathbb{N}\Pi(\mathbf{n}_1 \in E \cap F)]. 
\quad (4.24) +$$ + +Since $\mathbb{N}$ will be a certain convex combination of the random variables $\Pi[y \underset{\mathbf{n}_1+\mathbf{n}_2}{\stackrel{\hspace{2em}}{\rightleftharpoons}} 0] / (\sigma_0 \sigma_y)$, +the term on the right will be a convex sum of $\mathbf{P}^{0x,\emptyset}$-probabilities of the events $\{y \underset{\mathbf{n}_1+\mathbf{n}_2}{\stackrel{\hspace{2em}}{\rightleftharpoons}} 0, \mathbf{n}_1 \in E \cap F\}$. For each fixed $y$, we will use the switching principle to transform the +sources $\{0, x\}$ and $\emptyset$ of $\mathbf{n}_1$ and $\mathbf{n}_2$ into $\{0, y\}$ and $\{y, x\}$, exchanging at the same time the +roles of $\mathbf{n}_1$ and $\mathbf{n}_2$ inside $\Lambda_n$ without changing anything outside $\Lambda_N$. This useful operation +has a nice byproduct: the event $\mathbf{n}_1 \in F$ becomes $\mathbf{n}_2 \in F$ which is independent of $\mathbf{n}_1 \in E$. +Deducing the mixing from there will be a matter of elementary algebraic manipulations. + +The error term will be (almost entirely) due to how concentrated around 1 N is. In +order to prove this fact, we will implement a refined second moment method in which we +estimate the expectation and the second moment of N sharply. The proof will require +some regularity assumptions on the gradient of the two-point function: for every $x \in \mathbb{Z}^d$, + +$$ +|\nabla_x \langle \sigma_0 \sigma_x \rangle| \leq \frac{C}{|x|} \langle \sigma_0 \sigma_x \rangle, \tag{4.25} +$$ + +which follows from (4.2) by an argument that we choose to postpone to Section 5.5 (after +the required technology has been introduced). +---PAGE_BREAK--- + +**Proof** Let us recall that we are discussing here $\beta = \beta_c$, omitting the symbol from the notation. Fix $\alpha > 3^4$ (the power 4 instead of 8 suffices at this stage) and choose $\varepsilon > 0$ so that $\alpha > (1+\varepsilon)(9+\varepsilon)^2$. 
Below, the constants $C_i$ are independent of $\beta$ and $n^\alpha = N \le \xi(\beta)$ (we may assume equality between $N$ and $n^\alpha$ without loss of generality). Introduce two intermediary integers $m \le M$ satisfying that + +$$m \ge n^{9+\varepsilon}, \quad M \ge m^{1+\varepsilon}, \quad N \ge M^{9+\varepsilon} \tag{4.26}$$ + +as well as the notation $n_k = 2^k m$ for $k \ge 1$. Set $K$ such that $n_{K+1} \le M < n_{K+2}$. The key to our proof will be the random variable + +$$\mathbf{N} := \frac{1}{K} \sum_{k=1}^{K} \frac{1}{\alpha_k} \sum_{y \in \text{Ann}(n_k, n_{k+1})} \mathbb{I}[y^{\mathbf{n}_1 + \mathbf{n}_2} \xleftarrow{\hspace{2em}} 0] \quad \text{where } \alpha_k := \sum_{y \in \text{Ann}(n_k, n_{k+1})} \langle \sigma_0 \sigma_y \rangle. \tag{4.27}$$ + +Combining the regularity assumptions (4.25) and (4.2) with Proposition A.3 (the precise computation is presented in Section 6.2), we find + +$$\mathbf{E}^{0x,\emptyset}[\mathbf{N}] = \frac{1}{K} \sum_{k=1}^{K} \frac{1}{\alpha_k} \sum_{y \in \text{Ann}(n_k, n_{k+1})} \frac{\langle \sigma_0 \sigma_y \rangle \langle \sigma_y \sigma_x \rangle}{\langle \sigma_0 \sigma_x \rangle} \geq 1 - \frac{C_1}{K}, \tag{4.28}$$ + +$$\mathbf{E}^{0x,\emptyset}[\mathbf{N}^2] \leq \frac{1}{K^2} \sum_{k,l=1}^{K} \frac{1}{\alpha_k \alpha_l} \sum_{\substack{y \in \text{Ann}(n_k, n_{k+1}) \\ z \in \text{Ann}(n_l, n_{l+1})}} \frac{\langle \sigma_0 \sigma_y \rangle \langle \sigma_y \sigma_z \rangle \langle \sigma_z \sigma_x \rangle + \langle \sigma_0 \sigma_z \rangle \langle \sigma_z \sigma_y \rangle \langle \sigma_y \sigma_x \rangle}{\langle \sigma_0 \sigma_x \rangle} \leq 1 + \frac{C_2}{K}. 
\tag{4.29}$$ + +The Cauchy-Schwarz inequality and the fact that $\mathbf{P}^{0x}[E \cap F] = \mathbf{P}^{0x,\emptyset}[\mathbf{n}_1 \in E \cap F]$ thus imply that + +$$|\mathbf{P}^{0x,\emptyset}[\mathbf{n}_1 \in E \cap F] - \mathbf{E}^{0x,\emptyset}[\mathbf{N}\mathbb{I}_{\mathbf{n}_1 \in E \cap F}]| \leq \sqrt{\mathbf{E}^{0x,\emptyset}[(\mathbf{N}-1)^2]} \leq \frac{C_3}{\sqrt{K}}. \tag{4.30}$$ + +Now, fix $y \in \text{Ann}(m, M)$ and let $G(y)$ be the event (depending on $\mathbf{n}_1 + \mathbf{n}_2$ only) that there exists $\mathbf{k} \le \mathbf{n}_1 + \mathbf{n}_2$ such that $\mathbf{k}=0$ on $\Lambda_n$, $\mathbf{k}=\mathbf{n}_1+\mathbf{n}_2$ outside $\Lambda_N$, and $\partial\mathbf{k} = \{x,y\}$. We find that + +$$\mathbf{P}^{0x,\emptyset}[\mathbf{n}_1 \in E \cap F, y^{\mathbf{n}_1 + \mathbf{n}_2} \xleftarrow{\hspace{2em}} 0, G(y)] = \frac{\langle \sigma_0 \sigma_y \rangle \langle \sigma_y \sigma_x \rangle}{\langle \sigma_0 \sigma_x \rangle} \mathbf{P}^{0y,yx}[\mathbf{n}_1 \in E, \mathbf{n}_2 \in F, G(y)], \tag{4.31}$$ + +where we use the following reasoning: for $\mathbf{m} \in G(y)$, consider the multi-graph $\mathcal{M}$ obtained by duplicating every edge of the graph into $\mathbf{m}(x, y)$ edges. 
If $G(y)$ occurs, the existence of $\mathbf{k}$ guarantees the existence of a subgraph $\mathcal{K} \subset \mathcal{M}$ with $\partial\mathcal{K} = \{x, y\}$ containing no edge with endpoints in $\Lambda_n$ and all those of $\mathcal{M}$ with endpoints outside $\Lambda_N$, so that the generalized switching principle formulated in [5, Lemma 2.1] implies that + +$$ +\begin{align} +\sum_{\mathcal{T} \subseteq M : \partial\mathcal{T} = \{0, x\}} \mathbb{I}[\mathcal{T} \in E \cap F] &= +\sum_{\substack{\mathcal{T} \subseteq M : \partial\mathcal{T} = (\mathcal{T} \Delta K) = \{0, x\}}} +\mathbb{I}[\mathcal{T} \Delta K \in E \cap F] \\ +&= +\sum_{\substack{\mathcal{T} \subseteq M : +\partial\mathcal{T} = +\begin{smallmatrix} +0, x +\end{smallmatrix} +}} +\mathbb{I}[\mathcal{T} +\in E, +M +\setminus +\mathcal{T} +\in F], +\tag{4.32} +\end{align} +$$ + +where we allow ourselves the latitude of calling $E$ and $F$ the events defined for multi-graphs corresponding to the events $E$ and $F$ for currents. One gets (4.31) when rephrasing this equality in terms of weighted currents (exactly like in standard proofs of the switching principle, see e.g. [1] or [5] for a closely related reasoning). +---PAGE_BREAK--- + +Observe now that forgetting about $G(y)$ on the right-hand side of (4.31) gives + +$$ +\mathbf{P}^{0y,yx}[\mathbf{n}_1 \in E, \mathbf{n}_2 \in F] = \mathbf{P}^{0y}[E]\mathbf{P}^{yx}[F]. \quad (4.33) +$$ + +Furthermore, since $x \notin \Lambda_N$ and $y \in \Lambda_m$, (4.25) implies that + +$$ +\left| \frac{\langle \sigma_0 \sigma_y \rangle \langle \sigma_y \sigma_x \rangle}{\langle \sigma_0 \sigma_x \rangle} - \langle \sigma_0 \sigma_y \rangle \right| \leq \frac{C_4 m}{N}. \quad (4.34) +$$ + +Last but not least, we can bound (from below) $\mathbf{P}^{0x,\emptyset}[G(y)]$ and $\mathbf{P}^{0y,yx}[G(y)]$ as follows. +We only briefly describe the argument since we will present it in full details in Section 6.2. 
+The event $G(y)$ clearly contains the event that $\text{Ann}(M,N)$ is not crossed by a cluster +in $\mathbf{n}_1$, and $\text{Ann}(n,m)$ is not crossed by a cluster in $\mathbf{n}_2$, since in such case $\mathbf{k}$ can be +defined as the sum of $\mathbf{n}_1$ restricted to the clusters intersecting $\Lambda_N^c$ (this current has no +sources) and $\mathbf{n}_2$ restricted to the clusters intersecting $\Lambda_m^c$ (this current has sources $x$ and +$y$). Now, we can bound the probability of $\mathbf{n}_1$ crossing $\text{Ann}(M,N)$ in the same spirit as we +bounded the probabilities for $F_1$ and $F_3$ in the previous proof by splitting $\text{Ann}(M,N)$ in +two annuli $\text{Ann}(\sqrt{MN}, N)$ and $\text{Ann}(M, \sqrt{MN})$, then estimating the probability that the +backbone of $\mathbf{n}_1$ crosses the inner annulus more than once, and then the probability that +the remaining current (which is sourceless) crosses the outer annulus. Doing the same for +the probability that a cluster of $\mathbf{n}_2$ crosses $\text{Ann}(n,m)$, we find that + +$$ +\frac{\langle \sigma_0 \sigma_x \rangle}{\langle \sigma_0 \sigma_y \rangle \langle \sigma_y \sigma_x \rangle} \mathbf{P}^{0x,\emptyset}[G(y), y \xrightarrow[n_1+n_2=2]{\substack{n_1+ n_2 \\ \leftrightarrow}} 0] = \mathbf{P}^{0y,yx}[G(y)] \geq 1 - \frac{C_5}{n^\epsilon} \geq 1 - C_6 \left(\frac{n}{N}\right)^{\epsilon/(\alpha-1)}. \quad (4.35) +$$ + +Note that we use that $M \ge N^{9+\epsilon}$ in this part of the proof. + +Overall, the value of *K* and (4.30)–(4.35) put together imply + +$$ +|\mathbf{P}^{0x}[E \cap F] - \sum_{y \in \mathrm{Ann}(m,M)} \delta(y) \mathbf{P}^{0y,\emptyset}[E] \mathbf{P}^{yx,\emptyset}[F]| \leq \frac{C_7}{\sqrt{\log(N/n)}}, \quad (4.36) +$$ + +with $\delta(y) = (\sigma_0\sigma_y)/(K\alpha_{k(y)})$ where $k(y)$ is such that $y \in \mathrm{Ann}(n_{k(y)}, n_{k(y)+1})$. + +The end of the proof is now a matter of elementary algebraic manipulations. 
Applying +this inequality twice (once with $x$ and once with $x'$) for $F$ being the full set, we obtain +that for every $x, x' \notin \Lambda_N$ and every event $E$ which is depending on $\Lambda_n$ only, + +$$ +|\mathbf{P}^{0x}[E] - \mathbf{P}^{0x'}[E]| \leq \frac{2C_7}{\sqrt{\log(N/n)}}. \tag{4.37} +$$ + +Now, assume the stronger assumption that $\alpha > 3^8$ and fix $m = [\sqrt{Nn}]$. Applying + +• (4.36) for *m* and *N*, the full event and *F*, + +• then (4.37) for *n*, *m* and *E* (note that *m* ≥ *n*³), + +• and (4.36) for *m* and *N*, *E* and *F*, + +gives that for every $x \notin \Lambda_N$, + +$$ +\begin{align*} +& | \mathbf{P}^{0x}[E \cap F] - \mathbf{P}^{0x}[E] \mathbf{P}^{0x}[F] | \\ +&\leq |\mathbf{P}^{0x}[E \cap F] - \mathbf{P}^{0x}[E] \sum_{y} \delta(y) \mathbf{P}^{yx}[F]| + \frac{C_7}{\sqrt{\log(N/n)}} \\ +&\leq |\mathbf{P}^{0x}[E \cap F] - \sum_{y} \delta(y) \mathbf{P}^{0y}[E] \mathbf{P}^{yx}[F]| + \frac{3C_7}{\sqrt{\log(N/n)}} \\ +&\leq \frac{4C_7}{\sqrt{\log(N/n)}}. \tag{4.38} +\end{align*} +$$ + +Using Lemma 4.4 and Proposition 4.6, we may now establish the clustering of inter-sections, under (4.2). + +$\square$ + + +---PAGE_BREAK--- + +**Proof of Proposition 4.3** In view of the translation invariance of the claimed statement, we take $u$ to be the origin. Since $x$ and $y$ are at a distance larger than $2\ell_K$ of each other, one of them is at a distance (larger than or equal to) $\ell_K$ of $u$. Without loss of generality we take that to be $x$, and make a similar assumption about $z$. + +Let $\mathcal{S}_K$ denote the set of subsets of $\{1, \dots, K-2\}$ containing even integers only and fix $S \in \mathcal{S}_K$. Let $A_S$ be the event that no $I_k$ occurs for $k \in S$. 
If $s$ denotes the maximal element of $S$, the mixing property Proposition 4.6 used with $n = \ell_{s-1}$ and $N = \ell_s$ gives + +$$ \mathbf{P}^{0x,0z,\emptyset,\emptyset}[A_S] \leq \mathbf{P}^{0x,0z,\emptyset,\emptyset}[I_s^c]\mathbf{P}^{0x,0z,\emptyset,\emptyset}[A_{S\setminus\{s\}}] + \frac{C}{\sqrt{\log \ell_{s-1}}}. \quad (4.39) $$ + +To be precise and honest, we use a multi-current version, with four currents, of the mixing property. We will state and prove this property in Sections 6.1 and 6.2 and ignore this additional difficulty for now. Also, it is here that the stronger restriction $\alpha > 3^\delta$ is used, along with the choice of $\ell_0 = \ell_0(\alpha)$, to enable the mixing. Note that we used that the event $I_s$ is expressed in terms of just the restriction of the currents $\mathbf{n}_1, \dots, \mathbf{n}_4$ to $\text{Ann}(\ell_s, \ell_{s+1})$. + +Now, the intersection property Lemma 4.4 and an elementary bound on $\ell_{s-1}$ gives the existence of $c_0 > 0$ small enough that + +$$ \mathbf{P}^{0x,0z,\emptyset,\emptyset}[A_S] \le (1-2c_0)\mathbf{P}^{0x,0z,\emptyset,\emptyset}[A_{S\setminus\{s\}}] + c_0(1-c_0)^{|S|-1}. \quad (4.40) $$ + +An induction gives immediately that for every $S \in \mathcal{S}_K$, + +$$ \mathbf{P}^{0x,0z,\emptyset,\emptyset}[A_S] \le (1-c_0)^{|S|}. \quad (4.41) $$ + +Let $B_S \subset A_S$ be the event that the clusters of 0 in $\mathbf{n}_1 + \mathbf{n}_3$ and $\mathbf{n}_2 + \mathbf{n}_4$ do not intersect in any of the annuli $\text{Ann}(\ell_s, \ell_{s+1})$ for $s \in S$. Thanks to Corollary A.2, the probability of $B_S$ increases when removing sources, so that + +$$ \mathbf{P}^{0x,0z,0y,0t}[B_S] \le \mathbf{P}^{0x,0z,\emptyset,\emptyset}[B_S] \le \mathbf{P}^{0x,0z,\emptyset,\emptyset}[A_S] \le (1-c_0)^{|S|}. 
\quad (4.42) $$ + +To conclude, observe that if $\mathbf{M}_0(\mathcal{T}; \mathcal{L}_\alpha, K) \le \delta K$, then there must exist a set $S \in \mathcal{S}_K$ of cardinality at least $(\frac{1}{2} - \delta)K$ such that $B_S$ occurs. We deduce that + +$$ +\begin{aligned} +\mathbf{P}^{0x,0z,0y,0t}[\mathbf{M}_0(\mathcal{T}; \mathcal{L}_\alpha, K) < \delta K] &\le \sum_{S \in \mathcal{S}_K : |S| \ge (1/2 - \delta)K} \mathbf{P}^{0x,0z,0y,0t}[B_S] \\ +&\le \binom{K/2}{\delta K} (1-c_0)^{(1/2-\delta)K}, +\end{aligned} +\quad (4.43) +$$ + +which implies the claim by appropriately choosing the value of $\delta$. $\square$ + +# 5 Weak regularity of the two-point function + +Progressing towards the unconditional proof of Theorem 1.3 we establish in this section the abundance, below the correlation length, of regular scales at which the two-point function has properties similar to those it would have under the power-law decay assumption (4.2). This auxiliary result is stated here as Theorem 5.12. + +Towards this goal we focus here on the two-point function, and present some old and new observations. In particular, we discuss the following three properties of the two-point function: + +(i) *monotonicity* (Section 5.1) +---PAGE_BREAK--- + +(ii) *sliding-scale spatial Infrared Bound* (Section 5.3), + +(iii) *gradient estimate* (Section 5.5), + +(iv) a lower bound for the two point function at $\beta_c$. + +The first three are based on the reflection-positivity of the n.n.f. interaction, and apply to +systems of real valued variables of arbitrary (but common) distribution, of sub-gaussian +growth, i.e. satisfying (2.2). That includes the Ising and $\varphi^4$ variables which are of partic- +ular interest for us. The last item is proven for systems with spins in the GS class. + +**Some unifying notation:** In statements which apply to both the Ising and $\varphi^4$ systems, we shall refer to the spin/field variables by the “neutral” symbol $\tau$. 
Its a-priori distribution is denoted $\rho(d\tau)$. It may be displayed as a subscript, but also will often be omitted. + +The expectation value functional with respect to the Gibbs measure, or functional +integral, for a system in the domain $\Lambda$ is denoted $\langle \cdot \rangle_{\Lambda,\rho,\beta}$, with $\langle \cdot \rangle_{\rho,\beta}$ denoting the states' +natural infinite volume limit. + +We also denote by $\beta_c(\rho)$ (or just $\beta_c$ where the spins' a-priori distribution $\rho$ is clear +from the context) is the critical inverse temperature and $\xi(\rho,\beta)$ the correlation length. + +Throughout this section $|J| := \sum_y J_{0,y}$ and + +$$ +S_{\rho,\beta}(x) := \langle \tau_0 \tau_x \rangle_{\rho,\beta}. \tag{5.1} +$$ + +We refer to points in $\mathbb{R}^d$ as $x = (x_1, \dots, x_d)$ and denote by $e_j$ the unit vector with $x_j = 1$. + +5.1 Messager-Miracle-Sole monotonicity for the two-point function + +The Messenger-Miracle-Solé (MMS) inequality [30, 37, 41] states that for models with n.n.f. interactions (and more generally reflection-positive interactions) in a region $\Lambda$ endowed with reflection symmetry, the correlation function $\langle \prod_{x \in A} \tau_x \prod_{x \in B} \tau_x \rangle_{\Lambda, \rho, \beta}$ at sets of sites A and B which are on the same side of a reflection plane, can only decrease when B is replaced by its reflected image, $\mathcal{R}(B)$, i.e. + +$$ +\left\langle \prod_{x \in A} \tau_x \prod_{x \in B} \tau_x \right\rangle_{\Lambda, \rho, \beta} \geq \left\langle \prod_{x \in A} \tau_x \prod_{x \in \mathcal{R}(B)} \tau_x \right\rangle_{\Lambda, \rho, \beta}. \quad (5.2) +$$ + +In the infinite volume limit on $\mathbb{Z}^d$, this principle can be invoked for reflections with respect +to + +• hyperplanes passing through vertices or mid-edges, i.e. reflections changing only one coordinate $x_i$, which is sent to $L - x_i$ for some fixed $L \in \frac{1}{2}\mathbb{Z}$, + +• “diagonal” hyperplanes, i.e. 
reflections changing only two coordinates $x_i$ and $x_j$, which are sent to $x_j \pm L$ and $x_i \mp L$ respectively, for some $L \in \mathbb{Z}$. + +In particular, this implies the following useful comparison principle. + +**Proposition 5.1 (MMS monotonicity)** For the n.n.f. model on $\mathbb{Z}^d$ ($d \ge 1$) with real valued spin variables satisfying (2.2): + +i) along the principal axes the two-point function is monotone decreasing in $\|x\|_\infty$ + +$$ +\textit{ii) for any } x = (x_1, \dots, x_d) \in \mathbb{Z}^d, +$$ + +$$ +S_{\rho,\beta}((\|x\|_{\infty}, 0_{\perp})) \geq S_{\rho,\beta}(x) \geq S_{\rho,\beta}((\|x\|_{1}, 0_{\perp})), \quad (5.3) +$$ + +where $\|x\|_1 := \sum_{j=1}^{d} |x_j|$, $\|x\|_{\infty} := \max_j |x_j|$, and $0_{\perp}$ is the null vector in $\mathbb{Z}^{d-1}$. +---PAGE_BREAK--- + +The above carries the useful implication that for any $x, y \in \mathbb{Z}^d$ with $\|y\|_{\infty} \ge d \|x\|_{\infty}$, + +$$S_{\rho,\beta}(x) \ge S_{\rho,\beta}(y) \quad (5.4)$$ + +since $\|x\|_1 \le d \|x\|_\infty \le \|y\|_\infty$, by (5.3) the two quantities are on the correspondingly opposite sides of $S_{\rho,\beta}((\|x\|_1, 0_\perp))$. + +We shall encounter below also monotonicity statements of the Fourier transform. Both are useful in extracting point-wise implications from bounds on the corresponding two point function's bulk averages (as in Corollary 5.8, below). + +## 5.2 The two-point function’s Fourier transform + +In view of the model’s translation invariance it is natural to consider the system’s behavior also through its Fourier spin-wave modes. These are defined as + +$$\tilde{\tau}_{\beta}(p) := \frac{1}{\sqrt{(2L)^d}} \sum_{x \in (-L,L]^d} e^{ip \cdot x} \tau_x \qquad (5.5)$$ + +with $p$ ranging over $\Lambda_L^* := [-\pi, \pi)^d \cap \frac{\pi}{L}\mathbb{Z}^d$. 
+ +These variables are especially relevant in case the Hamiltonian is taken with the periodic boundary conditions, under which sites $x, y \in \Lambda_L$ are neighbors if either $\|x - y\|_1 = 1$ or $|y_i - x_i| = 2L - 1$ for some $i \in \{1, \dots, d\}$. With these boundary conditions, the model is invariant under cyclic shifts, and its Hamiltonian decomposes into a sum of single-mode contributions: + +$$H_{\Lambda_R}(\tau) = \sum_{p \in \Lambda_L^*} \mathcal{E}(p) |\hat{\tau}_\beta(p)|^2, \quad (5.6)$$ + +with + +$$\mathcal{E}(p) := 2 \sum_{j=1}^{d} [1 - \cos(p_j)] = 4 \sum_{j=1}^{d} \sin^2(p_j/2). \quad (5.7)$$ + +Among the various relations in which the Fourier transform plays a useful role, the following statements will be of relevance for our discussion. + +i) The spin-wave modes' second moment coincides with the finite volume Fourier transform of the two-point correlation function ($S^{(L)}(p)$): + +$$\widetilde{S}_{\rho,\beta}^{(L)}(p) := \sum_{x \in \Lambda_L} e^{ip \cdot x} \langle \tau_0 \tau_x \rangle_{\Lambda_L, \rho, \beta}^{(b.c.)} = (\langle \hat{\tau}_\beta(p) \rangle^2)_{\Lambda_L, \rho, \beta}^{(b.c.)} \ge 0. \quad (5.8)$$ + +ii) For the n.n.f. interaction, and more generally reflection-positive interactions, the following gaussian-domination (aka *infrared*) bound holds [18, 19]: + +$$\mathcal{E}(p) \widetilde{S}_{\rho,\beta}^{(L)}(p) \leq \frac{1}{2|J|\beta}. \quad (5.9)$$ + +The bound appeals to the physicists’ intuition, reminding one of the equipartition law. Alas, it has so far been proven only for reflection-positive interactions. + +iii) The Parseval-Plancherel identity yields the sum rule: + +$$\langle \tau_0^2 \rangle_{\Lambda_L, \rho, \beta}^{(b.c.)} = \frac{1}{|\Lambda_L|} \sum_{p \in \Lambda_L^*} \widetilde{S}_{\rho, \beta}^{(L)}(p). 
\quad (5.10)$$ +---PAGE_BREAK--- + +As was pointed out in [19], the combination of (5.10) with (5.9) yields a (then novel) way to prove the occurrence of spontaneous magnetization in dimensions $d > 2$, at high enough $\beta$. + +More explicitly, in (5.10) one may note that the Infrared Bound (5.9) does not provide any direct control on the $p=0$ term, since $\mathcal{E}(0) = 0$. And, in fact, the hallmark of the low temperature phase ($\beta > \beta_c(\rho)$) is that this single value of the summand attains macroscopic size: + +$$ \widehat{S}_{\rho,\beta}^{(L)}(0) \approx |\Lambda_L| M(\rho, \beta)^2 \quad (5.11) $$ + +with $M(\rho, \beta)$ the model's spontaneous magnetization. + +We shall also use the following statement on the relation between the finite volume and the infinite volume states. + +**Proposition 5.2** For a model in the GS class on $\mathbb{Z}^d$, $d > 2$, with translation invariant finite range interactions, for any $\beta < \beta_c(\rho)$: + +1. the system has only one infinite volume Gibbs equilibrium state. + +2. the correlation functions of that state satisfy, for any finite $A \subset \mathbb{Z}^d$, and any sequence of finite volumes $V_n \subset \mathbb{Z}^d$ which asymptotically cover any finite region, + +$$ \langle \prod_{x \in A} \tau_x \rangle_{\rho, \beta} = \lim_{V_n \to \mathbb{Z}^d} \langle \prod_{x \in A} \tau_x \rangle_{V_n, \rho, \beta}^{(b.c.)} \quad (5.12) $$ + +with $\langle - \rangle^{(b.c.)}_{V_n, \rho, \beta}$ denoting the correlation function under boundary conditions which may include either cross-boundary spin couplings (e.g. periodic), or arbitrary specified values of $\tau_{|\partial V_n}$. + +3. with the finite volumes taken as the rectangular domains $\Lambda_L$, also the Fourier Transform functions converge, i.e. 
for any $p \in [-\pi, \pi]^d$, and sequence as in (5.12) + +$$ \lim_{n \to \infty} \sum_{x \in V_n} e^{ip \cdot x} \langle \tau_0 \tau_x \rangle_{V_n, \rho, \beta}^{(b.c.)} = \sum_{x \in \mathbb{Z}^d} e^{ip \cdot x} S_{\rho, \beta}(x) =: \hat{S}_{\rho, \beta}(p) \quad (5.13) $$ + +The statement follows by standard arguments that we omit here. The main ingredients are the exponential decay of correlations, which at any $\beta < \beta_c(\rho)$ are exponentially bounded, uniformly in the volume, and the FKG inequality. The first two points hold also for $\beta = \beta_c(\rho)$ [4]. However not the last, (5.13), since at the critical temperature the correlation function is not summable. + +We shall employ the freedom which Proposition 5.2 provides in establishing the different monotonicity properties of $S(p)$ in $p$. + +Furthermore, for the *disordered regime*, where $M(\rho, \beta) = 0$, the sum rule combined with the Infrared Bound implies that for every $\beta < \beta_c(\rho)$, + +$$ \langle \tau_0^2 \rangle_\beta = \int_{[-\pi, \pi]^d} \hat{S}_{\rho, \beta}(p) dp \leq \int_{[-\pi, \pi]^d} \frac{dp}{2|J|\beta \mathcal{E}(p)}. \quad (5.14) $$ + +Since $\mathcal{E}(p)$ vanishes only at $p=0$ and there at the rate $\mathcal{E}(p) \sim |p|^2$, the integral is convergent for $d > 2$ and one gets + +$$ \langle \tau_0^2 \rangle_{\rho, \beta_c(\rho)} \leq \frac{C_d}{2|J|\beta_c(\rho)} \quad (5.15) $$ + +with $C_d < \infty$ for $d > 2$. This bound will be used in Section 7. +---PAGE_BREAK--- + +## 5.3 The spectral representation and a sliding-scale Infrared Bound + +We next present a Fourier transform counterpart (though one derived by different means) of the Messager-Miracle-Sole monotonicity stated in Section 5.1, and use it for a sliding-scale extension of the Infrared Bound (5.9). The results, which include both old [21, 47] and new observations, are based on the relation of the two-point function with the transfer matrix, and the positivity of the latter. 
+ +The transfer matrix has been the source of many insights on the structure of statistical mechanical systems with finite range interactions. Its appearance can be seen in Ising's study of one dimensional systems, for which it permits a simple proof of the absence of phase transition. Also, in higher dimensions it has played an essential role in many important developments [19, 21, 42], some of which rely on positivity properties. Here we shall use the following consequences of its spectral representation for the two-point function. + +**Proposition 5.3 (Spectral Representation)** In the n.n.f. model on $\mathbb{Z}^d$ ($d \ge 1$), at $\beta < \beta_c(\rho)$, for every square summable function $v: \mathbb{Z}^{d-1} \to \mathbb{C}$, there exists a positive measure $\mu_{v,\beta}$ of finite mass + +$$ \int_{1/\xi(\rho, \beta)}^{\infty} d\mu_{v,\beta}(a) = \sum_{x_{\perp}, y_{\perp} \in \mathbb{Z}^{d-1}} v_{x_{\perp}} \overline{v_{y_{\perp}}} S_{\rho, \beta}((0, y_{\perp} - x_{\perp})) \quad (5.16) $$ + +such that for every $n \in \mathbb{Z}$ + +$$ \sum_{x_{\perp}, y_{\perp} \in \mathbb{Z}^{d-1}} v_{x_{\perp}} \overline{v_{y_{\perp}}} S_{\rho, \beta}((n, x_{\perp} - y_{\perp})) = \int_{1/\xi(\rho, \beta)}^{\infty} e^{-a|n|} d\mu_{v,\beta}(a). \quad (5.17) $$ + +And for every $p_{\perp} \in [-\pi, \pi]^{d-1}$ there exists a positive measure $\mu_{p_{\perp},\beta}$ of finite mass such that for every $p_1 \in [-\pi, \pi]$ + +$$ \hat{S}_{\rho, \beta}(p) = \int_0^\infty \frac{e^a - e^{-a}}{\mathcal{E}_1(p_1) + (e^{a/2} - e^{-a/2})^2} d\mu_{p_1, \beta}(a), \quad (5.18) $$ + +with $\mathcal{E}_1(k) := 2[1 - \cos(k)] = 4\sin^2(k/2)$. + +Although the spectral representation is quite well-known (cf. [21] and references therein) for completeness of the presentation we include the derivation of (5.17) in the Appendix. 
Equation (5.18) then follows by applying (5.17) to the function + +$$ v_{p_1}(x_\perp) := \frac{1}{\sqrt{|\Lambda_\ell^{(d-1)}|}} e^{ip_1 \cdot x_\perp} I[x \in \Lambda_\ell^{(d-1)}] $$ + +and taking the limit $\ell \to \infty$. Here $\Lambda_\ell^{(d-1)}$ is the $d-1$ dimensional version of the box $\Lambda_L$ and $I[\cdot]$ is the indicator function. The convergence is facilitated by the exponential decay of correlations at $\beta < \beta_c$. + +Of particular interest for us are the following implications of (5.18) (the first was noted and applied in [21]). + +**Proposition 5.4** For a n.n.f. model on $\mathbb{Z}^d$ ($d \ge 1$), at any $\beta < \beta_c(\rho)$: + +1. $\hat{S}_{\rho,\beta}(p_1, p_2, \dots, p_d)$ is monotone decreasing in each $|p_j|$, over $[-\pi, \pi]$, + +2. $\mathcal{E}_1(p_1)\hat{S}_{\rho,\beta}(p)$ and $|p_1|^2\hat{S}_{\rho,\beta}(p_1)$ are monotone increasing in $|p_1|,$ +---PAGE_BREAK--- + +3. the function + +$$ +\hat{S}_{\rho,\beta}^{(\text{mod})}(p) := \hat{S}_{\rho,\beta}(p) + \hat{S}_{\rho,\beta}(p + \pi(1,1,0,\dots,0)) \quad (5.19) +$$ + +is monotone decreasing in $\delta$ along the line of constant $\{p_3, \dots, p_d\}$ and + +$$ +(p_1, p_2) = (|p_1 - p_2| + \delta, |p_1 - p_2| - \delta), \quad \delta \in [0, |p_1 - p_2|]. \tag{5.20} +$$ + +and the above remains true under any permutation of the indices. + +The correction in (5.19) is insignificant in the regime where $\hat{S}_{\rho,\beta}(p)$ is large. That is so since $|\hat{S}_{\rho,\beta}(p+\pi(1,1,0,\dots,0))| \le C/\beta$ uniformly for $|p| \le \pi/2$. (The main term diverges in the limit $\beta \nearrow \beta_c(\rho)$ and $p \to 0$.) + +**Proof** The first two statements are implied by the combination of (5.18) with the observation that each of the following functions is monotone in $k \in [0, \pi]: k \mapsto \mathcal{E}_1(k)$, $k \mapsto k^2/\mathcal{E}_1(k)$, and for each $a \ge 0$, $k \mapsto \mathcal{E}_1(k)/(\mathcal{E}_1(k) + (e^{a/2} - e^{-a/2})^2)$. 
+ +The third statement is based on the application of the transfer matrix in the diagonal direction (cf. Fig. 4). More explicitly, to produce the spectral representation one may start by considering a partially rotated rectangular region, whose main axes are associated with the coordinate system ($x_1+x_2, x_1-x_2, x_3, \dots, x_d$). The finite-volume Hamiltonian is taken with the correspondingly modified periodic boundary conditions which produce cyclicity in these directions. As stated in (5.13), for $\beta < \beta_c(\rho)$ the change does not affect the two-point function’s infinite volume limit. + +In this case, there are two transfer matrices $T$ and $T^*$ corresponding to adding one layer of even (resp. odd) vertices, i.e. vertices with $x_1+x_2$ even (resp. odd). The argument by which monotonicity was proven above for the Cartesian directions applies to the two-point function's restriction to the sub-lattice of even vertices since the proof would involve the matrix $TT^*$, which is positive. + +Then, if $\hat{S}_{\rho,\beta}^{(\mathrm{mod})}$ is given by (5.19), one finds + +$$ +\hat{S}_{\rho, \beta}^{(\mathrm{mod})}(p) = \sum_{x \in \Lambda_L} e^{ip \cdot x} S_{\rho, \beta}(0,x) \sum_{k=0,1} e^{i\pi(x_1+x_2)k} = 2 \sum_{\substack{x \in \Lambda_L \\ x_1+x_2 \text{ even}}} e^{ip \cdot x} S_{\rho, \beta}(0,x). \quad (5.21) +$$ + +Thus, the third monotonicity statement follows by a direct adaptation of the proof of the first one. +$\square$ + +**Corollary 5.5** For a n.n.f. model on $\mathbb{Z}^d$ ($d \ge 1$) at any $\beta < \beta_c(\rho)$, the two-point function satisfies, for all $p \in [-\pi/2, \pi/2]^d$, + +$$ +\hat{S}_{\rho,\beta}(\|p\|_{\infty}, 0_{\perp}) \geq \hat{S}_{\rho,\beta}(p) \geq \hat{S}_{\rho,\beta}(\|p\|_{1}, 0_{\perp}) - \frac{C}{\beta}, \quad (5.22) +$$ + +with $C$ depending on the dimension only. 
+ +The restriction to $p \in [-\pi/2, \pi/2]^d$ guarantees that the second term of (5.19) can be bounded by $C/\beta$ (this bound corresponds to the $-C/\beta$ term on the right-hand side of (5.22)), as explained below Proposition 5.4. +---PAGE_BREAK--- + +Figure 4: The split of $\mathbb{Z}^2$ into even and odd sub-lattices and their stratification into intertwined diagonal hyperplanes. The partition function of $(-L, L]^2$ with rotated-periodic boundary condition can be written as $Z_{2L} = \text{tr}(T_2 T_1)^L$, with $T_j$ a pair of conjugate mappings between the Hilbert spaces of the even and the odd hyperplanes. The product $T_2 T_1 = T_1^* T_1$ provides the even subgraph's transfer matrix in one of this graph's principal directions. + +**Proof** The inequality follows from Proposition 5.4 through the monotonicity lines used for (5.3). $\square$ + +The previous bound combined with the second statement in Proposition 5.4 yields an interesting consequence for the behaviour of the *susceptibility* truncated at a distance *L*, which we define as + +$$ \chi_L(\rho, \beta) := \sum_{x \in \Lambda_L} S_{\rho, \beta}(x). \qquad (5.23) $$ + +**Theorem 5.6 (Sliding-scale Infrared Bound)** There exists a constant $C = C(d) > 0$ such that for every n.n.f. model on $\mathbb{Z}^d$ ($d > 2$), every $\beta \le \beta_c(\rho)$ and $L \ge \ell \ge 1$, + +$$ \frac{\chi_L(\rho, \beta)}{L^2} \le \frac{C \chi_\ell(\rho, \beta)}{\beta \ell^2}. \qquad (5.24) $$ + +The case $\ell = 1$ is in essence similar to the Infrared Bound (5.9), as is explained below, so that (5.24) may be viewed as a *sliding-scale* version of this inequality. One may also note that (5.24) is a sharp improvement (replacing the exponent $d$ by 2) on the more naive application of the Messager-Miracle-Sole inequality giving that for every $L \ge \ell \ge 1$, + +$$ \frac{\chi_L(\rho, \beta)}{L^d} \le \frac{\chi_\ell(\rho, \beta)}{\ell^d}.
\qquad (5.25) $$ + +**Proof** Let us first note that it suffices to prove the claim for all $\beta < \beta_c(\rho)$, with a uniform constant $C$. Its extension to the critical point can be deduced from the continuity + +$$ S_{\rho,\beta_c(\rho)}(x) = \lim_{\beta \nearrow \beta_c(\rho)} S_{\rho,\beta}(x) \qquad (5.26) $$ + +(which follows from the main result of [4]). This observation allows us to apply the monotonicity results discussed above. +---PAGE_BREAK--- + +Below, the constants $C_i$ are to be understood as dependent on $d$ only. Consider the smeared version of $\chi_L(\rho, \beta)$ defined by + +$$ \tilde{\chi}_L(\rho, \beta) := \sum_{x \in \mathbb{Z}^d} e^{-(\|x\|_2/L)^2} S_{\rho,\beta}(x). \quad (5.27) $$ + +with $\|x\|_2^2 := \sum_{i=1}^d x_i^2$. The MMS monotonicity statement (5.4) implies that + +$$ e^{-d}\chi_L(\rho, \beta) \le \tilde{\chi}_L(\rho, \beta) \le C_1\chi_L(\rho, \beta) \quad (5.28) $$ + +for every $L$, so that it suffices to prove that for every $L \ge \ell \ge 1$, + +$$ \frac{\tilde{\chi}_L(\rho, \beta)}{L^2} \le C_2 \frac{\tilde{\chi}_\ell(\rho, \beta)}{\ell^2}. \quad (5.29) $$ + +We will work in Fourier space, and use the identity + +$$ \tilde{\chi}_L(\rho, \beta) \asymp L^d \int_{[-\pi, \pi]^d} e^{-\|p\|^2 L^2} \hat{S}_{\rho, \beta}(p) dp, \quad (5.30) $$ + +where $f \asymp g$ means $cg \le f \le Cg$ with $c, C$ independent of everything else (we use that the Fourier transform of the Gaussian on the lattice is a Jacobi theta-function within multiplicative constants of $e^{-\|p\|^2 L^2}$ on $[-\pi, \pi]^d$). + +Now, let + +$$ A := \{p \in [-\frac{\pi\ell}{L}, \frac{\pi\ell}{L}]^d : |p_1| = \|p\|_\infty\}. \quad (5.31) $$ + +Using the symmetries of $\hat{S}_{\rho,\beta}$ and the decay of Corollary 5.5, we find that + +$$ \int_{[-\pi, \pi]^d} e^{-\|p\|^2 L^2} \hat{S}_{\rho, \beta}(p) dp \le (d + C_3 e^{-\ell^2}) \int_A e^{-\|p\|^2 L^2} \hat{S}_{\rho, \beta}(p) dp.
\quad (5.32) $$ + +Since $|p_1| = \|p\|_\infty$ for $p \in A$ and $\|p\|_\infty \ge \|p\|_1/d$, the second property of Proposition 5.4 and Corollary 5.5 give that + +$$ \hat{S}_{\rho,\beta}(p) \le \hat{S}_{\rho,\beta}(\|p\|_{\infty}, 0_{\perp}) \le (\tfrac{dL}{\ell})^2 \hat{S}_{\rho,\beta}(\frac{L}{\ell}\|p\|_1, 0_{\perp}) \le (\tfrac{dL}{\ell})^2 (\hat{S}_{\rho,\beta}(\frac{L}{\ell}p) + C/\beta). \quad (5.33) $$ + +Using this inequality and making the change of variable $p \mapsto q = L/\ell p$ gives + +$$ \int_A \exp[-\|p\|^2 L^2] \hat{S}_{\rho,\beta}(p) dp \le C_4 (\frac{\ell}{L})^{d-2} \left( \int_{[-\pi, \pi]^d} \exp[-\|q\|^2 \ell^2] \hat{S}_{\rho,\beta}(q) dq + C_5/\beta \right), \quad (5.34) $$ + +which after plugging in (5.32) and using the identity (5.30) implies that + +$$ \tilde{\chi}_L(\rho, \beta) \le C_6 (\frac{\ell}{L})^{d-2} (\tilde{\chi}_\ell(\rho, \beta) + C_5/\beta). \quad (5.35) $$ + +The inequality (5.29) follows from the fact that $\tilde{\chi}_\ell(\rho, \beta) \ge 1$, so that the constant $C_5/\beta$ can be removed by changing $C_6$ into a larger constant $C_7/\beta$. $\square$ + +Inequality (5.4) and then the sliding-scale Infrared Bound with $L = |x|$ and $\ell = 1$ (5.24) implies that for every $x \in \mathbb{Z}^d$, + +$$ S_{\rho,\beta}(x) \le \frac{C_1}{|x|^d} \sum_{y \in \text{Ann}(d|x|, 2d|x|)} S_{\rho,\beta}(y) \le \frac{C_1}{|x|^d} \chi_{2d|x|}(\rho, \beta) \le \frac{C_2 \langle \tau_0^2 \rangle_{\rho,\beta}}{|x|^{d-2}}. \quad (5.36) $$ + +The factor $\langle \tau_0^2 \rangle_\beta$ in the upper bound may seem pointless for the Ising model where it is simply equal to 1, but it becomes very important when studying unbounded spins, as in Section 7, where it is essential for a dimensionless improved tree diagram bound. + +It may be noted that the combination of (5.36) with (5.14) leads to the more standard formulation [18, 19] of the Infrared Bound in $x$-space: + +$$ S_{\rho,\beta}(x) \le \frac{C}{\beta |J||x|^{d-2}}.
\quad (5.37) $$ +---PAGE_BREAK--- + +## 5.4 A lower bound + +The above upper bound will next be supplemented by a power-law lower bound on the two-point function at $\beta_c$. Conceptually, it originates in the observation that if the correlations drop on some scale by a fast enough power law, then on larger scales they decay exponentially fast. An early version of this principle can be found in Hammersley's analysis of percolation [28]. A general statement was presented in Dobrushin's analysis of the constructive criteria for the high temperature phase. For Ising systems a simple version of such a statement can be deduced from the following observation. + +**Lemma 5.7** For every ferromagnetic model in the GS class on $\mathbb{Z}^d$ ($d \ge 1$) with coupling constants that are invariant under translations, every finite $\Lambda \subset \mathbb{Z}^d$ containing $0$ and every $y \notin \Lambda$, + +$$S_{\rho,\beta}(y) \le \sum_{\substack{u \in \Lambda \\ v \notin \Lambda}} S_{\rho,\beta}(u) \beta J_{u,v} S_{\rho,\beta}(y-v). \quad (5.38)$$ + +This statement is a mild extension of Simon's inequality which was originally formulated for the n.n.f. Ising models [44]. Being spin-dimension balanced, it is valid also for the Griffiths-Simon class of variables and more general pair interactions⁵. + +The MMS monotonicity allows us to extract the following point-wise implication, which will be used below + +**Corollary 5.8 (Lower bound on $S_{\rho,\beta}$)** For a n.n.f. model in the GS class on $\mathbb{Z}^d$ ($d \ge 1$), there exists $c = c(d) > 0$ such that for every $\beta \le \beta_c(\rho)$ and $x \in \mathbb{Z}^d$, + +$$S_{\rho,\beta}(x) \ge \frac{c}{\beta|J| \|x\|_{\infty}^{d-1}} \exp\left(-\frac{d\|x\|_{\infty} + 1}{\xi(\rho, \beta)}\right). \quad (5.39)$$ + +**Proof** Let us introduce + +$$Y_{\rho,\beta}(\Lambda) := \sum_{\substack{u \in \Lambda \\ v \notin \Lambda}} S_{\rho,\beta}(u) \beta J_{u,v}. \quad (5.40)$$ + +Set $L := d\|x\|_{\infty}$.
Applying (5.38) with $\Lambda_L$ and $y = ne_1$, and iterating it $\lfloor \frac{n}{L+1} \rfloor$ times (i.e. as many as possible without reducing the last factor to a distance shorter than $L$), we get + +$$\beta |J| S_{\rho,\beta}(ne_1) \le Y_{\rho,\beta}(\Lambda_L)^{\lceil \frac{n}{L+1} \rceil}. \quad (5.41)$$ + +Since $\lim_n S_{\rho,\beta}(ne_1)^{1/n} = e^{-1/\xi(\rho,\beta)}$, we deduce that + +$$e^{-1/\xi(\rho, \beta)} \le Y_{\rho, \beta}(\Lambda_L)^{\frac{1}{L+1}}. \quad (5.42)$$ + +On the other hand, by (5.4), for each $x \in \mathbb{Z}^d$, $S_{\rho,\beta}(u) \le S_{\rho,\beta}(x)$ for all $u \in \partial\Lambda_L$, and hence + +$$\frac{Y_{\rho,\beta}(\Lambda_L)}{|\partial\Lambda_L|} \le \beta |J| S_{\rho,\beta}(x). \quad (5.43)$$ + +The substitution of (5.42) in (5.43) yields the claimed lower bound (5.39). $\square$ + +⁵The factor $S_{\rho,\beta}(u)$ in (5.38) can also be replaced by the finite volume expectation $\langle \tau_0 \tau_u \rangle_\Lambda$, as in Lieb's improvement of Simon's inequality [36]. Both versions have an easy proof through a simple application of the switching lemma, in its mildly improved form. +---PAGE_BREAK--- + +5.5 Regularity of the two-point function’s gradient + +**Proposition 5.9 (gradient estimate)** *There exists C = C(d) > 0 such that for every n.n.f. model in the GS class, every $\beta \le \beta_c(\rho)$, every $x \in \mathbb{Z}^d$ and every $1 \le i \le d$,* + +$$ +|S_{\rho,\beta}(x \pm \mathbf{e}_i) - S_{\rho,\beta}(x)| \leq \frac{F(|x|)}{|x|} S_{\rho,\beta}(x), \quad (5.44) +$$ + +where + +$$ +F(n) := C \frac{S_{\rho, \beta}(dn\mathbf{e}_1)}{S_{\rho, \beta}(n\mathbf{e}_1)} \log \left( \frac{2S_{\rho, \beta}(\frac{n}{2}\mathbf{e}_1)}{S_{\rho, \beta}(n\mathbf{e}_1)} \right). 
\quad (5.45) +$$ + +The previous proposition is particularly interesting when $S_{\rho,\beta}(dn\mathbf{e}_1) \ge c_0 S_{\rho,\beta}(\frac{n}{2}\mathbf{e}_1)$, +in which case we obtain the existence of a constant $C_0 = C_0(c_0, d) > 0$ such that for every +$x \in \partial\Lambda_n$ and $1 \le i \le d$, + +$$
|S_{\rho,\beta}(x \pm \mathbf{e}_i) - S_{\rho,\beta}(x)| \leq \frac{C_0}{|x|} S_{\rho,\beta}(x). \tag{5.46}
$$ + +**Proof** Without loss of generality, we may assume that $x = (|x|, x_\perp)$. We first assume that $i=1$. Introduce the three sequences $u_n := S_{\rho,\beta}(ne_1)$, $v_n := S_{\rho,\beta}((n,x_\perp))$ and $w_n := u_n + v_n$. The spectral representation applied to the function $v$ being the sum of the Dirac functions at $0_\perp$ and $x_\perp$ implies the existence of a finite measure $\mu_{x_\perp,\beta}$ such that + +$$
w_n = \int_0^\infty e^{-na} d\mu_{x_\perp, \beta}(a). \tag{5.47}
$$ + +Cauchy-Schwarz gives $w_n^2 \le w_{n-1}w_{n+1}$, which when iterated between $n$ and $n/2$ (assume $n$ +is even, the odd case is similar) leads to + +$$
\frac{w_{n+1}}{w_n} \geq \left(\frac{w_n}{w_{n/2}}\right)^{2/n} \geq 1 - \frac{2}{n} \log \left(\frac{w_{n/2}}{w_n}\right). \quad (5.48)
$$ + +We now use that $u_{n/2} \ge v_{n/2}$, $u_n \ge v_n$, and $u_n \ge u_{n+1}$ which are all consequences of the +Messager-Miracle-Sole inequality. Together with trivial algebraic manipulations, we get + +$$
v_{n+1} \ge v_n - \frac{4\log(2u_{n/2}/u_n)}{n} u_n. \quad (5.49)
$$ + +The bound we are seeking corresponds to $n = |x|$. + +To get the result for $i \neq 1$, use the Messager-Miracle-Sole inequality applied twice to +get that + +$$
|S_{\rho, \beta}(x \pm \mathbf{e}_i) - S_{\rho, \beta}(x)| \leq S_{\rho, \beta}(x - d\mathbf{e}_1) - S_{\rho, \beta}(x + d\mathbf{e}_1), \quad (5.50)
$$ + +and then refer to the previous case to conclude (one obtains the result for $n = |x| - d$, but +the proof can be easily adapted to get the result for $n = |x|$).
$\square$ + +**Remark 5.10** When $x = ne_1$ and $i = 1$, running through the lines of the previous proof shows that one can take $F(n) = 2\log(S_{\rho,\beta}(\frac{n}{2}e_1)/S_{\rho,\beta}(ne_1))$ which is bounded by $(2+o(1))\log n$ thanks to the lower bound (5.39) and the Infrared Bound (5.37). We therefore get that for every $n \le \xi(\rho, \beta)$, + +$$
S_{\rho, \beta}(ne_1) - S_{\rho, \beta}((n+1)e_1) \le (2+o(1)) \frac{\log n}{n} S_{\rho, \beta}(ne_1). \quad (5.51)
$$ + +It would be of interest to remove the $\log n$ factor, as this would enable a proof that $S_{\rho,\beta}(ne_1)$ does not drop too fast between different scales. +---PAGE_BREAK--- + +## 5.6 Regular scales + +Using the dyadic distance scales, we shall now introduce the notion of regular scales, which in essence means that on the given scale the two-point function has the properties which in the conditional proof of Section 4, were available under the assumption (4.2). + +**Definition 5.11** Fix $c, C > 0$. An annular region $\text{Ann}(n/2, 4n)$ is said to be regular if the following four properties are satisfied: + +$$ P1 \quad \text{for every } x, y \in \text{Ann}(n/2, 4n), S_{\rho,\beta}(y) \le C S_{\rho,\beta}(x); $$ + +$$ P2 \quad \text{for every } x, y \in \text{Ann}(n/2, 4n), |S_{\rho,\beta}(x) - S_{\rho,\beta}(y)| \le \frac{C|x-y|}{|x|} S_{\rho,\beta}(x); $$ + +$$ P3 \quad \text{for every } x \in \Lambda_n \text{ and } y \notin \Lambda_{Cn}, S_{\rho,\beta}(y) \le \frac{1}{2} S_{\rho,\beta}(x); $$ + +$$ P4 \quad \chi_{2n}(\rho, \beta) \ge (1+c)\chi_n(\rho, \beta). $$ + +A scale $k$ is said to be regular if the above holds for $n = 2^k$, and a vertex $x \in \mathbb{Z}^d$ will be said to be in a regular scale if it belongs to an annulus with the above properties.
+ +One may note that $P1$ follows trivially from $P2$ but we still choose to state the two properties independently (the proof would work with weaker versions of $P2$ so one can imagine cases where the notion of regular scale could be used with a different version of $P2$ not implying $P1$). + +Under the power-law assumption (4.2) of Section 4 every scale is regular at criticality. However, for now we do not have an unconditional proof of that. For an unconditional proof of our main results, this gap will be addressed through the following statement, which is the main result of this section. + +**Theorem 5.12 (Abundance of regular scales)** Fix $d > 2$ and $\alpha > 2$. There exist $c = c(d) > 0$ and $C = C(d) > 0$ such that for every n.n.f. model in the GS class and every $n^\alpha \le N \le \xi(\rho, \beta)$, there are at least $c \log_2(N/n)$ regular scales $k$ with $n \le 2^k \le N$. + +**Proof** The lower bound (5.8) for $S_{\rho,\beta}$ and the Infrared Bound (5.37) imply that + +$$ \chi_N(\rho, \beta) \ge c_0 N \ge c_0 (N/n)^{(\alpha-2)/(\alpha-1)} n^2 \ge c_1 (N/n)^{(\alpha-2)/(\alpha-1)} \chi_n(\rho, \beta). \quad (5.52) $$ + +Using the sliding-scale Infrared Bound (5.24), there exist $r, c_2 > 0$ (independent of $n, N$) such that there are at least $c_2 \log_2(N/n)$ scales $m = 2^k$ between $n$ and $N$ such that + +$$ \chi_{rm}(\rho, \beta) \ge \chi_{4dm}(\rho, \beta) + \chi_m(\rho, \beta). \quad (5.53) $$ + +Let us verify that the different properties of regular scales are satisfied for such an $m$. Applying (5.4) in the first inequality, the assumption (5.53) in the second, and (5.4) in the third, one has + +$$ |\text{Ann}(4dm, rm)| S_{\rho,\beta}(4dme_1) \ge \chi_{rm}(\rho, \beta) - \chi_{4dm}(\rho, \beta) \ge \chi_m(\rho, \beta) \ge |\Lambda_{m/(4d)}| S_{\rho,\beta}(\frac{1}{4}me_1).
\quad (5.54) $$ + +This implies that $S_{\rho,\beta}(4dme_1) \ge c_0 S_{\rho,\beta}(\frac{1}{4}me_1)$, which immediately gives $P1$ by (5.4) for $S_{\rho,\beta}$ and $P2$ by the gradient estimate given by Proposition 5.9. Furthermore, the fact that $S_{\rho,\beta}(x) \ge S_{\rho,\beta}(4dme_1) \ge \frac{c_3}{m^d}\chi_m(\rho, \beta)$ for every $x \in \text{Ann}(m, 2m)$ implies $P4$. To prove P3, +---PAGE_BREAK--- + +observe that for every $R$, the previous displayed inequality together with the sliding-scale Infrared Bound (5.24) give that for every $y \notin \Lambda_{dRm}$ and $x \in \Lambda_m$, + +$$|\Lambda_{Rm}|S_{\rho,\beta}(y) \le \chi_{Rm}(\rho, \beta) \le C_4 R^2 \chi_m(\rho, \beta) \le C_5 R^2 m^d S_{\rho,\beta}(x), \quad (5.55)$$ + +which implies the claim for $C$ and $c$ respectively large and small enough using here the assumption that $d > 2$. $\square$ + +# 6 Unconditional proofs of the Ising's results + +In this section, we prove our results for every $\beta \le \beta_c$ without making the power-law assumption of Section 4. We emphasize that unlike the introductory discussion of that section, the proofs given below are unconditional. The discussion is also not restricted to the critical point itself and covers more general approaches of the scaling limits, from the side $\beta \le \beta_c$ (hence the correlation length will be mentioned in several places). However, at this stage the discussion is still restricted to the n.n.f. Ising model. + +## 6.1 Unconditional proofs of the intersection-clustering bound and Theorem 1.3 for the Ising model + +The notation remains as in Section 4. The endgame in this section will be the unconditional proof of the intersection-clustering bound that we restate below in the right level of generality. The main modification is that the sequence $\mathcal{L}$ of integers $l_k$ will be chosen dynamically, adjusting it to the behaviour of the two-point function. 
More precisely, recall the definition of the bubble diagram $B_L(\beta)$ truncated at a distance $L$. Fix $D \gg 1$ and define recursively a (possibly finite) sequence $\mathcal{L}$ of integers $l_k = l_k(\beta, D)$ by the formula $l_0 = 0$ and + +$$l_{k+1} = \inf\{\ell : B_\ell(\beta) \ge D B_{\ell_k}(\beta)\}. \qquad (6.1)$$ + +By the Infrared Bound (5.37), $B_L - B_\ell \le C_0 \log(L/\ell)$ (in dimension $d=4$) from which it is a simple exercise to deduce that under the above definition + +$$D^k \le B_{\ell_k}(\beta) \le CD^k \qquad (6.2)$$ + +for every $k$ and some large constant $C$ independent of $k$. + +**Proposition 6.1 (clustering bound)** For $d=4$ and $D$ large enough, there exists $\delta = \delta(D) > 0$ such that for every $\beta \le \beta_c$, every $K > 3$ with $\ell_K \le \xi(\beta)$, and every $u,x,y,z,t \in \mathbb{Z}^4$ with mutual distances between $x,y,z,t$ larger than $2\ell_K$, + +$$\mathbf{P}_{\beta}^{ux,uz,uy,ut}[\mathbf{M}_u(\mathcal{T}; \mathcal{L}, K) < \delta K] \le 2^{-\delta K}. \qquad (6.3)$$ + +Before proving this proposition, let us explain how it implies the improved tree diagram bound. + +**Proof of Theorem 1.3** Choose $D$ large enough that the previous proposition holds true. We follow the same lines as in Section 4.1, simply noting that since $B_{\ell_k}(\beta) \le CD^k$, we may choose $K \ge c \log B_L(\beta)$ with $2\ell_K \le L$, where $c$ is independent of $L$ and $\beta$, so that (4.13) implies the improved tree diagram bound inequality. $\square$ +---PAGE_BREAK--- + +The main modification we need for an unconditional proof of the intersection-clustering +bound lies in the derivation of the intersection and mixing properties. The former is similar +to Lemma 4.4, but restricted to sources that lie in regular scales. We restate it here in a +slightly modified form. 
+ +Recall that $I_k$ is the event that there exist unique clusters of $\text{Ann}(\ell_k, \ell_{k+1})$ in $\mathbf{n}_1 + \mathbf{n}_3$ +and $\mathbf{n}_2 + \mathbf{n}_4$ crossing the annulus from the inner boundary to the outer boundary and that +these two clusters are intersecting. + +**Lemma 6.2 (Intersection property)** +Fix $d = 4$. There exists $c > 0$ such that for every $\beta \le \beta_c$, every $k$, and every $y \notin \Lambda_{2\ell_{k+1}}$ in a regular scale, + +$$ +\mathbf{P}_{\beta}^{0y,0y,\emptyset,\emptyset}[I_k] \geq c. \tag{6.4} +$$ + +**Proof** Restricting our attention to the case of *y* belonging to a regular scale enables us to use properties P1 and P2 of the regularity assumption on the scale. With this additional assumption, we follow the same proof as the one of the conditional version (Lemma 4.4). Introduce the intermediary integers $n \le m \le M \le N$ satisfying + +$$ +\ell_k^4 \ge n \ge \ell_k^{3+\epsilon}, \quad n^4 \ge m \ge n^{3+\epsilon}, \quad M^4 \ge N \ge M^{3+\epsilon}, \quad N^4 \ge \ell_{k+1} \ge N^{3+\epsilon}. \tag{6.5} +$$ + +For the second moment method on $\mathcal{M}$, the first and second moments take the following forms + +$$ +\begin{equation} +\begin{aligned} +\mathbf{E}_{\beta}^{0y,0y,\emptyset,\emptyset}[|\mathcal{M}|] &\ge c_1(B_M(\beta) - B_{m-1}(\beta)) \ge c_2 B_{\ell_{k+1}}(\beta), \\ +\mathbf{E}_{\beta}^{0y,0y,\emptyset,\emptyset}[|\mathcal{M}|^2] &\le c_3(B_M(\beta) - B_{m-1}(\beta)) B_{2M}(\beta) \le c_3 B_{\ell_{k+1}}(\beta)^2, +\end{aligned} +\tag{6.6}\tag{6.7} +\end{equation} +$$ + +where in the second inequality of the first line, we used that $D$ is large enough and +Lemma 6.3 below to get that + +$$ +B_M(\beta) \ge \frac{B_{\ell_{k+1}}(\beta)}{1 + 15C} \quad \text{and} \quad B_{m-1}(\beta) \le (1 + 15C)B_{\ell_k}(\beta) \le \frac{1 + 15C}{D} B_{\ell_{k+1}}(\beta). 
+$$ + +For the bound on the probabilities of the events $F_1, \dots, F_4$ defined as in Section 4.2, recall +that the vertices $x$ and $z$ there are in our case both equal to $y$ that belongs to a regular +scale. Using Property 2 of the regularity of scales, the bounds in (4.21) and (4.22) follow +readily from the Infrared Bound (5.37). $\square$ + +In the previous proof, we used the following statement. + +**Lemma 6.3** For $d=4$, there exists $C > 0$ such that for every $\beta \le \beta_c$ and every $\ell \le L \le \xi(\beta)$, + +$$ +B_L(\beta) \le \left(1 + C \frac{\log(L/\ell)}{\log \ell}\right) B_\ell(\beta). \tag{6.8} +$$ + +**Proof** For every $n \le N$ for which $n = 2^k$ with $k$ regular, we have that (recall the definition of $\chi_n(\beta)$ from the previous section) + +$$ +\begin{align*} +B_{2N}(\beta) - B_N(\beta) &\le C_0 N^{-4} \chi_{N/d}(\beta)^2 \\ +&\le C_1 n^{-4} \chi_n(\beta)^2 \\ +&\le C_2 n^{-4} (\chi_{2n}(\beta) - \chi_n(\beta))^2 \\ +&\le C_3 (B_{2n}(\beta) - B_n(\beta)), \tag{6.9} +\end{align*} +$$ +---PAGE_BREAK--- + +where in the first inequality we used (5.4), in the second the sliding-scaled Infrared Bound (5.24), in the third Property P4 of the regularity of $n$, and in the last Cauchy-Schwarz. + +Now, there are $\log_2(L/\ell)$ scales between $\ell$ and $L$, and at least $\frac{1}{C}\log_2\ell$ regular scales between 1 and $\ell$ by abundance of regular scales (Theorem 5.12). Since the sums of squared correlations on any of the former contribute less to $B_L(\beta) - B_\ell(\beta)$ than any of the latter to $B_\ell(\beta)$, we deduce that + +$$B_L(\beta) \leq \left(1 + C \frac{\log_2(L/\ell)}{\log_2 \ell}\right) B_\ell(\beta). \quad (6.10)$$ + +Next comes the unconditional mixing property. 
+ +**Theorem 6.4 (random currents' mixing property)** For $d \geq 4$, there exist $\alpha, c > 0$ such that for every $t \leq s$, every $\beta \leq \beta_c$, every $n^\alpha \leq N \leq \xi(\beta)$, every $x_i \in \Lambda_n$ and $y_i \notin \Lambda_N$ ($i \leq t$), and every pair of events $E$ and $F$ depending on the restriction of $(\mathbf{n}_1, \dots, \mathbf{n}_s)$ to edges within $\Lambda_n$ and outside of $\Lambda_N$ respectively, + +$$\left| \mathbf{P}_{\beta}^{x_1 y_1, \dots, x_t y_t, \emptyset, \dots, \emptyset} [E \cap F] - \mathbf{P}_{\beta}^{x_1 y_1, \dots, x_t y_t, \emptyset, \dots, \emptyset} [E] \mathbf{P}_{\beta}^{x_1 y_1, \dots, x_t y_t, \emptyset, \dots, \emptyset} [F] \right| \leq s (\log \frac{N}{n})^{-c}. \quad (6.11)$$ + +Furthermore, for every $x'_1, \dots, x'_t \in \Lambda_n$ and $y'_1, \dots, y'_t \notin \Lambda_N$, we have that + +$$\left|\mathbf{P}_{\beta}^{x_1 y_1, \dots, x_t y_t, \emptyset, \dots, \emptyset} [E] - \mathbf{P}_{\beta}^{x'_1 y'_1, \dots, x'_t y'_t, \emptyset, \dots, \emptyset} [E]\right| \leq s (\log \frac{N}{n})^{-c}, \quad (6.12)$$ + +$$\left|\mathbf{P}_{\beta}^{x_1 y_1, \ldots, x_t y_t, \emptyset, \ldots, \emptyset} [F] - \mathbf{P}_{\beta}^{x'_1 y'_1, \ldots, x'_t y'_t, \emptyset, \ldots, \emptyset} [F]\right| \leq s (\log \frac{N}{n})^{-c}. \quad (6.13)$$ + +We postpone the proof to Section 6.2 below. Before showing how Theorem 6.4 is used in the proof of the improved tree diagram bound, let us make an interlude and comment on this statement. + +**Discussion** The relation (6.11) is an assertion of approximate independence between events at far distances, and (6.12)–(6.13) expresses a degree of independence of the probability of an event from the precise placement of the sources when these are far from the event in question. This result should be of interest on its own, and possibly have other applications, since mixing properties efficiently replace independence in statistical mechanics.
+ +The main difficulty of the theorem concerns currents with a source inside $\Lambda_n$ and a source outside $\Lambda_N$ (i.e. the first $t$ ones). In this case, the currents are constrained to have a path linking the two, and that may be a conduit for information, and correlation, between $\Lambda_n$ and the exterior of $\Lambda_N$. To appreciate the point it may be of help to compare the situation with Bernoulli percolation: there the mixing property without sources is a triviality (by the variables’ independence); while an analogue of the mixing property with sources $x$ and $y$ would concern Bernoulli percolation conditioned on having a path from $x$ to $y$. Proving convergence at criticality, for $x$ set as the origin and $y$ tending to infinity, of these conditioned measures is a notoriously hard problem. It would in particular imply the existence of the so-called Incipient Infinite Cluster (IIC), and the definition of the IIC was justified in 2D [32] and in high dimension [49], but it is still open in dimensions $3 \leq d \leq 10$. When the number of sources is even inside $\Lambda_n$, things become much simpler and one may in fact prove a quantitative ratio weak mixing using mixing properties for (sub)-critical random-cluster measures with cluster-weight 2 provided by [4]. +---PAGE_BREAK--- + +Theorem 6.4 has an extension to three dimensions using [4], but there it becomes non-quantitative (the careful reader will notice that the condition $d > 3$ is coming from the exponent appearing in the proof of (6.36) in Lemma 6.7 in the next section). More precisely, one may prove that in dimension $d = 3$, for every $n, s$ and $\varepsilon$, there exists a constant $N$ sufficiently large that the previous theorem holds with an error $\varepsilon$ instead of $s(\log \frac{N}{n})^{-c}$. 
This has a particularly interesting application: one may construct the IIC in dimension $d = 3$ for this model, since the random-cluster model with cluster weight $q = 2$ conditioned on having a path from $x$ to $y$ can be obtained as the random current model with sources $x$ and $y$ together with an additional independent sprinkling (see [5]). This represents a non-trivial result for critical 3D Ising. More generally, we believe that the previous mixing result may be a key tool in the rigorous description of the critical behaviour of the Ising model in three dimensions. + +This concludes the interlude, and we return now to the proof of the intersection-clustering bound. + +**Proof of Proposition 6.1** We follow the same argument as in the proof of the conditional version (Proposition 4.3) and borrow the notation from the corresponding proof at the end of Section 4.2. We fix $\alpha > 2$ large enough that the mixing property Theorem 6.4 holds true. Using Lemma 6.3, we may choose $D = D(\alpha)$ such that $\ell_{k+1} \ge \ell_k^\alpha$. + +The proof is exactly identical to the proof of Proposition 4.3, with the exception of the bound on $\mathbf{P}^{0x,0z,\emptyset,\emptyset}[A_S]$ and the fact that we restrict ourselves to subsets $S$ of even integers in $\{1, \dots, K-3\}$. In order to obtain this result, first observe that since we assumed $\ell_K \le \xi(\beta)$, by Theorem 5.12 there exists $y \in \text{Ann}(\ell_{K-1}, \ell_K)$ in a regular scale. Since the event $A_S$ depends on the currents inside $\Lambda_{\ell_{K-2}}$ (since $S$ does not contain integers strictly larger than $K-3$), and that $\ell_{K-1} \ge \ell_{K-2}^\alpha$, the mixing property (Theorem 6.4) shows that + +$$ \mathbf{P}^{0x,0z,\emptyset,\emptyset}[A_S] \le \mathbf{P}^{0y,0y,\emptyset,\emptyset}[A_S] + \frac{C}{\sqrt{\log \ell_{K-1}}} \le \mathbf{P}^{0y,0y,\emptyset,\emptyset}[A_S] + 2^{-\delta K}. 
\quad (6.14) $$ + +To derive the first bound on the right-hand side, we apply the mixing property repeatedly (Theorem 6.4) and the intersection property (Lemma 6.2) exactly as in the conditional proof. For the second inequality, we lower bound $\ell_{K-1}$ using $B_{\ell_{K-1}}(\beta) \ge D^{K-1}$ and the Infrared Bound (5.37). $\square$ + +## 6.2 The mixing property: proof of Theorem 6.4 + +As we saw, the mixing property is in the core of the proof of our main result. The strategy of the proof was explained in Section 4.2 when we proved mixing for one current under the power-law assumption. In this section we again define a random variable $N$ which is approximately 1 and is a weighted sum over ($t$-tuple of) vertices connected to the origin. The main difficulty will come from the fact that since we do not fully control the spin-spin correlations, we will need to define $N$ in a smarter fashion. Also, whereas in Section 4.2 we treated the case of a single current ($s=1$), here we generalize to multiple currents. + +Fix $\beta \le \beta_c$ and drop it from the notation. Also fix $s \ge t \ge 1$ and $n^\alpha \le N \le \xi(\beta)$. Below, constants $c_i$ and $C_i$ are independent of the choices of $s,t,\beta,n,N$ satisfying the properties above. We introduce the integers $m$ and $M$ such that $m/n = (N/n)^{1/3}$ and $N/M = (N/n)^{1/3}$ (we omit the details of the rounding operation). + +For $\mathbf{x} = (x_1, \dots, x_t)$ and $\mathbf{y} = (y_1, \dots, y_t)$, we will use the following shortcut notation + +$$ \mathbf{P}^{\mathbf{x}\mathbf{y}} := \mathbf{P}^{x_1 y_1, \dots, x_t y_t, \emptyset, \dots, \emptyset} \quad \text{and} \quad \mathbf{P}^{\mathbf{x}\mathbf{y}} \otimes \mathbf{P}^{\emptyset}, \quad (6.15) $$ +---PAGE_BREAK--- + +where the second measure is the law of the random variable $(\mathbf{n}_1, \dots, \mathbf{n}_s, \mathbf{n}'_1, \dots, \mathbf{n}'_s)$, where $(\mathbf{n}'_1, \dots, \mathbf{n}'_s)$ is an independent family of sourceless currents. 
+ +To define $\mathbf{N}$, first introduce for every vertex $y \notin \Lambda_{2dm}$, the set (see Fig. 5) + +$$
A_y(m) := \{u \in \text{Ann}(m, 2m) : \forall x \in \Lambda_{m/d}, \langle \sigma_x \sigma_y \rangle \le \left(1 + \frac{C|x-u|}{|y|}\right) \langle \sigma_u \sigma_y \rangle\}, \quad (6.16)
$$ + +where $C$ is given by the definition of regular scales. + +**Remark 6.5** When $y$ is in a regular scale, then $A_y(m)$ is equal to $\text{Ann}(m, 2m)$ by Property P2 of regular scales. The reason why we consider $A_y(m)$ instead of the full annulus $\text{Ann}(m, 2m)$ is technical: since $y$ will not a priori be assumed to belong to a regular scale (in fact $|y|$ may be much larger than $\xi(\beta)$ when $\beta < \beta_c$), we will use (for (6.26) and (6.37) below) the inequality between $\langle \sigma_x \sigma_y \rangle$ and $\langle \sigma_u \sigma_y \rangle$ in several bounds. Now, if $y_1 = |y|$, then + +$$
A_y(m) \supset \{z \in \mathbb{Z}^d : m \le z_1 \le 2m \text{ and } 0 \le z_j \le m/d \text{ for } j > 1\} \quad (6.17)
$$ + +as the Messager-Miracle-Sole inequality implies⁶ that $\langle \sigma_z \sigma_y \rangle \ge \langle \sigma_x \sigma_y \rangle$ for every $x \in \Lambda_{m/d}$. + +From now on, fix a set $\mathcal{H}$ of regular scales $k$ with $m \le 2^k \le M/2$ satisfying that distinct $k, k' \in \mathcal{H}$ differ by a multiplicative factor at least $C$ (where the constant $C$ is given by Theorem 5.12). We further assume that $|\mathcal{H}| \ge c_1 \log(N/n)$, where $c_1$ is sufficiently small. The existence of $\mathcal{H}$ is guaranteed by the definition of $m$ and $M$ and the abundance of regular scales given by Theorem 5.12.
+ +Define $\mathbf{N} := \prod_{i=1}^{t} \mathbf{N}_i$, where + +$$
\mathbf{N}_i := \frac{1}{|\mathcal{H}|} \sum_{k \in \mathcal{H}} \frac{1}{A_{x_i, y_i}(2^k)} \sum_{u \in A_{y_i}(2^k)} \mathbb{I}[u \xleftrightarrow{\mathbf{n}_i+\mathbf{n}'_i} x_i], \quad (6.20)
$$ + +where $a_{x,y}(u) := \langle\sigma_x\sigma_u\rangle\langle\sigma_u\sigma_y\rangle/\langle\sigma_x\sigma_y\rangle$ and $A_{x,y}(m) := \sum_{u\in A_{y}(m)} a_{x,y}(u)$. The first step of the proof is the following concentration inequality. + +**Proposition 6.6 (Concentration of N)** *For every $\alpha > 2$, there exists $C_0 = C_0(\alpha, t) > 0$ such that for every $n$ large enough and $n^\alpha \le N \le \xi(\beta)$,* + +$$
\mathbf{E}^{\mathbf{x}\mathbf{y},\emptyset}[(\mathbf{N}-1)^2] \leq \frac{C_0}{\log(N/n)}. \quad (6.21)
$$ + +*Proof* We shall apply the telescopic formula + +$$
\mathbf{N}-1 = \prod_{i=1}^{t} \mathbf{N}_i - 1 = \sum_{i=1}^{t} (\mathbf{N}_i - 1) \prod_{j>i} \mathbf{N}_j
$$ + +⁶The claim follows directly from the inequality $S_\beta(y) \le S_\beta(x)$ for every $x,y$ such that $x_1 \ge 0$ and $y_1 \ge x_1 + \sum_{j>1} |y_j - x_j|$. In order to prove this inequality, define, for $0 \le i \le d$, + +$$
v^{(i)} := (x_1 + \sum_{j=i}^{d} |y_j - x_j|, x_2, \dots, x_i, y_{i+1}, \dots, y_d). \tag{6.18}
$$ + +Successive applications of the Messager-Miracle-Sole inequality with respect to the sum or the difference
(depending on whether $x_i$ is positive or negative) of the first and $i$-th coordinates implies that + +$$
S_{\beta}(y) \le S_{\beta}(v^{(1)}) \le S_{\beta}(v^{(2)}) \le \cdots \le S_{\beta}(v^{(d)}) = S_{\beta}(x). \quad (6.19)
$$ +---PAGE_BREAK--- + +with the last product interpreted as 1 for $i = t$. Hence, by the Cauchy-Schwarz inequality and the currents' independence, + +$$ \mathbf{E}^{xy,\emptyset}[(\mathbf{N}-1)^2] \leq t \sum_{i=1}^{t} \mathbf{E}^{xy,\emptyset}[(\mathbf{N}_i-1)^2] \prod_{j>i} \mathbf{E}^{xy,\emptyset}[\mathbf{N}_j^2].
\quad (6.22) $$ + +It therefore suffices to show that there exists a constant $C_1 > 0$ such that for every $i \le t$, + +$$ \mathbf{E}^{xy,\emptyset}[(\mathbf{N}_i - 1)^2] \le \frac{C_1}{\log(N/n)}. \quad (6.23) $$ + +To lighten the notation, and since the random variable $\mathbf{N}_i$ depends only on $\mathbf{n}_i$ and $\mathbf{n}'_i$, we omit the index in $x_i$, $y_i$, $\mathbf{n}_i$, $\mathbf{n}'_i$ and write instead just $x, y, \mathbf{n}, \mathbf{n}'$. We keep the index in $\mathbf{N}_i$ to avoid confusion with $\mathbf{N}$ which is the product of these random variables. + +The proof of (6.23) is also based on a computation of the first and second moments of $\mathbf{N}_i$. For the first moment, the switching lemma and the definition of $\mathbf{N}_i$ imply that $\mathbf{E}^{xy,\emptyset}[\mathbf{N}_i] = 1$. From the lower bound on $|\mathcal{H}|$, to bound the second moment it therefore suffices to show that + +$$ \mathbf{E}^{xy,\emptyset}[\mathbf{N}_i^2] \le 1 + \frac{C_2}{|\mathcal{H}|}, \quad (6.24) $$ + +which follows from the inequality, for every $\ell \ge k$ in $\mathcal{H}$, + +$$ \sum_{\substack{u \in A_y(2^k) \\ v \in A_y(2^\ell)}} \mathbf{P}^{xy,\emptyset}[u, v \xleftrightarrow{\mathbf{n}+\mathbf{n}'} x] \le A_{x,y}(2^k) A_{x,y}(2^\ell)(1+C_3 2^{-(\ell-k)}). \quad (6.25) $$ + +**Case** $\ell > k$. We find by (A.11) that + +$$ \mathbf{P}^{xy,\emptyset}[u,v \xleftrightarrow{\mathbf{n}+\mathbf{n}'} x] \le a_{x,y}(u)a_{x,y}(v) \left( \frac{\langle \sigma_x \sigma_y \rangle \langle \sigma_u \sigma_v \rangle}{\langle \sigma_u \sigma_y \rangle \langle \sigma_x \sigma_v \rangle} + \frac{\langle \sigma_x \sigma_y \rangle \langle \sigma_u \sigma_v \rangle}{\langle \sigma_v \sigma_y \rangle \langle \sigma_x \sigma_u \rangle} \right). \quad (6.26) $$ + +Now, since $u \in A_y(2^k)$, $\langle \sigma_x \sigma_y \rangle \le (1+C\tfrac{|u-x|}{|y|})\langle \sigma_u \sigma_y \rangle$. 
Furthermore, since $\ell$ is a regular scale, Property P2 of regular scales implies that $\langle \sigma_u \sigma_v \rangle \le (1+C\tfrac{|u-x|}{|v|})\langle \sigma_x \sigma_v \rangle$. We deduce that + +$$ \frac{\langle \sigma_x \sigma_y \rangle \langle \sigma_u \sigma_v \rangle}{\langle \sigma_u \sigma_y \rangle \langle \sigma_x \sigma_v \rangle} \le 1 + C_0 2^{-(\ell-k)}. \quad (6.27) $$ + +Similarly, since $v \in A_y(2^\ell)$, $\langle \sigma_x \sigma_y \rangle \le (1+C\tfrac{|v-x|}{|y|})\langle \sigma_v \sigma_y \rangle$. Property P3 for the $\ell-k$ regular scales in $\mathcal{H}$ between $k$ and $\ell$ implies that + +$$ \frac{\langle\sigma_x\sigma_y\rangle\langle\sigma_u\sigma_v\rangle}{\langle\sigma_v\sigma_y\rangle\langle\sigma_x\sigma_u\rangle} \le C_1 2^{-(\ell-k)}. \quad (6.28) $$ + +Plugging (6.27)-(6.28) into (6.26) and summing over $u \in A_y(2^k)$ and $v \in A_y(2^\ell)$ gives (6.25). +---PAGE_BREAK--- + +Case $\ell = k$. Assume that $\langle \sigma_u \sigma_y \rangle \le \langle \sigma_v \sigma_y \rangle$. Use (A.11) to write + +$$ +\mathbf{P}^{xy,\emptyset}[u, v \xleftrightarrow{\mathbf{n}+\mathbf{n}'} x] \leq \langle \sigma_v \sigma_u \rangle \left( \frac{\langle \sigma_x \sigma_u \rangle}{\langle \sigma_x \sigma_v \rangle} + \frac{\langle \sigma_u \sigma_y \rangle}{\langle \sigma_v \sigma_y \rangle} \right) a_{x,y}(v). \quad (6.29) +$$ + +By Property P1 of regular scales, the first term under parenthesis is bounded by a constant. +The second one is bounded by 1 by assumption. 
Now, for each $v \in A_y(2^k)$, + +$$ +\begin{align*} +\sum_{u \in A_y(2^k): \langle \sigma_u \sigma_y \rangle \le \langle \sigma_v \sigma_y \rangle} \langle \sigma_v \sigma_u \rangle &\le \chi_{2^{k+1}}(\beta) \le C_2 (\chi_{2^{k+1}}(\beta) - \chi_{2^k}(\beta)) \\ +&\le C_3 \sum_{u \in A_y(2^k)} \langle \sigma_0 \sigma_u \rangle \\ +&\le C_4 \sum_{u \in A_y(2^k)} \frac{\langle \sigma_x \sigma_u \rangle \langle \sigma_u \sigma_y \rangle}{\langle \sigma_x \sigma_y \rangle} = C_4 A_{x,y}(2^k), +\end{align*} +$$ + +where the first inequality is trivial, the second one is true by Property P4, the third by +Remark 6.5 (when $y$ is regular then it is a direct consequence of P4, and when it is not +one can use (6.17) and the Messager-Miracle-Sole inequality), and the fourth inequality +follows from Property P1 of regular scales (to replace $\langle \sigma_0 \sigma_u \rangle$ by $\langle \sigma_x \sigma_u \rangle$) and the fact that +since $u \in A_y(2^k)$, $\langle \sigma_x \sigma_y \rangle \le (1+C\tfrac{|u-x|}{|y|})\langle \sigma_u \sigma_y \rangle \le C_5\langle \sigma_u \sigma_y \rangle$. + +We deduce that + +$$ +\sum_{u,v \in A_y(2^k)} \mathbf{P}^{xy,\emptyset}[u, v \xleftrightarrow{\mathbf{n}+\mathbf{n}'} x] \le 2 \sum_{\substack{u,v \in A_y(2^k) \\ \langle \sigma_u \sigma_y \rangle \le \langle \sigma_v \sigma_y \rangle}} \mathbf{P}^{xy,\emptyset}[u, v \xleftrightarrow{\mathbf{n}+\mathbf{n}'} x] \le C_6 A_{x,y}(2^k)^2. \quad (6.30) +$$ + +This concludes the proof. $\square$ + +For a proof of Theorem 6.4 we fix $\alpha > 2$ (which will be taken large enough later). +Applying the Cauchy-Schwarz inequality gives + +$$ +|\mathbf{P}^{xy}[E \cap F] - \mathbf{E}^{xy,\emptyset}[\mathbf{N} \, \mathbb{I}[(\mathbf{n}_1, \dots, \mathbf{n}_s) \in E \cap F]]| \leq \sqrt{\mathbf{E}^{xy,\emptyset}[(\mathbf{N} - 1)^2]} \leq \frac{C_1}{\sqrt{\log(N/n)}}. 
\quad (6.31) +$$ + +Now, for $\mathbf{u} = (u_1, \dots, u_t)$ with $u_i \in \text{Ann}(m, M)$ for every $i$, let $G(u_1, \dots, u_t)$ be the event +that for every $i \le s$, there exists $\mathbf{k}_i \le \mathbf{n}_i + \mathbf{n}'_i$ such that $\mathbf{k}_i = 0$ on $\Lambda_n$, $\mathbf{k}_i = \mathbf{n}_i + \mathbf{n}'_i$ outside +$\Lambda_N$, and $\partial\mathbf{k}_i$ is equal to $\{u_i, y_i\}$ if $i \le t$ and $\emptyset$ if $t < i \le s$. The switching principle implies +as in Section 5.3 that + +$$ +\begin{align} +&\mathbf{P}^{\mathbf{xy},\emptyset}[(\mathbf{n}_1, \dots, \mathbf{n}_s) \in E \cap F,\ u_i \xleftrightarrow{\mathbf{n}_i+\mathbf{n}'_i} x_i \text{ for } i \le t,\ G(u_1, \dots, u_t)] \\ +&\quad = \Big(\prod_{i=1}^t a_{x_i,y_i}(u_i)\Big) \, \mathbf{P}^{\mathbf{xu},\mathbf{uy}}[(\mathbf{n}_1, \dots, \mathbf{n}_s) \in E,\ (\mathbf{n}'_1, \dots, \mathbf{n}'_s) \in F,\ G(u_1, \dots, u_t)]. && (6.32) +\end{align} +$$ + +Also, as before, we have the trivial identity + +$$ +\mathbf{P}^{\mathbf{xu},\mathbf{uy}}[(\mathbf{n}_1, ..., \mathbf{n}_s) \in E, (\mathbf{n}'_1, ..., \mathbf{n}'_s) \in F] = \mathbf{P}^{\mathbf{xu}}[E] \mathbf{P}^{\mathbf{uy}}[F]. \quad (6.33) +$$ + +We now pause the argument to establish the following lemma. + +**Lemma 6.7** For $d \ge 4$, there exist $\epsilon > 0$ and $\alpha_0 = \alpha_0(\epsilon) > 0$ large enough such that for every $n^{\alpha_0} \le N \le \xi(\beta)$ and for every $\mathbf{u}$ with $u_i \in A_{y_i}(2^{k_i})$ for some $k_i$ with $m \le 2^{k_i} \le M/2$ for every $1 \le i \le t$, + +$$ +\left(\prod_{i=1}^{t} a_{x_i, y_i}(u_i)\right)^{-1} \mathbf{P}^{\mathbf{xy},\emptyset}\left[u_i\xleftrightarrow{\mathbf{n}_i+\mathbf{n}'_i} x_i, \forall i\le t, G(u_1,\ldots,u_t)^c\right] = \mathbf{P}^{\mathbf{xu},\mathbf{uy}}[G(u_1,\ldots,u_t)^c] \\ +\le s\left(\frac{n}{N}\right)^{\epsilon}. +\tag{6.34} +$$ +---PAGE_BREAK--- + +Figure 5: The currents $\mathbf{n}_i$ (red) and $\mathbf{n}'_i$ (blue). Since the sources of $\mathbf{n}_i$, i.e. 
$x_i$ and $u_i$, are both in $\Lambda_M$, a reasoning similar to the proof of uniqueness in the intersection property (first control the backbone, proving that it does not cross the annulus $\text{Ann}(M, R)$, and then the remaining sourceless current) enables us to conclude that the probability that the current contains a crossing of $\text{Ann}(M, N)$ is small. Similarly, since the sources $u_i$ and $y_i$ of $\mathbf{n}'_i$ lie both outside of $\Lambda_m$, we can prove that the probability that $\mathbf{n}'_i$ crosses $\text{Ann}(n, m)$ is small. An extra care is needed for establishing the latter since $y$ is not assumed to be regular. To circumvent this problem, we consider only intersection sites $u_i$ in one of the boxes $\Lambda_k(y_i)$, which are depicted here in gray. + +**Proof** Fix $\varepsilon > 0$ sufficiently small (we will see below how small it should be). The first identity follows from the switching lemma so we focus on the second one. Let $G_i$ be the event that the current **k***i* exists. This event clearly contains (see Fig. 5) the event that $\text{Ann}(M, N)$ is not crossed by a cluster in **n***i*, and $\text{Ann}(n, m)$ is not crossed by a cluster in **n'***i*, since in such case **k***i* can be defined as the sum of **n***i* restricted to the clusters intersecting $\Lambda_N^c$ (this current has no sources) and **n'***i* restricted to the clusters intersecting $\Lambda_m^c$ (this current has sources $u_i$ and $y_i$). We focus on the probability of this event for $i \le t$, the case $t < i \le s$ being even simpler since there are no sources. + +We bound the probability of **n***i* crossing Ann(*M*, *N*) by splitting Ann(*M*, *N*) in two annuli Ann(*M*, *R*) and Ann(*R*, *N*) with *R* = √*MN*, then estimating the probability that the backbone of **n***i* crosses the inner annulus, and then the probability that the remaining current crosses the outer annulus. 
More precisely, the chain rule for backbones [3] gives that for α₀ = α₀(ε) > 0 large enough and *N* ≥ nα₀, +$$ +\mathbf{P}^{\text{xy}}[\Gamma(\mathbf{n}_i) \text{ crosses } \text{Ann}(M, R)] \le \sum_{v \in \partial \Lambda_R} \frac{\langle \sigma_{x_i} \sigma_v \rangle \langle \sigma_v \sigma_{u_i} \rangle}{\langle \sigma_{x_i} \sigma_{u_i} \rangle} \le C_2 R^3 \frac{M^3}{R^4} \le (n/N)^{\varepsilon}, \quad (6.35) +$$ +where the lower bound (5.39) to bound the denominator and the Infrared Bound (5.37) for the numerator. Then, observe that the remaining current **n***i* ∖ Γ(**n***i*) is sourceless. Adding an additional sourceless current and using the switching lemma and Griffiths inequality [22] (very much like in the bound (4.22) in the proof of Lemma 4.4) gives + +$$ +\mathbf{P}^{\text{xy}}[\mathbf{n}_i \setminus \Gamma(\mathbf{n}_i) \text{ crosses } \text{Ann}(R, N)] \leq \sum_{\substack{v \in \partial \Lambda_R \\ w \in \partial \Lambda_N}} \langle \sigma_v \sigma_w \rangle^2 \leq C_3 R^3 N^3 (R/N)^4 \leq (n/N)^{\epsilon}, \quad (6.36) +$$ +where we used the Infrared-Bound 5.37, and in the last one the definition of *R* and the fact that *N* ≥ *n*^α₀ for α₀ large enough. +---PAGE_BREAK--- + +When dealing with the probability of $\mathbf{n}'_i$ crossing $\text{Ann}(n, m)$, fix $r = \sqrt{nm}$ and apply +the same reasoning with the annuli $\text{Ann}(n, r)$ and $\text{Ann}(r, m)$. 
The equivalent of (6.36) is +the same as before, but one must be more careful about the bound on the probability of +the event dealing with the backbone: + +$$ +\mathbf{P}^{\mathbf{x}\mathbf{y}}[\Gamma(\mathbf{n}'_i) \text{ crosses } \text{Ann}(r,m)] \le C_4 \sum_{v \in \partial \Lambda_r} \frac{\langle \sigma_{u_i} \sigma_v \rangle \langle \sigma_v \sigma_{y_i} \rangle}{\langle \sigma_{u_i} \sigma_{y_i} \rangle} \le C_5 r^3/m^2 \le (n/N)^{\epsilon}, \quad (6.37) +$$ + +where we used the Infrared Bound (5.37) and our assumption that $u_i$ belongs to one of +the $A_{y_i}(2^{k_i})$ (to show that $\langle \sigma_v \sigma_{y_i} \rangle \le C_4 \langle \sigma_{u_i} \sigma_{y_i} \rangle$). + +$\square$ + +Invoking the above lemma we now return to the proof of Theorem 6.4. +Introduce the coefficients $\delta(\mathbf{u}, \mathbf{x}, \mathbf{y})$ equal to + +$$ +\delta(\mathbf{u}, \mathbf{x}, \mathbf{y}) := \prod_{i=1}^{t} \frac{a_{x_i, y_i}(u_i)}{|\mathcal{H}| A_{x_i, y_i}(2^{k_i})} \qquad (6.38) +$$ + +for $\mathbf{u}$ such that for every $i \le t$, $u_i \in A_{y_i}(2^{k_i})$ for some $k_i$, and equal to 0 for other $\mathbf{u}$. +Gathering (6.31)–(6.33) as well as Lemma 6.7, and observing that the sum over $(u_1, \dots, u_t)$ +of $\delta(\mathbf{u}, \mathbf{x}, \mathbf{y})$ is 1, we obtain that + +$$ +| \mathbf{P}^{\mathbf{x}\mathbf{y}}[E \cap F] - \sum_{\mathbf{u}} \delta(\mathbf{u}, \mathbf{x}, \mathbf{y}) \mathbf{P}^{\mathbf{x}\mathbf{u}}[E] \mathbf{P}^{\mathbf{u}\mathbf{y}}[F] | \leq \frac{C_5 s}{\sqrt{\log(N/n)}} + 2C_6 s (n/N)^{\epsilon} \leq \frac{C_7 s}{\sqrt{\log(N/n)}}, \tag{6.39} +$$ + +provided that $N \ge n^{\alpha_0}$ where $\alpha_0$ is given by the previous lemma. + +To conclude the proof is now a matter of elementary algebraic manipulations. We begin +by proving (6.12) when all the $y_i, y'_i$ for $i \le t$ belong to regular scales (not necessarily the +same ones). 
In this case, apply twice (once for **y** and once for **y'**) the previous inequality +for our event *E* and the event on the outside being the full event to find + +$$ +|\mathbf{P}^{\mathbf{x}\mathbf{y}}[E] - \mathbf{P}^{\mathbf{x}\mathbf{y}}'[E]| \leq |\sum_{\mathbf{u}} (\delta(\mathbf{u}, \mathbf{x}, \mathbf{y}) - \delta(\mathbf{u}, \mathbf{x}, \mathbf{y}')) \mathbf{P}^{\mathbf{x}\mathbf{u}}[E]| + \frac{2C_7s}{\sqrt{\log(N/n)}}. \quad (6.40) +$$ + +Since all the $y_i, y'_i$ are in regular scales, Remark 6.5 implies that $A_{y_i}(2^{k_i}) = A_{y'_i}(2^{k_i}) =$ +$\text{Ann}(2^{k_i}, 2^{k_i+1})$. Furthermore, Property 2 of regular scales implies⁷ that + +$$ +|\delta(\mathbf{u}, \mathbf{x}, \mathbf{y}) - \delta(\mathbf{u}, \mathbf{x}, \mathbf{y}')| \le C_8 s \frac{M}{N} \delta(\mathbf{u}, \mathbf{x}, \mathbf{y}) \le C_9 s (n/N)^{1/3} \delta(\mathbf{u}, \mathbf{x}, \mathbf{y}). \quad (6.41) +$$ + +Therefore, (6.12) follows readily (with a large constant $C_{10}$) in this case. The same argument works for the second identity (6.13) for every **x**, **x'** and **y**, noticing that for every regular **u** for which the coefficients are non-zero, + +$$ +|\delta(\mathbf{u}, \mathbf{x}', \mathbf{y}) - \delta(\mathbf{u}, \mathbf{x}, \mathbf{y})| \le C_{10} s \frac{m}{n} \delta(\mathbf{u}, \mathbf{x}, \mathbf{y}) \le C_{11} s (n/N)^{1/3} \delta(\mathbf{u}, \mathbf{x}, \mathbf{y}). \quad (6.42) +$$ + +⁷Note that in this case $\delta(\mathbf{u}, \mathbf{x}, \mathbf{y})$ and $\delta(\mathbf{u}, \mathbf{x}, \mathbf{y}')$ are both close to + +$$ +\delta'(\mathbf{u}, \mathbf{x}) := \prod_{i 0$ sufficiently small. We now provide the proof of (6.48). 
Fix $0 < a < 1$ (any choice would do) and split the sum into four sums + +$$ S(L, r, \beta) = \underbrace{\sum_{\substack{x \in \Lambda_{drL} \\ x_1, \dots, x_4 \in \Lambda_{rL} \\ L(x_1, \dots, x_4) \ge L^a}} (\cdots)}_{(1)} + \underbrace{\sum_{\substack{x \notin \Lambda_{drL} \\ x_1, \dots, x_4 \in \Lambda_{rL} \\ L(x_1, \dots, x_4) \ge L^a}} (\cdots)}_{(2)} + \underbrace{\sum_{\substack{x \in \Lambda_{drL} \\ x_1, \dots, x_4 \in \Lambda_{rL} \\ L(x_1, \dots, x_4) < L^a}} (\cdots)}_{(3)} + \underbrace{\sum_{\substack{x \notin \Lambda_{drL} \\ x_1, \dots, x_4 \in \Lambda_{rL} \\ L(x_1, \dots, x_4) < L^a}} (\cdots)}_{(4)} \quad (6.49) $$ + +**Bound on (1)** We focus on this term and give more details since it is in fact the main contributor. By Lemma 6.3, + +$$ B_L(x_1, \dots, x_4)(\beta) \geq \frac{1}{C_3} B_L(\beta). \quad (6.50) $$ + +Summing over the sites in $\Lambda_{rL}$, we get that + +$$ (1) \le C_3 \frac{|\Lambda_{rL} | \chi_{2rL} (\beta)^4}{\Sigma_L(\beta)^2 B_L(\beta)^c} \le C_4 r^{12} \left( \frac{\chi_L(\beta)^2}{L^4 B_L(\beta)} \right)^c, \quad (6.51) $$ + +where in the second inequality we used the sliding-scale Infrared Bound (5.24) to bound $\chi_{2drL}(\beta)$ in terms of $\chi_L(\beta) \le C_5 L^{-4} \Sigma_L(\beta)$ and the Infrared Bound (5.37) to write + +$$ \chi_L(\beta) \le C_6 L^2. \quad (6.52) $$ + +Applying Cauchy-Schwarz for the first inequality below, then bounding the two terms in the middle by (6.52) and Lemma 6.3 correspondingly, we find that + +$$ \frac{\chi_L(\beta)^2}{L^4 B_L(\beta)} \le 2 \frac{\chi_{L/\log L}(\beta)^2}{L^4} + C_7 \frac{B_L(\beta) - B_{L/\log L}(\beta)}{B_L(\beta)} \le C_8 \frac{\log \log L}{\log L}. \quad (6.53) $$ + +Plugging this estimate in (6.51) gives + +$$ (1) \le C_9 r^{12} \left( \frac{\log \log L}{\log L} \right)^c . 
\quad (6.54) $$ + +**Bound on (2)** Combine (5.4) and the sliding-scale Infrared Bound (5.24) to get that for $i = 1, \dots, 4$, + +$$ \langle \sigma_x \sigma_{x_i} \rangle_\beta \le \frac{C_{10}}{|x|^4} \chi_{|x|/d}(\beta) \le \frac{C_{11}}{L^2|x|^2} \chi_L(\beta). \quad (6.55) $$ + +Summing over the sites gives the same bound as in (6.51) so that the reasoning in (1) gives + +$$ (2) \le C_{12} r^{12} \left( \frac{\log \log L}{\log L} \right)^c . \quad (6.56) $$ + +**Bound on (3)** This term is much smaller than the previous two due to the constraint that two sites must be close to each other. In fact, we will not even need the improved part of the tree diagram bound and will simply use that $B_L(x_1, \dots, x_4)(\beta) \ge 1$. Then, we use the Infrared Bound (5.37) to bound the terms $\langle \sigma_x \sigma_{x_i} \rangle_\beta$ and $\langle \sigma_x \sigma_{x_j} \rangle_\beta$ for +---PAGE_BREAK--- + +which $x_i$ and $x_j$ are at a distance exactly $L(x_1, \dots, x_4)$. Summing over the other two sites $x_k$ and $x_l$ gives a contribution bounded by $\chi_{2rL}(\beta)^2 \le C_{13}L^{-8}r^4\Sigma_L(\beta)^2$ by the sliding-scale Infrared Bound (5.24). Summing over $x$ and then $x_i$ and $x_j$ gives that + +$$ (3) \le \frac{C_{14}r^4 \log(Lr)}{L^{4-4a}}. \qquad (6.57) $$ + +**Bound on (4)** This sum is even simpler to bound than (3). Again, we simply use $B_L(x_1, \dots, x_4)(\beta) \ge 1$, bound two of the terms $\langle \sigma_x \sigma_{x_i} \rangle_\beta$ using (6.55), and the other two using the Infrared Bound (5.37). Summing over the vertices and using the constraint that two of the sites must be close to each other gives the bound + +$$ (4) \le \frac{C_{15}r^8}{L^{4-4a}}. \qquad (6.58) $$ + +In conclusion, all the sums (1)–(4) are sufficiently small (recall that by definition $r \ge 1$) and the claim is derived. 
+ +**Remark 6.8** For $\beta < \beta_c$, applying (6.46) with $r=1$ and $L = \xi(\beta)$ gives the following bound on the renormalized coupling constant $g(\beta)$: + +$$ g(\beta) := \frac{1}{\xi(\beta)^4 \chi(\beta)^2} \sum_{x,y,z \in \mathbb{Z}^d} |U_4^\beta(0,x,y,z)| \le \log\left(\frac{1}{|\beta - \beta_c|}\right)^{-c}, \qquad (6.59) $$ + +where we used that + +$$ \xi(\beta)^2 \ge c_0 \chi_{\xi(\beta)}(\beta) \ge c_1 \chi(\beta) \ge c_2 / (\beta_c - \beta). $$ + +(The first inequality follows from the infrared bound, the second is a classical bound obtained first by Sokal [47], and the third is a mean-field lower bound on $\chi(\beta)$ [1]). In field theory this quantity is often referred to as the (dimensionless) renormalized coupling constant. In [29] it was proved that for lattice $\phi_4^4$ measures of small enough $\lambda$ it converges to 0 at the rate $1/\log(\frac{1}{|\beta-\beta_c|})$. Such behaviour is expected to be true, in dimension $d=4$, also for the n.n.f. Ising model. + +# 7 Generalization to models in the Griffiths-Simon class + +In this section we extend the results to nearest-neighbor ferromagnetic models in the GS class. An important observation is that the results from the previous section also extend. Note, however, that $\rho$ can have unbounded support, so that to be of relevance the relations of interest need to be expressed in spin-dimension balanced forms. Once this is done, many of the basic diagrammatic bounds which are available for the Ising model extend to the GS class essentially by linearity, and then to the GS class by continuity. Below, we carefully present the generalizations. + +In the whole section, $U_4^{\rho,\beta}$ denotes the 4-point Ursell function of the $\tau$ variables, and $B_L(\rho, \beta)$ the bubble diagram truncated at a distance $L$. We also reuse the notation $\xi(\rho, \beta)$ and $\beta_c(\rho)$ introduced in Section 5. 
+ +## 7.1 An improved tree diagram bound for models in the GS class + +For bounds which are not homogeneous in the spin dimension, one needs to pay attention to the fact that $\tau$ is neither dimensionless nor bounded, and prepare the extension by reformulating the Ising relations in a spin-dimensionless form. +---PAGE_BREAK--- + +For example, the basic tree diagram bound (1.22) has four Ising spins on the left side and four pairs on the right. An extension of the inequality to GS models can be reached by site-splitting the terms in which an Ising spin is repeated, using the inequality (5.38) (which has a simple proof by means of the switching lemma) $^8$. The resulting diagrammatic bounds may at first glance appear as slightly more complicated than the one for the Ising case, but it has the advantage of being dimensionally balanced. That is a required condition for a bound to hold uniformly throughout the GS class of models. Additional consideration is needed for the factors by which the tree diagram bound of [1] is improved here. Taking care of that we get the following extension of the result, which also covers the $\phi^4$ lattice models. + +**Theorem 7.1 (Improved tree diagram bound for the GS class)** *There exist $C, c > 0$ such that for every n.n.f. model in the GS class on $\mathbb{Z}^4$, every $\beta \le \beta_c(\rho)$, $L \le \xi(\rho, \beta)$ and every $x,y,z,t \in \mathbb{Z}^d$ at distances larger than $L$ of each other,* + +$$|U_4^{\rho,\beta}(x,y,z,t)| \le C \left( \frac{B_0(\rho, \beta)}{B_L(\rho, \beta)} \right)^c \sum_u \sum_{u',u''} \langle \tau_x \tau_u \rangle_{\rho,\beta} \beta J_{u,u'} \langle \tau_{u'} \tau_y \rangle_{\rho,\beta} \langle \tau_z \tau_u \rangle_{\rho,\beta} \beta J_{u,u''} \langle \tau_{u''} \tau_t \rangle_{\rho,\beta}. 
\quad (7.1)$$ + +Before diving into the proof, note that the improved tree diagram bound implies, as it did for the Ising model, the following quantitative bound on the convergence to gaussian of the scaling limit of the $\tau$ field in four dimensional models with variables in the GS class. + +**Proposition 7.2** *There exist two constants $c, C > 0$ such that for every n.n.f. model in the GS class on $\mathbb{Z}^4$, every $\beta \le \beta_c(\rho)$, $L \le \xi(\rho, \beta)$, every continuous function $f: \mathbb{R}^4 \to \mathbb{R}$ with bounded support and every $z \in \mathbb{R}$,* + +$$\left| \left\langle \exp[zT_{f,L}(\tau) - \frac{z^2}{2} \langle T_{f,L}(\tau)^2 \rangle_{\rho,\beta}] \right\rangle_{\rho,\beta} - 1 \right| \leq \frac{C \|f\|_{\infty}^4 r_f^{12}}{(\log L)^c} z^4. \quad (7.2)$$ + +We now return to the proof of the improved tree diagram bound, following the path outlined above. The GS class of variables is naturally divided into two kinds. The core consists of those that directly fall under the Definition 2.1. The rest can be obtained as weak limits of the former. Since the constants in (7.1) are uniform, it suffices to prove the result for the former to get it for the latter. We therefore focus on site-measures $\rho$ satisfying Definition 2.1, which can directly be represented as Ising measures on a graph where every vertex is replaced by blocks, as explained in the previous section. In this case, we identify $\langle \cdot \rangle_{\rho,\beta}$ with the Ising measure, and $\tau_x$ with the proper average of Ising's variables. With this identification, we can harvest all the nice inequalities that are given by Ising's theory. In particular, we can use the random current representation. 
+ +More explicitly, to generalize the argument used in the Ising's proof, we introduce the measure $\mathbf{P}^{xy}$ defined on the graph $\mathbb{Z}^d \times \{1, \dots, N\}$ in two steps: + +• first, sample two integers $1 \le i, j \le N$ with probability + +$$Q_i Q_j (\sigma_{x,i} \sigma_{y,j})_{\rho,\beta} / (\tau_x \tau_y)_{\rho,\beta},$$ + +• second, sample a current according to the measure $\mathbf{P}_{\rho,\beta}^{(x,i),(y,j)}$ corresponding to the random current representation of the Ising model $\langle \cdot \rangle_{\rho,\beta}$. + +⁸An alternative method for reducing a diagrammatic expression's spin-dimension is to divide by $\langle\sigma_u^2\rangle_0$. Both methods are of use, and may be compared through (5.15). +---PAGE_BREAK--- + +The interpretation of this object is that of a random current with two random sources $(x, i) \in \mathcal{B}_x$ and $(y, j) \in \mathcal{B}_y$. Also note that the superscript $xy$ will unequivocally denote this type of measures (we will avoid using measures with deterministic sources in this section to prevent confusion) and $\mathbf{P}_{\rho,\beta}^{\emptyset}$ which have no sources. + +The interest in $\mathbf{P}_{\rho,\beta}^{xy}$ over measures with deterministic sets of sources comes from the fact that the probability that the cluster of the sources intersects a set of the form $\mathcal{B}_u$ can be bounded in terms of correlations of the variables $\tau_x, x \in \mathbb{Z}^d$ (see Proposition A.8). + +**Proof of Theorem 7.1** As mentioned above, every $\rho$ in the GS class is a weak limit of measures satisfying the first condition of Definition 2.1. We therefore focus on such measures. + +Exactly like in the case of the Ising model, the core of the proof of Theorem 7.1 will be the proof of the intersection-clustering property that we now state and whose proof is postponed after the proof of the theorem. 
Define $\ell_0 = 0$ and $\ell_k = \ell_k(\rho, \beta)$ using the same definition (using $B_L(\rho, \beta)$ this time) as in (6.1). Let $\mathcal{T}_u$ be the set of vertices $v \in \mathbb{Z}^d$ such that $\mathcal{B}_v$ is connected in $\mathbf{n}_1 + \mathbf{n}_3$ to a box $\mathcal{B}_{u'}$ and in $\mathbf{n}_2 + \mathbf{n}_4$ to a box $\mathcal{B}_{u''}$, with $u'$ and $u''$ at graph distance at most 2 of $u$. Note that $\mathcal{T}_u$ is now a function of $u$ and that it is defined in terms of “coarse intersections”, i.e. lattice sites $v$ such that both clusters intersect $\mathcal{B}_v$ (but do not necessarily intersect each other). + +**Proposition 7.3 (intersection-clustering bound for the GS class)** For $d=4$ and $D$ large enough, there exists $\delta = \delta(D)$ such that for every model in the GS class, every $\beta \le \beta_c(\rho)$, every $K$ such that $\ell_K \le \xi(\rho, \beta)$ and every $u, u', u'', x, y, z, t \in \mathbb{Z}^d$ with $u'$ and $u''$ neighbors of $u$ and $x, y, z, t$ at mutual distances larger than $2\ell_K$, + +$$ \mathbf{P}_{\rho,\beta}^{ux,uz,u'y,u''t}[\mathbf{M}_u(\mathcal{T}_u; \mathcal{L}, K) \le \delta K] \le 2^{-\delta K}. \quad (7.3) $$ + +Postponing the proof of this estimate, we proceed with the proof of the Theorem. 
Express $U_4^{\rho,\beta}$ in terms of intersection properties of currents by summing (3.14) over vertices of $\mathcal{B}_x, \dots, \mathcal{B}_z$: + +$$ |U_4^{\rho,\beta}(x,y,z,t)| \le 2\langle\tau_x\tau_y\rangle_{\rho,\beta}\langle\tau_z\tau_t\rangle_{\rho,\beta}\mathbf{P}_{\rho,\beta}^{xy,zt,\emptyset,\emptyset}[\mathbf{C}_{n_1+n_3}(\partial\mathbf{n}_1) \cap \mathbf{C}_{n_2+n_4}(\partial\mathbf{n}_2) \neq \emptyset], \quad (7.4) $$ + +where $\mathbf{C}_{n_1+n_3}(\partial n_1)$ and $\mathbf{C}_{n_2+n_4}(\partial n_2)$ refer to the clusters in $\mathbf{n}_1+\mathbf{n}_3$ and $\mathbf{n}_2+\mathbf{n}_4$ of the sources in $\partial n_1$ and $\partial n_2$ respectively (we introduce this notation since the sources are not deterministic anymore). + +Define $K \ge c \log[B_L(\rho, \beta)/B_0(\rho, \beta)]$ as in the Ising case. We now implement the same reasoning as for the Ising model, with the twist that we consider coarse intersections. If $\mathbf{C}_{n_1+n_3}(\partial n_1)$ and $\mathbf{C}_{n_2+n_4}(\partial n_2)$ intersect, then + +• either the number of $u \in \mathbb{Z}^d$ such that $\mathbf{C}_{n_1+n_3}(\partial n_1)$ and $\mathbf{C}_{n_2+n_4}(\partial n_2)$ intersect $\mathcal{B}_u$ is larger than or equal to $2^{\delta K/5}$, + +• or there exists $u \in \mathbb{Z}^d$ such that $\mathbf{C}_{n_1+n_3}(\partial n_1)$ and $\mathbf{C}_{n_2+n_4}(\partial n_2)$ intersect $\mathcal{B}_u$, and $\mathbf{M}_u(\mathcal{T}_u; \mathcal{L}, K) < \delta K$. 
+ +Using the Markov inequality and (A.38) on the first line, and Lemma A.7 in the second one, we find (drop $\rho$ and $\beta$ from notation) + +$$ +\begin{aligned} +|U_4(x,y,z,t)| &\le 2^{-\delta K/5} \sum_{u,u',u'' \in \mathbb{Z}^d} \langle \tau_x \tau_u \rangle \beta J_{u,u'} \langle \tau_{u'} \tau_y \rangle \langle \tau_z \tau_u \rangle \beta J_{u,u''} \langle \tau_{u''} \tau_t \rangle \\ +&\quad + \sum_{u,u',u'' \in \mathbb{Z}^d} \langle \tau_x \tau_u \rangle \beta J_{u,u'} \langle \tau_{u'} \tau_y \rangle \langle \tau_z \tau_u \rangle \beta J_{u,u''} \langle \tau_{u''} \tau_t \rangle \mathbf{P}^{xu,zu,u'y,u''t}[\mathbf{M}_u(\mathcal{T}_u; \mathcal{L}, K) < \delta K]. +\end{aligned} +\quad (7.5) +$$ +---PAGE_BREAK--- + +Lemma A.7 was invoked here since in the present context $\mathbf{M}_u(\mathcal{T}_u; \mathcal{L}, K)$ is defined in terms of coarse rather than true intersections. The intersection-clustering bound (Proposition 7.3) concludes the proof. $\square$ + +We now need to prove Proposition 7.3. The proof itself is exactly the same as for Proposition 6.1 (the monotonicity property of (A.2) is not impacted), except for the proofs of the mixing and intersection properties (i.e. statements corresponding to Lemma 6.2 and Theorem 6.4 respectively). Below, we briefly detail the statements and proofs of these results. Let $I_k(0)$ be the event that there exists $v \in \text{Ann}(\ell_k, \ell_{k+1})$ such that $\mathcal{B}_v$ is connected in $\mathbf{n}_1 + \mathbf{n}_3$ and in $\mathbf{n}_2 + \mathbf{n}_4$ to the union of the boxes $\mathcal{B}_w$ with $w$ at a distance at most 2 of 0. + +**Lemma 7.4 (intersection property for the GS class)** *There exists $c > 0$ such that for every $\rho$ in the GS class, every $\beta \le \beta_c(\rho)$, every $k$, every neighbour $0'$ of the origin, and every $y \notin \Lambda_{2\ell_{k+1}}$ in a regular scale,* + +$$ \mathbf{P}_{\rho,\beta}^{0y,0'y,\emptyset,\emptyset}[I_k(0)] \ge c. 
\quad (7.6) $$ + +**Proof** Reuse the notions introduced in the proofs of the intersection property in previous sections. Let + +$$ \mathcal{M} := \sum_{v \in \text{Ann}(m,M)} \sum_{i,i'=1}^{N} Q_i^2 \mathbb{I}[\partial \mathbf{n}_1 \xleftrightarrow{\mathbf{n}_1+\mathbf{n}_3} (v,i)] \, Q_{i'}^2 \mathbb{I}[\partial \mathbf{n}_2 \xleftrightarrow{\mathbf{n}_2+\mathbf{n}_4} (v,i')]. \quad (7.7) $$ + +A computation similar to before gives + +$$ \mathbf{E}_{\rho, \beta}^{0y, 0'y, \emptyset, \emptyset}[\mathcal{M}] \geq c_1(B_M(\rho, \beta) - B_{m-1}(\rho, \beta)) \quad (7.8) $$ + +$$ \mathbf{E}_{\rho, \beta}^{0y, 0'y, \emptyset, \emptyset}[\mathcal{M}^2] \le C_2 B_{\ell_{k+1}}(\rho, \beta)^2. \quad (7.9) $$ + +Now, in the first line we use the same reasoning as below (6.6). We include it for completeness to see where the division by $B_0(\rho, \beta)$ enters into the game (it is the only place it does). The Infrared Bound (5.36) (note that $\langle \tau_0^2 \rangle_{\rho,\beta} = B_0(\rho, \beta)$) implies that + +$$ B_M(\rho, \beta) - B_{m-1}(\rho, \beta) \ge B_{\ell_{k+1}}(\rho, \beta) - B_{\ell_k}(\rho, \beta) - C_3 B_0(\rho, \beta) \ge \left(1 - \frac{1+C_3}{D}\right) B_{\ell_{k+1}}(\rho, \beta). \quad (7.10) $$ + +Cauchy-Schwarz therefore implies that $\mathcal{M} > 0$ with positive probability, which implies in particular the existence of a vertex $v \in \text{Ann}(m,M)$ which is connected in $\mathbf{n}_1 + \mathbf{n}_3$ to $\mathcal{B}_0$ and in $\mathbf{n}_2 + \mathbf{n}_4$ to $\mathcal{B}_{0'}$. + +The second part of the proof bounding the probabilities of $F_1, \dots, F_4$ follows by the same proof as for the Ising model. 
More precisely, for $F_1$, the chain rule for backbones [3] and a decomposition on the first edge of the backbone with one endpoint in (a block of a vertex in) $\Lambda_{n-1}$ and the other (in a block of a vertex) in $\partial\Lambda_n$, and then the first edge after this between an endpoint (in a block of a vertex) outside $\Lambda_{\ell_k}$ and one in (a block of a vertex in) $\Lambda_{\ell_k}$ implies that + +$$ \mathbf{P}_{\rho, \beta}^{0y, \emptyset}[F_1] \le \sum_{\substack{v \in \partial\Lambda_n \\ w \in \partial\Lambda_{\ell_k} \\ v', w' \in \mathbb{Z}^d}} \frac{\langle \tau_0 \tau_v \rangle_{\rho, \beta} \beta J_{v', v} \langle \tau_v \tau_{w'} \rangle_{\rho, \beta} \beta J_{w', w} \langle \tau_w \tau_y \rangle_{\rho, \beta}}{\langle \tau_0 \tau_y \rangle_{\rho, \beta}} \le C_3 n^3 \ell_k^3 n^{-4} \le C_4 \ell_k^{-\epsilon}. \quad (7.11) $$ +---PAGE_BREAK--- + +This inequality uses Property P2 of regular scales, the lower bound (5.39) on the two-point function, and the Infrared Bound (5.37). For $F_3$, the same reasoning as for Ising, with Proposition A.8 replacing the switching lemma, leads to + +$$ +\mathbf{P}_{\rho,\beta}^{0,x,\emptyset}[F_3] \le \sum_{\substack{v \in \partial \Lambda_n \\ w \in \partial \Lambda_m}} \mathbf{P}_{\rho,\beta}^{\emptyset,\emptyset}[\mathcal{B}_v \xleftarrow{\mathrm{n}_1+\mathrm{n}_2} \mathcal{B}_w] \le \sum_{\substack{v \in \partial \Lambda_n \\ w \in \partial \Lambda_m \\ v', w' \in \mathbb{Z}^d}} \langle \tau_v \tau_w \rangle \beta J_{w,w'} \langle \tau_{w'} \tau_{v'} \rangle \beta J_{v',v} \le C_5 l_k^{-\epsilon}, \tag{7.12} +$$ + +where in the last line we used again the Infrared Bound (5.37). $\square$ + +We now turn to the proof of the mixing property for the measures $P_\beta^{xy}$, which is the exact replica of the Ising statement. 
+ +**Theorem 7.5 (mixing of random currents for the GS class)** For $d \ge 4$, there exist $\alpha, c > 0$ such that for every $\rho$ satisfying Definition 2.1, every $t \le s$, every $\beta \le \beta_c(\rho)$, every $n^\alpha \le N \le \xi(\rho, \beta)$, every $x_i \in \Lambda_n$ and $y_i \notin \Lambda_N$ for every $i \le t$, and every events E and F depending on the restriction of $(\mathbf{n}_1, \dots, \mathbf{n}_s)$ to edges within $\Lambda_n$ and outside of $\Lambda_N$ respectively, + +$$ +| \mathbf{P}_{\rho, \beta}^{x_1 y_1, \dots, x_t y_t, \emptyset, \dots, \emptyset} [E \cap F] - \mathbf{P}_{\rho, \beta}^{x'_1 y'_1, \dots, x'_t y'_t, \emptyset, \dots, \emptyset} [E] | \mathbf{P}_{\rho, \beta}^{x_1 y_1, \dots, x_t y_t, \emptyset, \dots, \emptyset} [F] | \le s (\log \frac{N}{n})^{-c}. \quad (7.13) +$$ + +Furthermore, for every $x'_1, \dots, x'_t \in \Lambda_n$ and $y'_1, \dots, y'_t \notin \Lambda_N$, + +$$ +|\mathbf{P}_{\rho,\beta}^{x_1 y_1, \dots, x_t y_t, \emptyset, \dots, \emptyset} [E] - \mathbf{P}_{\rho,\beta}^{x'_1 y'_1, \dots, x'_t y'_t, \emptyset, \dots, \emptyset} [E]| \le s (\log \frac{N}{n})^{-c}, \quad (7.14) +$$ + +$$ +\left| |\mathbf{P}_{\rho,\beta}^{x_1 y_1, \dots, x_t y_t, \emptyset, \dots, \emptyset} [F] - \mathbf{P}_{\rho,\beta}^{x'_1 y'_1, \dots, x'_t y'_t, \emptyset, \dots, \emptyset} [F]| \le s (\log \frac{N}{n})^{-c}. \quad (7.15) +$$ + +**Proof** The beginning is the same as for the Ising model, until the definition of the variable $\mathbf{N}_i$ that now becomes + +$$ +\mathbf{N}_i := \frac{1}{|\mathcal{H}|} \sum_{k \in \mathcal{K}} \frac{1}{A_{x_i, y_i}(k)} \sum_{u \in A_k(y_i)} Q_j^2 [\mathbb{I}[(u,j) \xleftarrow{\mathbf{n}_i + \mathbf{n}'_i} \partial\mathbf{n}_i]], \quad (7.16) +$$ + +where $a_{x,y}(u) := (\tau_x\tau_u)(\tau_u\tau_y)/(\tau_x\tau_y)$ and $A_{x,y}(k) := \sum_{u\in A_k(y_i)} a_{x,y}(u)$. The proof of the concentration inequality follows the same lines as in the Ising case. 
Indeed, the choice of the weight $Q_j^2$ enables to rewrite the moments of the random variables $\mathbf{N}_i$ in terms of the correlations of the random variables $(\tau_z : z \in \mathbb{Z}^d)$. The rest of the proof is exactly the same, with trivial changes. For instance, in the proof of Lemma 6.7, one must be careful to derive bounds on probabilities involving $\beta|J|$. This is easily doable using Proposition A.8 exactly like in the previous proof. $\square$ + +# A Appendix + +## A.1 Random currents's partial monotonicity statements + +An inconvenient feature of the random current representation is the lack of an FKG-type monotonicity, as the one valid for the Fortuin-Kasteleyn random cluster models (cf. [25]). The addition of a pair of sources may enhance the configuration, e.g. forcing a long line +---PAGE_BREAK--- + +where such were rare, but in some situations it may facilitate a split in a connecting line, +thereby reducing the current's connectivity properties. Nevertheless, some monotonicity +properties can still be found, and are used in our analysis. + +In this section, we set $\sigma_A$ for the product of the spins in $A$ and write $\mathbf{C}_n(S) = \cup_{x \in S} \mathbf{C}_n(x)$. + +**Lemma A.1** Let $A, B, S$ be subsets of $\Lambda$ and $F$ a non-negative function defined over pairs of currents, which is determined by just the values of $(\mathbf{n}_1, \mathbf{n}_2)$ along the edges touching the connected cluster $\mathbf{C}_{\mathbf{n}_1+\mathbf{n}_2}(S)$ and such that $F(\mathbf{n}_1, \mathbf{n}_2) = 0$ whenever that cluster intersects $B$ and $(\partial\mathbf{n}_1, \partial\mathbf{n}_2) = (A, B)$. 
Then + +$$ +\mathbf{E}_{\Lambda,\beta}^{A,B}[F(\mathbf{n}_1, \mathbf{n}_2)] = \mathbf{E}_{\Lambda,\beta}^{A,\emptyset}[F(\mathbf{n}_1, \mathbf{n}_2) \frac{\langle \sigma_B \rangle_{\Lambda \setminus C_{\mathbf{n}_1+\mathbf{n}_2}(S),\beta}}{\langle \sigma_B \rangle_{\Lambda,\beta}}] \leq \mathbf{E}_{\Lambda,\beta}^{A,\emptyset}[F(\mathbf{n}_1, \mathbf{n}_2)]. \quad (\text{A.1}) +$$ + +**Proof** The second inequality is a trivial application of Griffiths’ inequality [22]. The first one is proven by a fairly straightforward manipulation involving currents that we now present. We drop $\beta$ from the notation. Fix $T \subset \Lambda$ not intersection $B$ and choose $F$ given by + +$$ +F(\mathbf{n}_1, \mathbf{n}_2) := \mathbb{I}[\mathbf{C}_{\mathbf{n}_1+\mathbf{n}_2}(S) = T] \mathbb{I}[\mathbf{n}_1 = \mathbf{n}] \mathbb{I}[\mathbf{n}_2 = \mathbf{m} \text{ on } T] \quad (\text{A.2}) +$$ + +for **n** and **m** currents on Λ and T respectively. For such a choice of function, we find that + +$$ +\begin{align*} +\langle \sigma_A \rangle_\Lambda \langle \sigma_B \rangle_\Lambda \mathbf{E}_\Lambda^{A,B}[F(\mathbf{n}_1, \mathbf{n}_2)] &= \frac{4^{|\Lambda|}}{Z(\Lambda, \beta)^2} \sum_{\mathbf{n}_1 : \partial\mathbf{n}_1 = A} \sum_{\mathbf{n}_2 : \partial\mathbf{n}_2 = B} F(\mathbf{n}_1, \mathbf{n}_2) w(\mathbf{n}_1) w(\mathbf{n}_2) \\ +&= \frac{4^{|\Lambda|} w(\mathbf{n}) w(\mathbf{m})}{Z(\Lambda, \beta)^2} \sum_{\mathbf{n}'_2 : \partial\mathbf{n}'_2 = B} w(\mathbf{n}_2) \\ +&= \frac{4^{|\Lambda|} w(\mathbf{n}) w(\mathbf{m})}{Z(\Lambda, \beta)^2} \langle \sigma_B \rangle_{\Lambda \setminus T} \sum_{\mathbf{n}'_2 : \partial\mathbf{n}'_2 = \emptyset} w(\mathbf{n}_2) \\ +&= \langle \sigma_A \rangle_\Lambda \langle \sigma_B \rangle_{\Lambda \setminus T} \mathbf{E}_\Lambda^{A,\emptyset}[F(\mathbf{n}_1, \mathbf{n}_2)], \tag{A.3} +\end{align*} +$$ + +where $\mathbf{n}'_2$ is referring to a current on $\Lambda \setminus T$. 
In the second line, we used that for $F(\mathbf{n}_1, \mathbf{n}_2)$ to be non-zero, $\mathbf{n}_1$ must be equal to $\mathbf{n}$ and $\mathbf{n}_2$ be decomposed into the current $\mathbf{m}$ on $T$ and a current $\mathbf{n}'_2$ outside $T$ (also, $\mathbf{n}_2(x,y)$ is equal to zero for every $x \in T$ and $y \notin T$). In the last line, we skipped the steps corresponding to going backward line to line to end up with $\mathbf{E}_{\Lambda}^{A,\emptyset}[F(\mathbf{n}_1, \mathbf{n}_2)]$. + +The proof follows readily for every function *F* satisfying the assumptions of the lemma. +Also, we obtain the result on Z*d* by letting Λ tend to Z*d*. + +An interesting application of the lemma is the following pair of disentangling bounds. +The first inequality appeared in [1, Proposition 5.2], the second is new. + +**Corollary A.2** For every $\beta > 0$, every four vertices $x, y, z, t \in \mathbb{Z}^d$ and every set $S \subset \mathbb{Z}^d$, + +$$ +\begin{align} +\mathbf{P}_{\beta}^{xy,zt}[\mathbf{C}_{n_1+n_2}(x) \cap \mathbf{C}_{n_1+n_2}(z) \neq \emptyset] &\leq \mathbf{P}_{\beta}^{xy,\emptyset,zt}[\mathbf{C}_{n_1+n_2}(x) \cap \mathbf{C}_{n_3}(z) \neq \emptyset], && (\text{A.4}) \\ +\mathbf{P}_{\beta}^{0x,0z,\emptyset,\emptyset}[\mathbf{C}_{n_1+n_3}(0) \cap \mathbf{C}_{n_2+n_4}(0) \cap S \neq \emptyset] &\leq \mathbf{P}_{\beta}^{0x,0z,0y,0t}[\mathbf{C}_{n_1+n_3}(0) \cap \mathbf{C}_{n_2+n_4}(0) \cap S \neq \emptyset]. && (\text{A.5}) +\end{align} +$$ +---PAGE_BREAK--- + +**Proof** Fix $\beta > 0$, $\Lambda$ finite (the claim will then follow by letting $\Lambda$ tend to $\mathbb{Z}^d$) and drop $\beta$ from the notation. For the first identity, introduce the random variable + +$$ \mathbf{C} = \mathbf{C}(\mathbf{n}_1, \mathbf{n}_2, \mathbf{n}_3) := \mathbf{C}_{\mathbf{n}_3}(\mathbf{C}_{\mathbf{n}_1+\mathbf{n}_2}(x)). 
\quad (\text{A.6}) $$ + +Lemma A.1 applied in the first and third lines, Griffiths’ inequality [22], and the trivial inclusion $\mathbf{C}_{\mathbf{n}_1+\mathbf{n}_2}(x) \subset \mathbf{C}$ in the second, give + +$$ +\begin{align*} +\mathbf{P}_{\Lambda}^{xy,zt}[\mathbf{C}_{\mathbf{n}_1+\mathbf{n}_2}(x) \cap \mathbf{C}_{\mathbf{n}_1+\mathbf{n}_2}(z) = \emptyset] &= \mathbf{E}_{\Lambda}^{xy,\emptyset}[\mathbb{I}[z, t \notin \mathbf{C}_{\mathbf{n}_1+\mathbf{n}_2}(x)] \frac{\langle \sigma_z \sigma_t \rangle_{\Lambda \setminus \mathbf{C}_{\mathbf{n}_1+\mathbf{n}_2}(x)}}{\langle \sigma_z \sigma_t \rangle_{\Lambda}}] \\ +&\geq \mathbf{E}_{\Lambda}^{xy,\emptyset}[\mathbb{I}[z, t \notin \mathbf{C}] \frac{\langle \sigma_z \sigma_t \rangle_{\Lambda \setminus \mathbf{C}}}{\langle \sigma_z \sigma_t \rangle_{\Lambda}}] \\ +&= \mathbf{P}_{\Lambda}^{xy,\emptyset,zt}[z, t \notin \mathbf{C}], \tag{A.7} +\end{align*} +$$ + +which gives the first inequality. + +The second identity requires two successive applications of Lemma A.1. First, conditioning on $\mathbf{n}_2 + \mathbf{n}_4$, the proposition applied to $S := \mathbf{C}_{\mathbf{n}_2+\mathbf{n}_4}(0) \cap S$ gives + +$$ \mathbf{P}_{\Lambda}^{0x,0z,\emptyset,0t}[\mathbf{C}_{n_1+n_3}(0) \cap \mathbf{C}_{n_2+n_4}(0) \cap S \neq \emptyset] \leq \mathbf{P}_{\Lambda}^{0x,0z,0y,0t}[\mathbf{C}_{n_1+n_3}(0) \cap \mathbf{C}_{n_2+n_4}(0) \cap S \neq \emptyset]. \quad (\text{A.8}) $$ + +Similarly, conditioning on $\mathbf{n}_1 + \mathbf{n}_3$, the proposition applied to $S' := \mathbf{C}_{\mathbf{n}_1+\mathbf{n}_3}(0) \cap S$ gives + +$$ \mathbf{P}_{\Lambda}^{0x,0z,\emptyset,\emptyset}[\mathbf{C}_{n_1+n_3}(0) \cap \mathbf{C}_{n_2+n_4}(0) \cap S \neq \emptyset] \leq \mathbf{P}_{\Lambda}^{0x,0z,\emptyset,\emptyset}[|\mathbf{C}_{n_1+n_3}(0) \cap \mathbf{C}_{n_2+n_4}(0)| S \neq \emptyset], \quad (\text{A.9}) $$ + +thus concluding the proof. 
$\square$ + +## A.2 Multi-point connectivity probabilities + +The following two relations facilitate the derivation of estimates guided by the random walk analogy. + +**Proposition A.3** For every $x, u, v \in \mathbb{Z}^d$, we have that + +$$ +\begin{align} +\mathbf{P}_{\beta}^{0x,\emptyset}[u \stackrel{\mathbf{n}_1+\mathbf{n}_2}{\leftrightarrow} 0] &= \frac{\langle \sigma_0 \sigma_u \rangle_{\beta} \langle \sigma_u \sigma_x \rangle_{\beta}}{\langle \sigma_0 \sigma_x \rangle_{\beta}}, && (\text{A.10}) \\ +\mathbf{P}_{\beta}^{0x,\emptyset}[u,v &\stackrel{\mathbf{n}_1+\mathbf{n}_2}{\leftrightarrow} 0] \le && \frac{\langle \sigma_0 \sigma_v \rangle_{\beta} \langle \sigma_v \sigma_u \rangle_{\beta} \langle \sigma_u \sigma_x \rangle_{\beta}}{\langle \sigma_0 \sigma_x \rangle_{\beta}} + && \frac{\langle \sigma_0 \sigma_u \rangle_{\beta} \langle \sigma_u \sigma_v \rangle_{\beta} \langle \sigma_v \sigma_x \rangle_{\beta}}{\langle \sigma_0 \sigma_x \rangle_{\beta}}. && (\text{A.11}) +\end{align} +$$ + +The equality (A.10) is a direct consequence of the switching lemma and has been used several times in the past. The inequality (A.11) is an important new addition, which is proven below. Its structure suggests a more general *k*-step random walk type bound, but the present proof does not extend to *k* > 2. In particular, if a *k*-step bound could be proven for every *k*, it would improve the concentration estimate for $\mathcal{N}$ in the proof of mixing from an inverse logarithmic bound to a small polynomial one, which would translate into a similar bound for the mixing property which may be very useful for the study of the critical regime. Note that this would not improve the log correction in our result since the intersection property also requires the $\ell_k$ to grow fast. +---PAGE_BREAK--- + +**Proof** Fix $\beta > 0$ and drop it from the notation. We work with finite $\Lambda$ and then take the limit as $\Lambda$ tends to $\mathbb{Z}^d$. 
In the whole proof, $\leftrightarrow$ denotes the connection in $\mathbf{n}_1 + \mathbf{n}_2$, and $\leftarrow\to$ denotes the absence of connection. As mentioned above, (A.10) follows readily from the switching lemma. To prove (A.11), use the switching lemma to find + +$$ +\mathbf{P}_{\Lambda}^{0x,\emptyset}[u,v \leftrightarrow 0] = \frac{\langle \sigma_0 \sigma_u \rangle_{\Lambda} \langle \sigma_u \sigma_x \rangle_{\Lambda}}{\langle \sigma_0 \sigma_x \rangle_{\Lambda}} \mathbf{P}_{\Lambda}^{0u,ux}[v \leftrightarrow u]. \quad (\text{A.12}) +$$ + +Then, our goal is to show that + +$$ +\mathbf{P}_{\Lambda}^{0u,ux}[v \leftrightarrow u] \leq \mathbf{P}_{\Lambda}^{0u,\emptyset}[v \leftrightarrow u] + \mathbf{P}_{\Lambda}^{\emptyset,ux}[v \leftrightarrow u] - \mathbf{P}_{\Lambda}^{\emptyset,\emptyset}[v \leftrightarrow u] \quad (\text{A.13}) +$$ + +which implies (A.11) readily using (A.10). In order to show (A.13), set $\mathbf{C} = \mathbf{C}_{n_1+n_2}(v)$ and apply Lemma A.1 to $F(\mathbf{n}_1, \mathbf{n}_2) := \mathbb{I}[u \leftrightarrow v]$ to obtain + +$$ +\mathbf{P}_{\Lambda}^{0u,ux}[u \leftrightarrow v] = \mathbf{E}_{\Lambda}^{0u,\emptyset}[\mathbb{I}[u \leftrightarrow v] \frac{\langle \sigma_0 \sigma_y \rangle_{\Lambda \setminus C}}{\langle \sigma_0 \sigma_y \rangle_{\Lambda}}]. 
\quad (\text{A.14}) +$$ + +Next, apply Lemma A.1 to + +$$ +F(\mathbf{n}_1, \mathbf{n}_2) := \mathbb{I}[u \leftrightarrow v] \left(1 - \frac{\langle \sigma_0 \sigma_x \rangle_{\Lambda \setminus C}}{\langle \sigma_0 \sigma_x \rangle_{\Lambda}}\right) \geq 0 \quad (\text{A.15}) +$$ + +(the inequality is due to Griffiths’ inequality [22]) to obtain (A.13) thanks to the following inequalities + +$$ +\begin{align} +& \mathbf{P}_{\Lambda}^{0x,\emptyset}[u \leftrightarrow v] - \mathbf{P}_{\Lambda}^{0x,0y}[u \leftrightarrow v] = \mathbf{E}_{\Lambda}^{0x,\emptyset}[F(\mathbf{n}_1, \mathbf{n}_2)] = \mathbf{E}_{\Lambda}^{\emptyset,\emptyset}\left[F(\mathbf{n}_1, \mathbf{n}_2) \frac{\langle\sigma_0\sigma_x\rangle_{\Lambda\setminus C}}{\langle\sigma_0\sigma_x\rangle_{\Lambda}}\right] \\ +& \leq \mathbf{E}_{\Lambda}^{\emptyset,\emptyset}[F(\mathbf{n}_1, \mathbf{n}_2)] = \mathbf{P}_{\Lambda}^{\emptyset,\emptyset}[u \leftrightarrow v] - \mathbf{P}_{\Lambda}^{\emptyset,0y}[u \leftrightarrow v]. \tag{A.16} +\end{align} +$$ + +□ + +**Remark A.4** Griffiths’ inequality [22] plugged in (A.14) gives + +$$ +\mathbf{P}^{0u,ux}[v \leftrightarrow u] \geq \mathbf{P}^{0u,\emptyset}[v \leftrightarrow u]. \quad (\text{A.17}) +$$ + +**Remark A.5** The inequalities (A.13) and (A.17) can be extended to every set $S \subset \mathbb{Z}^d$ and every two vertices $x, y \in \mathbb{Z}^d$: + +$$ +\mathbf{P}_{\beta}^{0x,\emptyset}[0 \underset{n_1+n_2}{\stackrel{n_1+n_2}{\rightleftharpoons}} S] \leq \mathbf{P}_{\beta}^{0x,0y}[0 \underset{n_1+n_2}{\stackrel{n_1+n_2}{\rightleftharpoons}} S] \leq \mathbf{P}_{\beta}^{0x,\emptyset}[0 \underset{n_1+n_2}{\stackrel{n_1+n_2}{\rightleftharpoons}} S] + \mathbf{P}_{\beta}^{\emptyset,0y}[0 \underset{n_1+n_2}{\stackrel{n_1+n_2}{\rightleftharpoons}} S] - \mathbf{P}_{\beta}^{\emptyset,\emptyset}[0 \underset{n_1+n_2}{\stackrel{n_1+n_2}{\rightleftharpoons}} S]. 
\quad (\text{A.18}) +$$ + +**A.3 The spectral representation** + +In Section 5.3 we make use of a spectral representation of the correlation function $S(x) := (\tau_0\tau_x)$. Though the statement is well known, cf. [21] and references therein, for completeness of the presentation following is its derivation. For the present purpose it is convenient to present the system’s Hamiltonian as the semi-definite function + +$$ +H = - \sum_{x,y} J_{x,y} (\tau_x - \tau_y)^2 . \tag{A.19} +$$ +---PAGE_BREAK--- + +The difference from the expressions used elsewhere in the paper are the diagonal quadratic terms $\tau_x^2$ whose effect on the Gibbs measure can be incorporated by an adjustment in the spins' a-priori distribution (which is doable as we assumed in the first place that the site distribution was satisfying (2.2)). + +To avoid burdensome notation, when the domain over which the spins are defined is clear from the context of the discussion we shall use the symbol $\tau = \{\tau_x\}_x$ to denote the entire collection of spins in that region, and by $\rho_0(d\tau)$ the corresponding product measure. + +**Proposition A.6 (Spectral Representation)** Let $\rho_0$ be a single variable distribution for which the Gibbs states on $\mathbb{Z}^d$ with the n.n.f. Hamiltonian satisfies + +$$ \langle |\tau_0|^2 \rangle_\beta < \infty, \quad \forall \beta \ge 0. \tag{A.20} $$ + +Then, for every $0 < \beta < \infty$ and every square-summable $v \in \ell^2(\mathbb{Z}^{d-1})$, there exists a positive measure $\mu_{v,\beta}$ with a total mass satisfying + +$$ \mu_{v,\beta}([0, \infty)) \le \|v\|_2^2 \langle |\tau_0|^2 \rangle_\beta \tag{A.21} $$ + +such that for every $n \in \mathbb{Z}$, + +$$ \sum_{x_\perp, y_\perp \in \mathbb{Z}^{d-1}} v_{x_\perp} \overline{y_{y_\perp}} S_\beta((n, x_\perp - y_\perp)) = \int_0^\infty e^{-a|n|} d\mu_{v,\beta}(a). 
\tag{A.22} $$ + +For $\beta < \beta_c$ the measure' support is limited to $a \ge 1/\xi(\beta)$ (here $\xi(\beta)$ is the correlation length of the system). + +In particular, with $v = \delta_\perp$ the Kronecker function (at the origin) on $\mathbb{Z}^{d-1}$, this yields +the following spectral representation for the correlation function along a principal axis + +$$ S_\beta((n, 0_\perp)) = \int_{1/\xi(\beta)}^\infty e^{-a|n|} d\mu_{\delta_\perp, \beta}(a), \tag{A.23} $$ + +with a measure whose total mass is $\mu_{\delta_\perp, \beta}([0, \infty)) = (\lvert\tau_0\rvert^2)_\beta$. + +**Proof** Throughout the proof $\beta$ is held constant, and to a large extent will be omitted from the notation. It is convenient to first derive the corresponding statements for finite volume versions of the model, in tubular domains with periodic boundary conditions $\mathbb{T}(m, \ell) := (\mathbb{Z}/m\mathbb{Z}) \times (\mathbb{Z}/\ell\mathbb{Z})^{d-1}$ (with the notational convention $\mathbb{Z}/\infty\mathbb{Z} = \mathbb{Z}$). The corresponding finite volume correlation function is naturally denoted $S_{m,\ell;\beta}(x) := \langle \tau_0\tau_x \rangle_{\mathbb{T}(m,\ell)}$. + +Let $\mathcal{V}_\ell$ be the $\mathbb{C}$-vector space of $L^2(\otimes_{x\in(\mathbb{Z}/\ell\mathbb{Z})^{d-1}} \rho(d\tau_x))$ of functions supported on the transversal hyperplane $(\mathbb{Z}/\ell\mathbb{Z})^{d-1}$, over the product measure $\otimes\rho(d\tau_x)$. On $\mathcal{V}_\ell$, let $T_\ell$ be the self adjoint operator whose kernel is given by + +$$ T_\ell(\tau, \tau') := \exp \left\{ -\frac{\beta J}{4} \sum_{\substack{x,y \subset (\mathbb{Z}/\ell\mathbb{Z})^{d-1} \\ \{x,y\} \text{ edge}}} [(τ_x - τ_y)^2 + (τ'_x - τ'_y)^2] - \frac{\beta J}{2} \sum_{x \in (\mathbb{Z}/\ell\mathbb{Z})^{d-1}} (τ_x - τ'_x)^2 \right\}. \tag{A.24} $$ + +This operator serves as the “transfer matrix” in terms of which the partition function can +be presented as a trace: + +$$ Z_{m,\ell} := \operatorname{Tr}(T_{\ell}^{m}). 
\tag{A.25} $$ + +To express the correlations functions, let us consider the multiplication operators + +$$ \tau[v] := \sum_{x \in (\mathbb{Z}/\ell\mathbb{Z})^{d-1}} v(x)\tau_x \tag{A.26} $$ +---PAGE_BREAK--- + +associated with square summable functions $v : (\mathbb{Z}/\ell\mathbb{Z})^{d-1} \to \mathbb{C}$. + +In this notation, the correlation function of spins at sites (which we write as $(n, x_\perp) \in T_{m,\ell}$) satisfy + +$$ +\sum_{x_1, y_1 \in (\mathbb{Z}/\ell\mathbb{Z})^{d-1}} \overline{v_{y_1}} v_{x_1} S_{m,\ell;\beta}((n, y_\perp - x_\perp)) = \frac{\operatorname{Tr}(T_\ell^{m-n} \bar{\tau}[v] T_\ell^n \tau[v])}{\operatorname{Tr}(T_\ell^m)} . \quad (\text{A.27}) +$$ + +We next claim that for any $\ell < \infty$ the operator $T_\ell$ is: + +(i) self adjoint and compact (and thus with spectrum which is discrete, except for possible accumulation at 0); + +(ii) positive definite; + +(iii) non-degenerate at the top of its spectrum, with a strictly positive eigenfunction. + +Item (i) is implied by the kernel’s symmetry and the finiteness of its Hilbert-Schmidt norm: + +$$ +\mathrm{Tr} T_{\ell}^{*} T_{\ell} = \iint \rho(d\tau) \rho(d\tau') |T_{\ell}(\tau, \tau')|^2 \le 1. \qquad (\text{A.28}) +$$ + +Positivity (ii) can be deduced by the criteria of [19] (see also [10]) applied to the reflection symmetry with respect to the hyperplanes passing through mid-edges. The last assertion (iii) is implied by (i) combined with the kernel’s pointwise positivity (cf. Krein-Rutman theorem [33]). 
+ +Rewritten in terms of the spectral representation of $T_\ell$, (A.27) takes the form: + +$$ +\sum_{x_1, y_1 \in \mathbb{Z}^{d-1}} v_{x_1} \overline{v_{y_1}} S_{m,l;\beta}((n, x_{\perp} - y_{\perp})) = \frac{\sum_{\lambda_1, \lambda_2 \in \text{Spec}(T_l)} \lambda_1^{m-n} \lambda_2^n \langle e_{\lambda_1} | \bar{\tau}[v] | e_{\lambda_2} \rangle \langle e_{\lambda_2} | \tau[v] | e_{\lambda_1} \rangle}{\sum_{\lambda \in \text{Spec}(T_l)} \lambda^m}, \quad (A.29) +$$ + +where $\{|e_\lambda\rangle\}$ is an orthonormal basis of eigenvectors of $T_\ell$. By the structure of the spectrum described above, in the limit $m \to \infty$ only the terms with $\lambda_1 = \lambda_{max}$ and $\lambda = \lambda_{max}$ are of relevance, and one is left with the single sum: + +$$ +\begin{align} +\sum_{x_\perp, y_\perp \in \mathbb{Z}^{d-1}} v_{x_\perp} \overline{v_{y_\perp}} S_{\infty, \ell; \beta}((n, x_\perp - y_\perp)) &= \sum_{\lambda \in \text{Spec}(P)} \left(\frac{\lambda}{\lambda_{\text{max}}}^n\right)^n \langle e_{\lambda_{\text{max}}} | \overline{T} | e_\lambda \rangle \langle e_\lambda | T | e_{\lambda_{\text{max}}} \rangle \\ +&=: \int_0^\infty e^{-an} d\mu_{v,\beta,\ell}(a), \tag{A.30} +\end{align} +$$ + +with $e^{-a} = \lambda/\lambda_{\max}$ and $\mu_{v,\beta,\ell}$ the above discrete spectral measure (whose support starts at $\xi(\ell, \beta)$, the inverse rate of decay in $x$ of $S_{\infty,\ell}(x)$). + +Next we consider the limit $\ell \to \infty$ at fixed $x_\perp - y_\perp$. It is known, through the FKG inequality, that the correlation function converges pointwise, i.e. for any $(n, x_\perp - y_\perp)$ and $\beta$, + +$$ +S_{\beta}((n, x_{\perp} - y_{\perp})) = \lim_{\ell \to \infty} S_{\infty, \ell; \beta}((n, x_{\perp} - y_{\perp})) \quad (\text{A.31}) +$$ + +Through (A.30) this translates into convergence of the moments of $e^{-a}$ under the measures $\mu_{v,\beta,\ell}$. 
The moment criterion for the convergence of positive measures over bounded intervals (here $[0,1]$) allows one to conclude the existence of the (weak) limit $\lim_{\ell\to\infty} \mu_{v,\beta,\ell} = \mu_{v,\beta}$ (which need not be a point measure) with which the claimed relation (A.22) holds.
By multiplying by the quantity $\langle \sigma_x \sigma_y \rangle_\beta 4^{-|\Lambda|} Z(\Lambda, J, \beta)^2$, and then making the change of variable $\mathbf{m} = \mathbf{n}_1 + \mathbf{n}_2$, $\mathbf{n}_2 = \mathbf{n}$, we find that + +$$ +\begin{align} +(1) := & \sum_{\substack{\partial \mathbf{n}_1 = \{x,y\} \\ \partial \mathbf{n}_2 = \emptyset}} w_\beta(\mathbf{n}_1) w_\beta(\mathbf{n}_2) \mathbb{I}[\mathbf{n}_1 + \mathbf{n}_2 \in E] \mathbb{I}[x \stackrel{\mathbf{n}_1+\mathbf{n}_2}{\longleftrightarrow} S] \\ += & \sum_{\substack{\partial \mathbf{m} = \{x,y\}}} w_\beta(\mathbf{m}) \mathbb{I}[\mathbf{m} \in E] \mathbb{I}[x \stackrel{\mathbf{m}}{\longleftrightarrow} S] \sum_{\substack{\mathbf{n} \leq \mathbf{m} \\ \partial \mathbf{n} = \emptyset}} \binom{\mathbf{m}}{\mathbf{n}} \\ += & 2^{-|\Lambda|} \sum_{\substack{\partial \mathbf{m} = \{x,y\}}} w_{2\beta}(\mathbf{m}) \mathbb{I}[\mathbf{m} \in E] \mathbb{I}[x \stackrel{\mathbf{m}}{\longleftrightarrow} S] 2^{k(\mathbf{m})}, +\end{align} +\quad (A.34) +$$ + +where in the last line we used that the number of even subgraphs of the multi-graph $\mathcal{M}$ (see for instance definition in [5]) associated with $\mathbf{m}$ is given by $2^{|\mathbf{m}|+k(\mathbf{m})-|\Lambda|}$, where $|\mathbf{m}|$ means the total sum of $\mathbf{m}$, and $k(\mathbf{m})$ is the number of connected components. Now, observe that + +$$ w_{2\beta}(\mathbf{m})\mathbb{I}[x \stackrel{\mathbf{m}}{\longleftrightarrow} S] 2^{k(\mathbf{m})} \leq \sum_{a \notin S, b \in S} \beta J_{a,b} w_{2\beta}(\mathbf{m}-\delta_{ab})\mathbb{I}[x \stackrel{\mathbf{m}-\delta_{ab}}{\longleftrightarrow} b]\mathbb{I}[\mathbf{m}_{ab} \geq 1] 2^{k(\mathbf{m}-\delta_{ab})}. +\quad (A.35) $$ + +Indeed, we are necessarily in one of the following cases: consider the edges $ab$ with $a \in S$, $b \notin S$, and $a$ connected to $y$ in $\mathbf{m}-\delta_{ab}$. 
Assume that + +* there is an edge $ab$ as above with $\mathbf{m}_{ab} \ge 2$, in such case $k(\mathbf{m}-\delta_{ab}) = k(\mathbf{m})$ and + $w_{2\beta}(\mathbf{m}) = \frac{2\beta J_{ab}}{\mathbf{m}_{ab}} w_{2\beta}(\mathbf{m}-\delta_{ab}) \le \beta J_{a,b} w_{2\beta}(\mathbf{m}-\delta_{ab});$ + +* there is a loop in the cluster of $x$ in $\mathbf{m}$ which is intersecting the edge-boundary of $S$, in such case there are two edges $ab$ satisfying the property above, with $k(\mathbf{m}-\delta_{ab}) = k(\mathbf{m})$ and $w_{2\beta}(\mathbf{m}) \le 2\beta J_{a,b} w_{2\beta}(\mathbf{m}-\delta_{ab});$ + +* otherwise, there is only one edge $ab$ with $\mathbf{m}_{ab}=1$, in such case $k(\mathbf{m}-\delta_{ab}) = k(\mathbf{m})+1$ and $w_{2\beta}(\mathbf{m}) \le 2\beta J_{a,b} w_{2\beta}(\mathbf{m}-\delta_{ab}).$ +---PAGE_BREAK--- + +Injecting the last displayed inequality in the first one, and then making the change of variable $\mathbf{m}' = \mathbf{m} - \delta_{uv}$, we find that + +$$ +\begin{align} +2^{|\Lambda|} \times (1) &\le \sum_{a \notin S, b \in S} \beta J_{a,b} \sum_{\partial \mathbf{m}' = \{x,y,a,b\}} w_{2\beta}(\mathbf{m}') 2^{k(\mathbf{m}')}\mathbb{I}[x \stackrel{\mathbf{m}'}{\longleftrightarrow} a] \mathbb{I}[\mathbf{m}' + \delta_{ab} \in E] \\ +&= \sum_{a \notin S, b \in S} \beta J_{a,b} \sum_{\substack{\mathbf{n}_1 = \{x,a,b,y\} \\ \partial \mathbf{n}_2 = \emptyset}} w_{\beta}(\mathbf{n}_1) w_{\beta}(\mathbf{n}_2) \mathbb{I}[x \stackrel{\mathbf{n}_1+\mathbf{n}_2}{\longleftrightarrow} a] \mathbb{I}[\mathbf{n}_1 + \mathbf{n}_2 + \delta_{ab} \in E] \\ +&= \sum_{a \notin S, b \in S} \beta J_{a,b} \sum_{\substack{\mathbf{n}_1 = \{b,y\} \\ \partial \mathbf{n}_2 = \{x,a\}}} w_{\beta}(\mathbf{n}_1) w_{\beta}(\mathbf{n}_2) \mathbb{I}[\mathbf{n}_1 + \mathbf{n}_2 + \delta_{ab} \in E], \tag{A.36} +\end{align} +$$ + +where in the last line we used the switching lemma. 
Dividing this relation by the factor $\langle\sigma_x\sigma_y\rangle_{\Lambda,\beta}4^{-|\Lambda|}Z(\Lambda, J, \beta)^2$ and letting $\Lambda$ tend to the full lattice implies the first claim. + +The second claim follows from the same reasoning using pairs of edges (ab,st) with $a \in S$, $b \notin S$, $s \in T$ and $t \notin T$ such that $a$ is connected to $s$ in $\mathbf{n}_1 + \mathbf{n}_2$. $\square$ + +We deduce the following pair of diagrammatic bounds on the connectivity probabilities. + +**Proposition A.8** For every distinct $x, y, u, v \in \mathbb{Z}^d$ + +$$ +\begin{align} +\mathbf{P}_{\rho,\beta}^{\emptyset,\emptyset} [\mathcal{B}_x \underset{x',y'\in\mathbb{Z}^d}{\stackrel{\mathbf{n}_1+\mathbf{n}_2}{\rightleftarrows}} \mathcal{B}_y] &\leq \sum_{x',y'\in\mathbb{Z}^d} \langle\tau_x\tau_y\rangle \beta J_{y,y'} \langle\tau_{y'}\tau_{x'}\rangle \beta J_{x',x}, \tag{A.37} \\ +\mathbf{P}_{\rho,\beta}^{\mathcal{X},\emptyset} [\partial\mathbf{n}_1 \underset{u'\in\mathbb{Z}^d}{\stackrel{\mathbf{n}_1+\mathbf{n}_2}{\rightleftarrows}} \mathcal{B}_u] &\leq \sum_{u'\in\mathbb{Z}^d} \frac{\langle\tau_x\tau_u\rangle \beta J_{u,u'} \langle\tau_{u'}\tau_x\rangle}{\langle\tau_x\tau_y\rangle}. \tag{A.38} +\end{align} +$$ + +*Proof* For the first one, sum (A.33) for *E* being the full event and vertices in $\mathcal{B}_x$ and $\mathcal{B}_y$, and use (A.10). For the second one, do the same with (A.32) instead. $\square$ + +**Acknowledgments** The work of M. Aizenman on this project was supported in part by the NSF grant DMS-1613296, and that of H. Duminil-Copin by the NCCR SwissMAP, the Swiss NSF and an IDEX Chair from Paris-Saclay. This project has received funding from the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation programme (grant agreement No. 757296). The joint work was advanced through mutual visits to Princeton and Geneva University, sponsored by a Princeton-Unige partnership grant. We thank S. Goswami, A. 
Raoufi, P.-F. Rodriguez, and F. Severo for stimulating discussions, and M. Oulamara, R. Panis, P. Wildemann, and an anonymous referee for careful reading of the paper. + +**References** + +[1] M. Aizenman, Geometric analysis of $\varphi^4$ fields and Ising models. I, II, Comm. Math. Phys., 86(1):1-48, 1982. + +[2] M. Aizenman, The Intersection of Brownian Paths as a Case Study of a Renormalization Group Method for Quantum Field Theory., Comm. Math. Phys., 97:91-110, 1985. + +[3] M. Aizenman, D.J. Barsky, and R. Fernández, The phase transition in a general class of Ising-type models is sharp, J. Stat. Phys., 47(3-4):343-374, 1987. +---PAGE_BREAK--- + +[4] M. Aizenman, H. Duminil-Copin, and V. Sidoravicius, *Random Currents and Continuity of Ising Model's Spontaneous Magnetization*, Comm. Math. Phys., 334:719-742, 2015. + +[5] M. Aizenman, H. Duminil-Copin, V. Tassion and S. Warzel, *Emergent Pla-narity in two-dimensional Ising Models with finite-range Interactions*, Inven-tiones Mathematicae, 216(3):661-743, 2019. + +[6] M. Aizenman and R. Fernández, *On the critical behaviour of the magnetization in high-dimensional Ising models*, J. Stat. Phys., 44(3-4):393-454, 1986. + +[7] M. Aizenman and R. Graham, *On the renormalized coupling constant and the susceptibility in φ⁴ field theory and the Ising model in four dimensions*, Nucl. Phys. B, 225:261-288 (1983). + +[8] C. Aragao de Carvalho, S. Caracciolo, J. Fröhlich, *Polymers and gφ⁴ theory in four dimensions*, Nucl. Phys. **B215** [FS7], 209-248 (1983). + +[9] R. Bauerschmidt, D.C. Brydges, and G. Slade, *Scaling limits and critical be-haviour of the 4-dimensional n-component |φ⁴| spin model*, J. Stat. Phys., 157:692-742, 2014. + +[10] M. Biskup, *Reflection positivity and phase transitions in lattice spin models*, Methods of contemporary mathematical statistical physics, Lecture Notes in Math., vol. 1970, Springer, Berlin, 2009, pp. 1-86. + +[11] D. Brydges, J. Fröhlich, and T. 
Spencer, *The random walk representation of clas-sical spin systems and correlation inequalities*, Comm. Math. Phys., 83(1):123-150, 1982. + +[12] H. Duminil-Copin, *Random currents expansion of the Ising model*, in European Congress of Mathematics, Eur. Math. Soc., Zürich, 2018, pp. 869-889. MR 3890455. Zbl 1403.82005. https://doi.org/10.4171/176-1/39. + +[13] H. Duminil-Copin, *Lectures on the Ising and Potts models on the hypercubic lattice*. In *Random Graphs, Phase Transitions, and the Gaussian Free Field*, Springer Proc. Math. Stat. 304, Springer, Cham, 2020, pp. 35-161. MR 4043224. Zbl 1447.82007. https://doi.org/10.1007/978-3-030-32011-9-2. + +[14] H. Duminil-Copin, S. Goswami, and A. Raoufi, *Exponential decay of truncated correlations for the Ising model in any dimension for all but the critical temper-ature*, Comm. Math. Phys., 374(2):891-921, 2020. + +[15] H. Duminil-Copin and V. Tassion, *A new proof of the sharpness of the phase transition for Bernoulli percolation and the Ising model*, Math. Phys., 343(2):725-745, 2016. + +[16] J. Feldman, J. Magnen, V. Rivasseau, and R. Sénéor, *Construction and Borel Summability of Infrared Φ⁴ by a Phase Space Expansion*, Comm. Math. Phys., 109:437-480, 1987. + +[17] J. Fröhlich, *On the triviality of λφ⁴ theories and the approach to the critical point in d > 4 dimensions*, Nuclear Physics B, 200(2):281-296, 1982. +(-) +---PAGE_BREAK--- + +[18] J. Fröhlich, R. Israel, E.H. Lieb, B. Simon, *Phase transitions and reflection positivity*. I. *General theory and long range lattice models*, Comm. math. Phys., 62:1–34, 1978. + +[19] J. Fröhlich, B. Simon, and T. Spencer, *Infrared bounds, phase transitions and continuous symmetry breaking*, Comm. Math. Phys., 50(1):79–95, 1976. + +[20] K. Gawedzki and A. Kupiainen, *Massless Lattice $\Phi_4^4$ Theory: Rigorous Control of a Renormalizable Asymptotically Free Model*, Comm. Math. Phys., 99:197–252, 1985. + +[21] J. Glimm and A. 
Jaffe, Positivity of the $\phi_3^4$ hamiltonian., Fortschritte der Physik, 21(7):327–376, 1973. + +[22] R. Griffiths, *Correlation in Ising ferromagnets I, II*, J. Math. Phys., 8:478–489, 1967. + +[23] R. Griffiths, *Rigorous Results for Ising Ferromagnets of Arbitrary Spin*, J. Math. Phys., 10:1559, 1969. + +[24] R.B. Griffiths, C.A. Hurst, and S. Sherman, Concavity of magnetization of an Ising ferromagnet in a positive external field, J. Math. Phys., 11:790–795, 1970. + +[25] G. Grimmett, The random-cluster model, Grundlehren der Mathematischen Wissenschaften [Fundamental Principles of Mathematical Sciences], vol. 333, Springer-Verlag, Berlin, 2006. + +[26] F. Guerra, L. Rosen and B. Simon, *The P($\phi$$^2$ Euclidean Quantum Field Theory as Classical Statistical Mechanics*, Annals of Math., 101:111–189, 1975. + +[27] M. Hairer, *A theory of regularity structures*, Inventiones Mathematicae, 198(2):269–504, 2014. + +[28] J.M. Hammersley. Percolation processes: Lower bounds for the critical probability. Ann. Math. Statist., 28:790–795, 1957. + +[29] T. Hara and H. Tasaki, *A Rigorous Control of Logarithmic Corrections in Four-Dimensional ($\phi_4$)$^4$ Spin Systems. II. Critical Behaviour of Susceptibility and Correlation Length*, J. Stat. Phys., 47(1/2):99-121, 1987. + +[30] G.C. Hegerfeldt, Correlation inequalities for Ising ferromagnets with symmetries, Comm. Math. Phys., 57(3):259–266, 1977. + +[31] A. Jaffe and E. Witten, *Quantum Yang-Mills Theory*, The millennium prize problems, (1):129, 2006, www.claymath.org/sites/default/files/yangmills.pdf. + +[32] H. Kesten, *The incipient infinite cluster in two-dimensional percolation*, Probab. Theory Related Fields, 73(3):369–394, 1986. + +[33] M.G. Krein, M.A. Rutman, Linear operators leaving invariant a cone in a Banach space, Transl. Amer. Math. Soc.26, 199 (1950) (cf. p.279). + +[34] G.F. 
Lawler, *Intersections of random walks*, Modern Birkhäuser Classics, Birkhäuser/Springer, New York, 2013, Reprint of the 1996 edition. +---PAGE_BREAK--- + +[35] J.L. Lebowitz, *GHS and other inequalities*, Comm. Math. Phys. 35:87-92, 1974. + +[36] E. H. Lieb, *A refinement of Simon's correlation inequality*, Comm. Math. Phys. 77(2):127-135, 1980. + +[37] S. Miracle-Solé A. Messager, *Correlation functions and boundary conditions in the Ising ferromagnet*, J. Stat. Phys. 17(4):245-262, 1977. + +[38] C.M. Newman, *Inequalities for Ising models and field theories which obey the Lee-Yang theorem*, Comm. Math. Phys., 41(1), 1975. + +[39] K. Osterwalder, R. Schrader, *Axioms for Euclidean Green's functions I.*, Comm. Math. Phys. 31:83-112, 1973. + +[40] K. Osterwalder, R. Schrader, *Axioms for Euclidean Green's functions II.*, Comm. Math. Phys. 42:281-305, 1975. + +[41] R. Schrader, *New correlation inequalities for the Ising model and $P(\phi)$ theories*, Phys. Rev., B15:2798, 1977. + +[42] T.D. Schultz, D.C. Mattis, E.H. Lieb, *Two-dimensional Ising model as a soluble problem of many fermions*, Reviews of Modern Physics 36(3), 856, 1964. + +[43] B. Simon, The $P(\Phi)_2$ Euclidean (Quantum) Field Theory, Princeton Univ. Press, 1974. + +[44] B. Simon, *Correlation inequalities and the decay of correlations in ferromagnets*, Comm. Math. Phys. **77**, 111-126 (1980). + +[45] B. Simon and R.B. Griffiths, *The $\phi_2^4$ field theory as a classical Ising model*, Comm. Math. Phys. 33(2):145-164, 1973. + +[46] A.D. Sokal, *Sokal, A.D.: A rigorous inequality for the specific heat of an Ising or $\phi_4^4$ ferromagnet*. Phys. Lett. bf 71 A, 451-453 (1979) + +[47] A.D. Sokal, *An alternate constructive approach to the $\phi_3^4$ quantum field theory, and a possible destructive approach to $\phi_4^4$,*, Ann. Inst. Henri Poincaré Phys. Théorique, 37:317-398, 1982. + +[48] K. Symanzik, *Euclidean quantum field theory*, Local quantum theory. Jost, R. (ed.). 
New York: Academic Press, 1969. + +[49] R. van der Hofstad and A. Járai, *The incipient infinite cluster for high-dimensional unoriented percolation*, J. Stat. Phys., 114(3-4):553-625, 2004. + +[50] A.S. Wightman, *Quantum Field Theory in Terms of Vacuum Expectation Values*, Phys. Rev. 101, 860, 1956. + +[51] K.G. Wilson, *Renormalization Group and Critical Phenomena. I. Renormalization Group and the Kadanoff Scaling Picture*, Phys. Rev. B **4**, 1971. \ No newline at end of file diff --git a/samples_new/texts_merged/450057.md b/samples_new/texts_merged/450057.md new file mode 100644 index 0000000000000000000000000000000000000000..60d63ba40a53058d83ae20468efb099927c3c5d9 --- /dev/null +++ b/samples_new/texts_merged/450057.md @@ -0,0 +1,1876 @@ + +---PAGE_BREAK--- + +Some results on the Weiss-Weinstein bound for +conditional and unconditional signal models in array +processing + +Dinh Thang Vu, Alexandre Renaux, Remy Boyer, Sylvie Marcos + +► To cite this version: + +Dinh Thang Vu, Alexandre Renaux, Remy Boyer, Sylvie Marcos. Some results on the Weiss-Weinstein bound for conditional and unconditional signal models in array processing. Signal Processing, Elsevier, 2014, 95 (2), pp.126-148. 10.1016/j.sigpro.2013.08.020. hal-00947784 + +HAL Id: hal-00947784 + +https://hal.inria.fr/hal-00947784 + +Submitted on 17 Feb 2014 + +**HAL** is a multi-disciplinary open access +archive for the deposit and dissemination of sci- +entific research documents, whether they are pub- +lished or not. The documents may come from +teaching and research institutions in France or +abroad, or from public or private research centers. + +L'archive ouverte pluridisciplinaire **HAL**, est +destinée au dépôt et à la diffusion de documents +scientifiques de niveau recherche, publiés ou non, +émanant des établissements d'enseignement et de +recherche français ou étrangers, des laboratoires +publics ou privés. 
+---PAGE_BREAK--- + +Some results on the Weiss-Weinstein bound for conditional and +unconditional signal models in array processing + +Dinh Thang VU, Alexandre RENAUX, Rémy BOYER, Sylvie MARCOS + +Université Paris-Sud 11, CNRS, Laboratoire des Signaux et Systèmes, Supelec, 3 rue Joliot Curie, 91192 Gif-sur-Yvette +Cedex, France (e-mail: {Vu,Renaux,Remy.Boyer,Marcos}@lss.supelec.fr) + +Abstract + +In this paper, the Weiss-Weinstein bound is analyzed in the context of sources localization with a planar +array of sensors. Both conditional and unconditional source signal models are studied. First, some results +are given in the multiple sources context without specifying the structure of the steering matrix and of the +noise covariance matrix. Moreover, the case of an uniform or Gaussian prior are analyzed. Second, these +results are applied to the particular case of a single source for two kinds of array geometries: a non-uniform +linear array (elevation only) and an arbitrary planar (azimuth and elevation) array. + +Keywords: Weiss-Weinstein bound, DOA estimation. + +# 1. Introduction + +Sources localization problem has been widely investigated in the literature with many applications such as radar, sonar, medical imaging, etc. One of the objective is to estimate the direction-of-arrival (DOA) of the sources using an array of sensors. + +In array processing, lower bounds on the mean square error are usually used as a benchmark to evaluate +the ultimate performance of an estimator. There exist several lower bounds in the literature. Depending +on the assumptions about the parameters of interest, there are three main kinds of lower bounds. When +the parameters are assumed to be deterministic (unknown), the main lower bounds on the (local) mean +square error used are the well known Cramér-Rao bound and the Barankin bound (more particularly their +approximations [1][2][3][4]). 
When the parameters are assumed to be random with a known prior distribution, +these lower bounds on the global mean square error are called Bayesian bounds [5]. Some typical families +of Bayesian bounds are the Ziv-Zakai family [6][7][8] and the Weiss-Weinstein family [9][10][11][12]. Finally, +when the parameter vector is made from both deterministic and random parameters, the so-called hybrid +bounds have been developed [13][14][15]. + +Since the DOA estimation is a non-linear problem, the outliers effect can appear and the estimators +mean square error exhibits three distinct behaviors depending on the number of snapshots and/or on +the signal to noise ratio(SNR) [16]. At high SNR and/or for a high number of snapshots, i.e., in the +---PAGE_BREAK--- + +asymptotic region, the outliers effect can be neglected and the ultimate performance are described by the (classical/Bayesian/hybrid) Cramér-Rao bound. However, when the SNR and/or the number of snapshots decrease, the outliers effect lead to a quick increase of the mean square error: this is the so-called threshold effect. In this region, the behavior of the lower bounds are not the same. Some bounds, generally called global bounds (Barankin, Ziv-Zakai, Weiss-Weinstein) can predict the threshold while the others, called local bounds, like the Cramér-Rao bound or the Bhattacharyya bound cannot. Finally, at low SNR and/or at low number of snapshots, i.e., in the no-information region, the deterministic bounds exceed the estimator mean square error due to the fact that they do not take into account the parameter support. On the contrary, the Bayesian bounds exploit the parameter prior information leading to a "real" lower bound on the global mean square error. + +In this paper¹, we are interested in the Weiss-Weinstein bounds which is known to be one of the tightest Bayesian bound with the bounds of the Ziv-Zakai family. 
We will study the two main source models used in the literature [17]: the unconditional (or stochastic) model where the source signals are assumed to be Gaussian and the conditional (or deterministic) model where the source signals are assumed to be deterministic. Surprisingly, in the context of array processing, while closed-form expressions of the Ziv-Zakai bound (more precisely its extension by Bell et. al. [18]) were proposed around 15 years ago for the unconditional model, the results concerning the Weiss-Weinstein bound are, most of the time, only conducted by way of computations. Concerning the unconditional model, in [19], the Weiss-Weinstein bound has been evaluated by way of computations and has been compared to the mean square error of the MUSIC algorithm and classical Beamforming using a particular 8 × 8 element array antenna. In [20], the authors have introduced a numerical comparison between the Bayesian Cramér-Rao bound, the Ziv-Zakai bound and the Weiss-Weinstein bound for DOA estimation. In [21], numerical computations of the Weiss-Weinstein bound to optimize sensor positions for non-uniform linear arrays have been presented. Again in the unconditional model context, in [22], by considering the matched-field estimation problem, the authors have derived a semi closed-form expression of a simplified version of the Weiss-Weinstein bound for the DOA estimation. Indeed, the integration over the prior probability density function was not performed. The conditional model (with known waveforms) is studied only in [23], where a closed-form expression of the WWB is given in the simple case of spectral analysis and in [24] which is a simplified version of the bound. 
+ +While the primary goal of this paper is to give closed-form expressions of the Weiss-Weinstein bound for the DOA estimation of a single source with an arbitrary planar array of sensors, under both conditional and unconditional source signal models, we also provide partial closed-form expressions of the bound which could be useful for other problems. First, we study the general Gaussian observation model with parameterized + +¹Section 5.2.2 of this paper has been partially presented in [24] +---PAGE_BREAK--- + +mean or parameterized covariance matrix. Indeed, one of the success of the Cramér-Rao is that, for this +observation model, a closed-form expression of the Fisher information matrix is available: this is the so- +called Slepian-Bang formula [25]. Such kind of formulas have been less investigated in the context of bounds +tighter than the Cramér-Rao bound. Second, some results are given in the multiple sources context without +specifying the structure of the steering matrix and of the noise covariance matrix. Finally, these results +are applied to the particular case of a single source for two kinds of array geometries: the non-uniform +linear array (elevation only) and the planar (azimuth and elevation) array. Consequently, the aim of this +paper is also to provide a textbook of formulas which could be applied in other fields. The Weiss-Weinstein +bound is known to depend on parameters called test points and other parameters generally denoted $s_i$. One +particularity of this paper in comparison with the previous works on the Weiss-Weinstein bound is that we +do not use the assumption $s_i = 1/2, \forall i$. + +This paper is organized as follows. Section 2 is devoted to the array processing observation model which will be used in the paper. In Section 3, a short background on the Weiss-Weinstein bound is presented and two general closed-form expressions which will be the cornerstone for our array processing problems are derived. 
In Section 4 we apply these general results to the array processing problem without specifying the structure of the steering matrix. In Section 5, we study the particular case of the non-uniform linear array and of the planar array for which we provide both closed-form expressions of the bound in the context of a single stationary source in the far field area. Some simulation results are proposed in Section 6. Finally, Section 7 gives our conclusions. + +## 2. Problem setup + +In this section, the general observation model generally used in array signal processing is presented as +well as the first different assumptions used in the remain of the paper. Particularly, the so-called conditional +and unconditional source models are emphasized. + +**2.1. Observations model** + +We consider the classical scenario of an array with $M$ sensors which receives $N$ complex bandpass +signals $\mathbf{s}(t) = [s_1(t) \ s_2(t) \ \cdots \ s_N(t)]^T$. The output of the array is a $M \times 1$ complex vector $\mathbf{y}(t)$ which can +be modelled as follows (see, e.g., [26] or [17]) + +$$ \mathbf{y}(t) = \mathbf{A}(\theta)\mathbf{s}(t) + \mathbf{n}(t), \quad t = 1, \dots, T, \qquad (1) $$ + +where $T$ is the number of snapshots, where $\theta = [\theta_1 \ \theta_2 \ \cdots \ \theta_q]^T$ is an unknown parameter vector of interest², +where $\mathbf{A}(\theta)$ is the so-called $M \times N$ steering matrix of the array response to the sources, and where the +$M \times 1$ random vector $\mathbf{n}(t)$ is an additive noise. + +²Note that one source can be described by several parameters. Consequently, *q* > *N* in general. +---PAGE_BREAK--- + +## 2.2. Assumptions + +* The unknown parameters of interest are assumed to be random with an *a priori* probability density function $p(\theta_i)$, $i = 1, \dots, q$. 
These random parameters are assumed to be statistically independent such that the *a priori* joint probability density function is $p(\boldsymbol{\theta}) = \prod_{i=1}^q p(\theta_i)$. Note that this assumption will be only used in Subsections 4.2 and 4.3. We also assume that the parameter space, denoted $\Theta$, is a connected subset of $\mathbb{R}^q$ (see [27]). + +* The noise vector is assumed to be complex Gaussian, statistically independent of the parameters, i.i.d., circular, with zero mean and known covariance matrix $E[\mathbf{n}(t)\mathbf{n}^H(t)] = \mathbf{R}_n$. This assumption will be made more restrictive in Section 5 where it will be assumed that $\mathbf{R}_n = \sigma_n^2\mathbf{I}$. In any case, $\mathbf{R}_n$ is assumed to be a full rank matrix. + +* The steering matrix $\mathbf{A}(\boldsymbol{\theta})$ is assumed such that the observation model is identifiable. From Section 3 to Section 4, the structure of $\mathbf{A}(\boldsymbol{\theta})$ is not specified in order to obtain the more general results. + +* Concerning the source signals, two kinds of models have been investigated in the literature (see, e.g., [28] or [17]) and will be alternatively used in this paper. + +- $M_1$: *Unconditional or stochastic model:* $\mathbf{s}(t)$ is assumed to be a complex circular random vector, i.i.d., statistically independent of the noise, Gaussian with zero-mean and known covariance matrix $E[\mathbf{s}(t)\mathbf{s}^H(t)] = \mathbf{R}_s$. Note that concerning the previous results on the Cramér-Rao bound available in the literature [28], the covariance matrix $\mathbf{R}_s$ is assumed to be unknown. In this paper, we have made the simpler assumption that the covariance matrix $\mathbf{R}_s$ is known. These assumptions have already been used for the calculation of bounds more complex than the Cramér-Rao bound (see, e.g., [22], [29], [30]). + +- $M_2$: *Conditional or deterministic model:* $\forall t$, $\mathbf{s}(t)$ is assumed to be deterministic known. 
Note that, under the conditional model assumption, the signal waveforms can be assumed either unknown or known. While the conditional observation model with unknown waveforms seems more challenging, the conditional model with known waveforms signals which will be used in this paper can be found in several applications such as in mobile telecommunication and radar (see e.g. [31],[32], and [33]). + +## 2.3. Likelihood of the observations + +Let $\mathbf{R}_y = E[(\mathbf{y}(t) - E[\mathbf{y}(t)])(\mathbf{y}(t) - E[\mathbf{y}(t)])^H]$ be the covariance matrix of the observation vector $\mathbf{y}(t)$. According to the aforementioned assumptions, it is easy to see that under $M_1$, the observations $\mathbf{y}(t)$ are distributed as a complex circular Gaussian random vector with zero mean and covariance matrix +---PAGE_BREAK--- + +$\mathbf{R}_y(\theta) = \mathbf{A}(\theta)\mathbf{R}_s\mathbf{A}^H(\theta) + \mathbf{R}_n$ while under $\mathcal{M}_2$, the observations $\mathbf{y}(t)$ are distributed as a complex circular Gaussian random vector with mean $\mathbf{A}(\theta)\mathbf{s}(t)$ and covariance matrix $\mathbf{R}_y = \mathbf{R}_n$. Moreover, in both case the observations are i.i.d.. 
+ +Therefore, the likelihood, $p(\mathbf{Y}; \boldsymbol{\theta})$, of the full observations matrix $\mathbf{Y} = [\mathbf{y}(1) \ \mathbf{y}(2) \ \dots \ \mathbf{y}(T)]$ under $\mathcal{M}_1$ +is given by + +$$ +p(\mathbf{Y}; \boldsymbol{\theta}) = \frac{1}{\pi^{MT} |\mathbf{R}_{\mathbf{Y}}(\boldsymbol{\theta})|^T} \exp \left( -\sum_{t=1}^{T} \mathbf{y}(t)^H \mathbf{R}_{\mathbf{Y}}^{-1}(\boldsymbol{\theta}) \mathbf{y}(t) \right), \quad (2) +$$ + +where $\mathbf{R}_y(\theta) = \mathbf{A}(\theta)\mathbf{R}_s\mathbf{A}^H(\theta) + \mathbf{R}_n$ and the likelihood under $\mathcal{M}_2$ is given by + +$$ +p(\mathbf{Y}; \boldsymbol{\theta}) = \frac{1}{\pi^{MT} |\mathbf{R}_{\mathrm{n}}|^T} \exp \left( -\sum_{t=1}^{T} (\mathbf{y}(t) - \mathbf{A}(\boldsymbol{\theta}) \mathbf{s}(t))^{H} \mathbf{R}_{\mathrm{n}}^{-1} (\mathbf{y}(t) - \mathbf{A}(\boldsymbol{\theta}) \mathbf{s}(t)) \right). \quad (3) +$$ + +**3. Weiss-Weinstein bound: Generalities** + +In this Section, we first remind to the reader the structure of the Weiss-Weinstein bound on the mean square error and the assumptions used to compute this bound. Second, a general result about the Gaussian observation model with parameterized mean or parameterized covariance matrix, which, to the best of our knowledge, does not appear in the literature is presented. This result will be useful to study both the unconditional model $\mathcal{M}_1$ and the conditional model $\mathcal{M}_2$ in the next Section. + +3.1. Background + +The Weiss-Weinstein bound for a $q \times 1$ real parameter vector $\boldsymbol{\theta}$ is a $q \times q$ matrix denoted **WWB** and is +given as follows [34] + +$$ +\text{WWB} = \text{HG}^{-1}\text{H}^T, \tag{4} +$$ + +where the $q \times q$ matrix $\mathbf{H} = [\mathbf{h}_1 \ \mathbf{h}_2 \dots \mathbf{h}_q]$ contains the so-called test-points $\mathbf{h}_i$, $i = 1, \dots, q$ such that +$\boldsymbol{\theta} + \mathbf{h}_i \in \Theta \ \forall \mathbf{h}_i$. 
The $k, l$-element of the $q \times q$ matrix $\mathbf{G}$ is given by + +$$ +\{\mathbf{G}\}_{k,l} = \frac{\mathbb{E}\left[(L^{s_k}(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{h}_k, \boldsymbol{\theta})) - L^{1-s_k}(\mathbf{Y}; \boldsymbol{\theta} - \mathbf{h}_k, \boldsymbol{\theta}))\right] (L^{s_l}(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{h}_l, \boldsymbol{\theta})) - L^{1-s_l}(\mathbf{Y}; \boldsymbol{\theta} - \mathbf{h}_l, \boldsymbol{\theta}))\right]}{\mathbb{E}[L^{s_k}(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{h}_k, \boldsymbol{\theta})] \mathbb{E}[L^{s_l}(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{h}_l, \boldsymbol{\theta})]}, \quad (5) +$$ + +where the expectations are taken over the joint probability density function $p(\mathbf{Y}, \boldsymbol{\theta})$ and where the function +$L(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{h}_i, \boldsymbol{\theta})$ is defined by $L(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{h}_i, \boldsymbol{\theta}) = \frac{p(\mathbf{Y}, \boldsymbol{\theta}+\mathbf{h}_i)}{p(\mathbf{Y}, \boldsymbol{\theta})}$. The notation $L^{s_k}(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{h}_k, \boldsymbol{\theta})$ means that $s_k$ +is the power of $L(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{h}_i, \boldsymbol{\theta})$. The elements $s_i$ are such that $s_i \in [0, 1], i = 1, \dots, q$. + +Note that we have the following order relation [34] + +$$ +\operatorname{Cov}(\hat{\theta}) = E\left[(\hat{\theta} - \theta)(\hat{\theta} - \theta)^T\right] \geq \operatorname{WWB}, \quad (6) +$$ + +where $\mathbf{A} \succeq \mathbf{B}$ means that the matrix $\mathbf{A} - \mathbf{B}$ is a semi-positive definite matrix and where $\operatorname{Cov}(\hat{\theta})$ is the +global (the expectation is taken over the joint pdf $p(\mathbf{Y}, \boldsymbol{\theta})$) mean square error of any estimator $\hat{\theta}$ of the +---PAGE_BREAK--- + +parameter vector $\theta$. 
Finally, in order to obtain a tight bound, one has to maximize **WWB** over the test-points $\mathbf{h}_i$ and $s_i$ ($i=1, \dots, q$). Note that this maximization can be done by using the trace of $\mathbf{HG}^{-1}\mathbf{H}^T$ or with respect to the Loewner partial ordering [35]. In this paper we will use the trace of $\mathbf{HG}^{-1}\mathbf{H}^T$ which is enough to obtain tight results. + +## 3.2. A general result on the Weiss-Weinstein bound and its application to the Gaussian observation models + +An analytical result on the Weiss-Weinstein bound which will be useful in the following derivations and which could be useful for other problems is derived in this part. Note that this result is independent of the parameter vector size *q* and of the considered observation model. + +Let us denote $\Omega$ the observation space. By rewriting the elements of matrix $\mathbf{G}$ (see Eqn. (5)) involved in the Weiss-Weinstein bound, one obtains for the numerator denoted by $N_{\{\mathbf{G}\}_{k,l}}$, + +$$ +\begin{aligned} +N_{\{\mathbf{G}\}_{k,l}} &= \mathbb{E} \left[ (L^{s_k}(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{h}_k, \boldsymbol{\theta})) (L^{s_l}(\mathbf{Y}; \boldsymbol{\theta} - \mathbf{h}_k, \boldsymbol{\theta})) (L^{s_k+s_l}(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{h}_l, \boldsymbol{\theta})) (L^{s_k-s_l}(\mathbf{Y}; \boldsymbol{\theta} - \mathbf{h}_l, \boldsymbol{\theta})) \right] \\ +&= \int_{\Theta} \int_{\Omega} \frac{p^{s_k}(\mathbf{Y}, \boldsymbol{\theta} + \mathbf{h}_k) p^{s_l}(\mathbf{Y}, \boldsymbol{\theta} + \mathbf{h}_l)}{p^{s_k+s_l-1}(\mathbf{Y}, \boldsymbol{\theta})} d\mathbf{Y}d\boldsymbol{\theta} + \int_{\Theta} \int_{\Omega} \frac{p^{1-s_k}(\mathbf{Y}, \boldsymbol{\theta} - \mathbf{h}_k) p^{1-s_l}(\mathbf{Y}, \boldsymbol{\theta} - \mathbf{h}_l)}{p^{1-s_k-s_l}(\mathbf{Y}, \boldsymbol{\theta})} d\mathbf{Y}d\boldsymbol{\theta} \\ +&\quad - \int_{\Theta} \int_{\Omega} \frac{p^{s_k}(\mathbf{Y}, \boldsymbol{\theta} + \mathbf{h}_k) 
p^{1-s_l}(\mathbf{Y}, \boldsymbol{\theta} - \mathbf{h}_l)}{p^{s_k-s_l}(\mathbf{Y}, \boldsymbol{\theta})} d\mathbf{Y}d\boldsymbol{\theta} - \int_{\Theta} \int_{\Omega} \frac{p^{1-s_k}(\mathbf{Y}, \boldsymbol{\theta} - \mathbf{h}_k) p^{s_l}(\mathbf{Y}, \boldsymbol{\theta} + \mathbf{h}_l)}{p^{s_l-s_k}(\mathbf{Y}, \boldsymbol{\theta})} d\mathbf{Y}d\boldsymbol{\theta}, +\end{aligned} +\quad (7) $$ + +and for the denominator denoted by $D_{\{\mathbf{G}\}_{k,l}}$, + +$$ +\begin{aligned} +D_{\{\mathbf{G}\}_{k,l}} &= \mathbb{E}[L^{s_k}(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{h}_k, \boldsymbol{\theta})] \mathbb{E}[L^{s_l}(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{h}_l, \boldsymbol{\theta})] \\ +&= \int_{\Theta} \int_{\Omega} \frac{p^{s_k}(\mathbf{Y}, \boldsymbol{\theta} + \mathbf{h}_k)}{p^{s_k-1}(\mathbf{Y}, \boldsymbol{\theta})} d\mathbf{Y} d\boldsymbol{\theta} \int_{\Theta} \int_{\Omega} \frac{p^{s_l}(\mathbf{Y}, \boldsymbol{\theta} + \mathbf{h}_l)}{p^{s_l-1}(\mathbf{Y}, \boldsymbol{\theta})} d\mathbf{Y} d\boldsymbol{\theta}. +\end{aligned} +\quad (8) $$ + +Let us now define a function $\eta(\alpha, \beta, \mathbf{u}, \mathbf{v})$ as + +$$ +\eta(\alpha, \beta, \mathbf{u}, \mathbf{v}) = \int_{\Theta} \int_{\Omega} \frac{p^{\alpha}(\mathbf{Y}, \boldsymbol{\theta} + \mathbf{u}) p^{\beta}(\mathbf{Y}, \boldsymbol{\theta} + \mathbf{v})}{p^{\alpha+\beta-1}(\mathbf{Y}, \boldsymbol{\theta})} d\mathbf{Y}d\boldsymbol{\theta}, +\quad (9) $$ + +where $(\alpha, \beta) \in [0, 1]^2$ and where $(\mathbf{u}, \mathbf{v})$ are two $q \times 1$ vectors such that $\boldsymbol{\theta} + \mathbf{u} \in \Theta$ and $\boldsymbol{\theta} + \mathbf{v} \in \Theta$. The notation $p^\alpha (\mathbf{Y}, \boldsymbol{\theta} + \mathbf{u})$ means that $\alpha$ is the power of $p(\mathbf{Y}, \boldsymbol{\theta} + \mathbf{u})$. 
By identification, it is easy to see that + +$$ +\begin{aligned} +\{\mathbf{G}\}_{k,l} = & \\ +& \frac{\eta(s_k, s_l, h_k, h_l) + \eta(1-s_k, 1-s_l, -h_k, -h_l) - \eta(s_k, 1-s_l, h_k, -h_l) - \eta(1-s_k, s_l, -h_k, h_l)}{\eta(s_k, 0, h_k, 0) \eta(0, s_l, 0, h_l)}. +\end{aligned} +\quad (10) $$ + +Note that we choose the arbitrary notation $D_{\{\mathbf{G}\}_{k,l}} = \eta(s_k, 0, h_k, 0) \eta(0, s_l, 0, h_l)$ for the denominator. The notation $D_{\{\mathbf{G}\}_{k,l}} = \eta(s_k, 1, h_k, 0) \eta(1, s_l, 0, h_l)$ or, even, $D_{\{\mathbf{G}\}_{k,l}} = \eta(s_k, 0, h_k, v) \eta(0, s_l, u, h_l)$ will lead to the same result. + +With Eqn. (10), it is clear that the knowledge of $\eta(\alpha, \beta, u, v)$ for a particular problem leads to the Weiss-Weinstein bound (without the maximization procedure over the test-points and over the parameters $s_i$). Surprisingly, this simple expression is given in [34] only for $s_i = 1/2$, $\forall i$ and not for the general case. +---PAGE_BREAK--- + +Let us now detail this function $\eta(\alpha, \beta, \mathbf{u}, \mathbf{v})$. 
The function $\eta(\alpha, \beta, \mathbf{u}, \mathbf{v})$ can be rewritten as + +$$ +\begin{align} +\eta(\alpha, \beta, \mathbf{u}, \mathbf{v}) &= \int_{\Theta} \frac{p^{\alpha}(\boldsymbol{\theta} + \mathbf{u}) p^{\beta}(\boldsymbol{\theta} + \mathbf{v})}{p^{\alpha+\beta-1}(\boldsymbol{\theta})} \int_{\Omega} \frac{p^{\alpha}(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{u}) p^{\beta}(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{v})}{p^{\alpha+\beta-1}(\mathbf{Y}; \boldsymbol{\theta})} d\mathbf{Y} d\boldsymbol{\theta} \\ +&= \int_{\Theta} \dot{\eta}_{\boldsymbol{\theta}}(\alpha, \beta, \mathbf{u}, \mathbf{v}) \frac{p^{\alpha}(\boldsymbol{\theta} + \mathbf{u}) p^{\beta}(\boldsymbol{\theta} + \mathbf{v})}{p^{\alpha+\beta-1}(\boldsymbol{\theta})} d\boldsymbol{\theta}, +\end{align} +\tag{11} +$$ + +where we define + +$$ \dot{\eta}_{\boldsymbol{\theta}}(\alpha, \beta, \mathbf{u}, \mathbf{v}, \boldsymbol{\theta}) = \int_{\Omega} \frac{p^{\alpha}(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{u}) p^{\beta}(\mathbf{Y}; \boldsymbol{\theta} + \mathbf{v})}{p^{\alpha+\beta-1}(\mathbf{Y}; \boldsymbol{\theta})} d\mathbf{Y}. \quad (12) $$ + +Our aim is to give the most general result. Consequently, we will focus only on $\dot{\eta}_{\boldsymbol{\theta}}(\alpha, \beta, \mathbf{u}, \mathbf{v})$ since the *a priori* probability density function depends on the considered problem. + +An important remark pointed out in [27] is that the integration for the parameter space is with respect to the region $\{\boldsymbol{\theta}: p(\boldsymbol{\theta}) > 0\}$. 
However, since the functions being integrated are $p(\boldsymbol{\theta})$, $p(\boldsymbol{\theta} + \mathbf{u})$, and $p(\boldsymbol{\theta} + \mathbf{v})$, then the actual region of integration (where all the functions are positive) is the intersection of three regions, $\{\boldsymbol{\theta}: p(\boldsymbol{\theta}) > 0\} \cap \{\boldsymbol{\theta}: p(\boldsymbol{\theta} + \mathbf{u}) > 0\} \cap \{\boldsymbol{\theta}: p(\boldsymbol{\theta} + \mathbf{v}) > 0\}$. Note that, in order to simplify the notation we only use $\Theta$ throughout this paper but this remark will be useful and explicitly specified in Section 4.2. + +### 3.2.1. Gaussian observation model with parameterized covariance matrix + +One calls (circular, i.i.d.) Gaussian observation model with parameterized covariance matrix, a model such that the observations $\mathbf{y}(t) \sim CN(0, R_y(\boldsymbol{\theta}))$ where $\boldsymbol{\theta}$ are the parameters of interest. Note that $M_1$ is a special case of this model since the parameters of interest appear only in the covariance matrix of the observations which has the following particular structure $R_y(\boldsymbol{\theta}) = A(\boldsymbol{\theta})R_sA^H(\boldsymbol{\theta}) + R_n$. The closed-form expression of $\dot{\eta}_{\boldsymbol{\theta}}(\alpha, \beta, \mathbf{u}, \mathbf{v})$ is given by: + +$$ +\dot{\eta}_{\boldsymbol{\theta}}(\alpha, \beta, \mathbf{u}, \mathbf{v}) = \frac{|\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta})|^{T(\alpha+\beta-1)}}{|\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta}+\mathbf{u})|^{T\alpha} |\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta}+\mathbf{v})|^{T\beta} |\alpha\mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta}+\mathbf{u}) + \beta\mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta}+\mathbf{v}) - (\alpha+\beta-1)\mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta})|^T}. +\tag{13} +$$ + +The proof is given in Appendix .1. Note that, similar expressions are given in [18] (Eqn. (B.15)) and [36] (p. 67, Eqn. 
(52)) for the particular case where $\alpha = s$ and $\beta = 1-s$. + +### 3.2.2. Gaussian observation model with parameterized mean + +One calls (circular, i.i.d.) Gaussian observation model with parameterized mean, a model such that the observations $\mathbf{y}(t) \sim CN(\mathbf{f}(\boldsymbol{\theta}), R_y)$ where $\boldsymbol{\theta}$ are the parameters of interest. Note that $M_2$ is a special case of this model since the parameters of interest appear only in the mean of the observations which has the following particular structure $\mathbf{f}_t(\boldsymbol{\theta}) = A(\boldsymbol{\theta})s(t)$ (and $R_y = R_n$). The closed-form expression of $\dot{\eta}_{\boldsymbol{\theta}}(\alpha, \beta, \mathbf{u}, \mathbf{v})$ is given in this case by +---PAGE_BREAK--- + +$$ +\begin{equation} +\begin{split} +\ln \eta_{\theta} (\alpha, \beta, \mathbf{u}, \mathbf{v}) = & -\sum_{t=1}^{T} \alpha (1-\alpha) \mathbf{f}_t^H (\boldsymbol{\theta} + \mathbf{u}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta} + \mathbf{u}) + \beta (1-\beta) \mathbf{f}_t^H (\boldsymbol{\theta} + \mathbf{v}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta} + \mathbf{v}) \\ +& + (1-\alpha-\beta) (\alpha+\beta) \mathbf{f}_t^H (\boldsymbol{\theta}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta}) - 2 \operatorname{Re} \{\alpha\beta \mathbf{f}_t^H (\boldsymbol{\theta} + \mathbf{u}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta} + \mathbf{v}) \\ +& + \alpha (1-\alpha-\beta) \mathbf{f}_t^H (\boldsymbol{\theta} + \mathbf{u}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta}) + \beta (1-\alpha-\beta) \mathbf{f}_t^H (\boldsymbol{\theta} + \mathbf{v}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta}) \}, +\end{split} +\tag{14} +\end{equation} +$$ + +or equivalently by + +$$ +\begin{equation} +\begin{split} +\ln \eta_{\theta} (\alpha, \beta, \mathbf{u}, \mathbf{v}) = & -\sum_{t=1}^{T} \alpha (1-\alpha-\beta) \| 
\mathbf{R}_{\mathbf{y}}^{-1/2} (\mathbf{f}_t(\boldsymbol{\theta}+\mathbf{u}) - \mathbf{f}_t(\boldsymbol{\theta})) \|^{2} + \alpha\beta \| \mathbf{R}_{\mathbf{y}}^{-1/2} (\mathbf{f}_t(\boldsymbol{\theta}+\mathbf{u}) - \mathbf{f}_t(\boldsymbol{\theta}+\mathbf{v})) \|^{2} \\ +& +\beta (1-\alpha-\beta) \| \mathbf{R}_{\mathbf{y}}^{-1/2} (\mathbf{f}_t(\boldsymbol{\theta}+\mathbf{v}) - \mathbf{f}_t(\boldsymbol{\theta})) \|^{2}. +\end{split} +\tag{15} +\end{equation} +$$ + +The details are given in Appendix .2. + +**4. General application to array processing** + +In the previous Section, it has been shown that the Weiss-Weinstein bound computation (or, at least, +the matrix **G** computation) is reduced to the knowledge of the function η (α, β, **u**, **v**) given by Eqn. (9). As +one can see in Eqn. (10), the elements of the matrix **G** depend on η (α, β, **u**, **v**) for particular values of α, β, +**u**, and **v**. Consequently, the goal of this Section is to detail these particular functions for our model given +by Eqn. (1). Since Eqn. (9) can be decomposed into a *deterministic part* (in the sense where ηθ (α, β, **u**, **v**) +(see Eqn. (12)) only depends on the likelihood function) and a *Bayesian part* (when we have to integrate +ηθ (α, β, **u**, **v**) over the *a priori* probability density function of the parameters), we will first focus on the +particular functions ηθ (α, β, **u**, **v**) by using the results of the previous Section on the Gaussian observation +model with parameterized mean or covariance matrix. Second, we will detail the passage from ηθ (α, β, **u**, **v**) +to η (α, β, **u**, **v**) in the particular case where p(θi) is a uniform probability density function ∀i. Another +result will also be given in the case of a Gaussian prior. + +4.1. 
Analysis of $\dot{\eta}_{\theta}(\alpha, \beta, \mathbf{u}, \mathbf{v})$
+
+We will now detail the particular functions $\dot{\eta}_{\theta}(\alpha, \beta, \mathbf{u}, \mathbf{v})$ involved in the different elements of $\{\mathbf{G}\}_{k,l}$,
+$k,l \in \{1,\dots,q\}$ for both models $\mathcal{M}_1$ and $\mathcal{M}_2$.
+
+4.1.1. Unconditional observation model $\mathcal{M}_1$
+
+Under the unconditional model $\mathcal{M}_1$, by using Eqn. (13), one obtains straightforwardly the functions
+$\dot{\eta}_{\theta}(\alpha, \beta, \mathbf{u}, \mathbf{v})$ involved in the elements $\{\mathbf{G}\}_{k,l} = \{\mathbf{G}\}_{l,k}$
+---PAGE_BREAK---
+
+$$
+\left\{
+\begin{aligned}
+\dot{\eta}_{\theta}(s_k, s_l, \mathbf{h}_k, \mathbf{h}_l) &= \frac{|\mathbf{R}_y(\theta)|^{T(s_k+s_l-1)}}{|\mathbf{R}_y(\theta+\mathbf{h}_k)|^{Ts_k} |\mathbf{R}_y(\theta+\mathbf{h}_l)|^{Ts_l} |s_k \mathbf{R}_y^{-1}(\theta+\mathbf{h}_k)+s_l \mathbf{R}_y^{-1}(\theta+\mathbf{h}_l)-(s_k+s_l-1) \mathbf{R}_y^{-1}(\theta)|^T}, \\
+\dot{\eta}_{\theta}(1-s_k, 1-s_l, -\mathbf{h}_k, -\mathbf{h}_l) &= \frac{|\mathbf{R}_y(\theta)|^{T(1-s_k-s_l)}}{|\mathbf{R}_y(\theta-\mathbf{h}_k)|^{T(1-s_k)} |\mathbf{R}_y(\theta-\mathbf{h}_l)|^{T(1-s_l)} |(1-s_k)\mathbf{R}_y^{-1}(\theta-\mathbf{h}_k)+(1-s_l)\mathbf{R}_y^{-1}(\theta-\mathbf{h}_l)-(1-s_k-s_l)\mathbf{R}_y^{-1}(\theta)|^T}, \\
+\dot{\eta}_{\theta}(s_k, 1-s_l, \mathbf{h}_k, -\mathbf{h}_l) &= \frac{|\mathbf{R}_y(\theta)|^{T(s_k-s_l)}}{|\mathbf{R}_y(\theta+\mathbf{h}_k)|^{Ts_k} |\mathbf{R}_y(\theta-\mathbf{h}_l)|^{T(1-s_l)} |s_k \mathbf{R}_y^{-1}(\theta+\mathbf{h}_k)+(1-s_l)\mathbf{R}_y^{-1}(\theta-\mathbf{h}_l)-(s_k-s_l)\mathbf{R}_y^{-1}(\theta)|^T}, \\
+\dot{\eta}_{\theta}(1-s_k, s_l, -\mathbf{h}_k, \mathbf{h}_l) &= \frac{|\mathbf{R}_y(\theta)|^{T(s_l-s_k)}}{|\mathbf{R}_y(\theta-\mathbf{h}_k)|^{T(1-s_k)} |\mathbf{R}_y(\theta+\mathbf{h}_l)|^{Ts_l} |(1-s_k)\mathbf{R}_y^{-1}(\theta-\mathbf{h}_k)+s_l\mathbf{R}_y^{-1}(\theta+\mathbf{h}_l)-(s_l-s_k)\mathbf{R}_y^{-1}(\theta)|^T}, \\
+\dot{\eta}_{\theta}(s_k, 0, \mathbf{h}_k, \mathbf{0}) &= \frac{|\mathbf{R}_y(\theta)|^{T(s_k-1)}}{|\mathbf{R}_y(\theta+\mathbf{h}_k)|^{Ts_k} |s_k \mathbf{R}_y^{-1}(\theta+\mathbf{h}_k)-(s_k-1)\mathbf{R}_y^{-1}(\theta)|^T}, \\
+\dot{\eta}_{\theta}(0, s_l, \mathbf{0}, \mathbf{h}_l) &= \frac{|\mathbf{R}_y(\theta)|^{T(s_l-1)}}{|\mathbf{R}_y(\theta+\mathbf{h}_l)|^{Ts_l} |s_l \mathbf{R}_y^{-1}(\theta+\mathbf{h}_l)-(s_l-1)\mathbf{R}_y^{-1}(\theta)|^T}.
+\end{aligned}
+\right.
+\quad (16)
+$$
+
+The diagonal elements of $\mathbf{G}$ are obtained by letting $k=l$ in the above equations.
+
+### 4.1.2. Conditional observation model $\mathcal{M}_2$
+
+Under the conditional model $\mathcal{M}_2$, by using Eqn. (15) with $\mathbf{f}_t(\boldsymbol{\theta}) = \mathbf{A}(\boldsymbol{\theta})\mathbf{s}(t)$ and $\mathbf{R}_{\boldsymbol{y}} = \mathbf{R}_{\boldsymbol{n}}$ one obtains straightforwardly the functions $\dot{\eta}_{\boldsymbol{\theta}}(\alpha, \beta, \mathbf{u}, \mathbf{v})$ involved in the elements $\{\mathbf{G}\}_{k,l} = \{\mathbf{G}\}_{l,k}$
+
+$$
+\left\{
+\begin{array}{l}
+\ln \dot{\eta}_{\theta}(s_k, s_l, \mathbf{h}_k, \mathbf{h}_l) = s_k (s_k + s_l - 1) \zeta_{\theta}(\mathbf{h}_k, \mathbf{0}) + s_l (s_k + s_l - 1) \zeta_{\theta}(\mathbf{h}_l, \mathbf{0}) - s_k s_l \zeta_{\theta}(\mathbf{h}_k, \mathbf{h}_l), \\
+\\
+\ln \dot{\eta}_{\theta}(1 - s_k, 1 - s_l, -\mathbf{h}_k, -\mathbf{h}_l) = (s_k - 1)(s_k + s_l - 1) \zeta_{\theta}(-\mathbf{h}_k, \mathbf{0}) + (s_l - 1)(s_k + s_l - 1) \zeta_{\theta}(-\mathbf{h}_l, \mathbf{0}) \\
+\qquad - (1 - s_k)(1 - s_l) \zeta_{\theta}(-\mathbf{h}_k, -\mathbf{h}_l), \\
+\\
+\ln \dot{\eta}_{\theta}(s_k, 1 - s_l, \mathbf{h}_k, -\mathbf{h}_l) = s_k (s_k - s_l) \zeta_{\theta}(\mathbf{h}_k, \mathbf{0}) + (1 - s_l)(s_k - s_l) \zeta_{\theta}(-\mathbf{h}_l, \mathbf{0}) + s_k (s_l - 1) \zeta_{\theta}(\mathbf{h}_k, -\mathbf{h}_l), \\
+\\
+\ln \dot{\eta}_{\theta}(1 - s_k, s_l, -\mathbf{h}_k, \mathbf{h}_l) = (s_k - 1)(s_k - s_l) 
\zeta_{\theta}(-\mathbf{h}_k, \mathbf{0}) + s_l (s_l - s_k) \zeta_{\theta}(\mathbf{h}_l, \mathbf{0}) + (s_k - 1) s_l \zeta_{\theta}(-\mathbf{h}_k, \mathbf{h}_l), \\ +\\ +\ln \dot{\eta}_{\theta}(s_k, 0, \mathbf{h}_k, \mathbf{0}) = s_k (s_k - 1) \zeta_{\theta}(\mathbf{h}_k, \mathbf{0}), \\ +\\ +\ln \dot{\eta}_{\theta}(0, s_l, \mathbf{0}, \mathbf{h}_l) = s_l (s_l - 1) \zeta_{\theta}(\mathbf{h}_l, \mathbf{0}), +\end{array} +\right. +\tag{17} +$$ + +where we define + +$$ +\zeta_{\theta}(\mu, \rho) = \sum_{t=1}^{T} \| \mathbf{R}_{n}^{-1/2} (\mathbf{A}(\theta + \mu) - \mathbf{A}(\theta + \rho)) \mathbf{s}(t) \|^{2}. \quad (18) +$$ + +The diagonal elements of $\mathbf{G}$ are obtained by letting $k=l$ in the above equations. Note that, since we are working on matrix $\mathbf{G}$, all the previously proposed results are made whatever the number of test-points. + +## 4.2. Analysis of $\eta(\alpha, \beta, u, v)$ with a uniform prior + +Of course, the analysis of $\eta(\alpha, \beta, u, v)$ given by Eqn. (11) can only be conducted by specifying the a priori probability density functions of the parameters. Consequently, the results provided here are very specific. However, note that, in general, this aspect is less emphasized in the literature where most of the authors give results without specifying the prior probability density functions and compute the rest of the bound numerically (see e.g., [22][20][37]). + +We assume that all the parameters $\theta_i$ have a uniform prior distribution over the interval $[a_i, b_i]$ and are statistically independent. We will also assume one test-point per parameter, otherwise there is no possibility +---PAGE_BREAK--- + +to obtain (pseudo) closed-form expressions. Consequently, the matrix **H** is such that + +$$ +\mathbf{H} = \mathrm{Diag} ([h_1 h_2 \cdots h_q]), \tag{19} +$$ + +and the vector **h***i*, *i* = 1, ..., *q*, takes the value *h**i* at the *i*th row and zero elsewhere. 
So, in this analysis, +the vector **u** takes the value *u**i* at the *i*th row and zero elsewhere and the vector **v** takes the value *v**j* at the +*j*th row and zero elsewhere (of course, we can have *i* = *j*). Under these assumptions, η(α, β, **u**, **v**) can be +rewritten³ for *i* ≠ *j* + +$$ +\begin{align} +\eta(\alpha, \beta, \mathbf{u}, \mathbf{v}) &= \int_{\Theta} \dot{\eta}_{\theta}(\alpha, \beta, \mathbf{u}, \mathbf{v}) \frac{p^{\alpha}(\theta_i + u_i) p^{\beta}(\theta_j + v_j) p^{\beta}(\theta_i) p^{\alpha}(\theta_j)}{p^{\alpha+\beta-1}(\theta_i) p^{\alpha+\beta-1}(\theta_j)} \prod_{\substack{k=1 \\ k \neq i, k \neq j}}^{q} p(\theta_k) d\theta \\ +&= \frac{1}{\prod_{k=1}^{q} (b_k - a_k)} \int_{\Theta^{q-2}} \int_{\Theta_j} \int_{\Theta_i} \dot{\eta}_{\theta}(\alpha, \beta, \mathbf{u}, \mathbf{v}) d\theta_i d\theta_j d(\theta / \{\theta_i, \theta_j\}), \tag{20} +\end{align} +$$ + +where $\Theta_i = \begin{cases} [a_i, b_i - u_i] & \text{if } u_i > 0, \\ [a_i - u_i, b_i] & \text{if } u_i < 0, \end{cases}$ and $\Theta_j = \begin{cases} [a_j, b_j - v_j] & \text{if } v_j > 0, \\ [a_j - v_j, b_j] & \text{if } v_j < 0, \end{cases}$. For $i=j$, one can have $\mathbf{v} = \pm \mathbf{u}$, +then one obtains + +$$ +\begin{align} +\eta(\alpha, \beta, \mathbf{u}, \mathbf{v} = \pm \mathbf{u}) &= \int_{\Theta} \dot{\eta}_{\theta}(\alpha, \beta, \mathbf{u}, \mathbf{v}) \frac{p^{\alpha}(\theta_i + u_i) p^{\beta}(\theta_i \pm u_i)}{p^{\alpha+\beta-1}(\theta_i)} \prod_{\substack{k=1 \\ k \neq i}}^{q} p(\theta_k) d\theta \\ +&= \frac{1}{\prod_{k=1}^{q} (b_k - a_k)} \int_{\Theta^{q-1}} \int_{\Theta_i} \dot{\eta}_{\theta}(\alpha, \beta, \mathbf{u}, \mathbf{v} = \pm \mathbf{u}) d\theta_i d(\theta / \{\theta_i\}). 
\tag{21}
+\end{align}
+$$
+
+In the last equation, if $\mathbf{v} = -\mathbf{u}$, then $\Theta_i = \begin{cases} [a_i + u_i, b_i - u_i] & \text{if } u_i > 0, \\ [a_i - u_i, b_i + u_i] & \text{if } u_i < 0, \end{cases}$ while, if $\mathbf{v} = \mathbf{u}$, then
+$\Theta_i = \begin{cases} [a_i, b_i - u_i] & \text{if } u_i > 0, \\ [a_i - u_i, b_i] & \text{if } u_i < 0. \end{cases}$
+
+Depending on the structure of $\eta_\theta (\alpha, \beta, \mathbf{u}, \mathbf{v})$, $\eta (\alpha, \beta, \mathbf{u}, \mathbf{v})$ has to be computed numerically or a closed-
+form expression can be found.
+
+Another particular case which sometimes appears is when the function $\eta_\theta (\alpha, \beta, \mathbf{u}, \mathbf{v})$ does not depend
+on $\theta$ (see [23][5][8][18][20][21][27][29] and Section 5 of this paper). In this case, $\eta_\theta (\alpha, \beta, \mathbf{u}, \mathbf{v})$ is denoted
+
+³In this case, one has to pay particular attention to the integration domain, as mentioned in Section 3.2. This will not be
+the case for the Gaussian prior since the support is ℝ.
+---PAGE_BREAK---
+
+$\dot{\eta}(\alpha, \beta, \mathbf{u}, \mathbf{v})$ and one obtains from Eqn. (20)
+
+$$
+\begin{align}
+\eta(\alpha, \beta, \mathbf{u}, \mathbf{v}) &= \frac{\dot{\eta}(\alpha, \beta, \mathbf{u}, \mathbf{v})}{\prod_{k=1}^{q} (b_k - a_k)} \left( \prod_{\substack{k=1 \\ k \neq i, k \neq j}}^{q} \int_{a_k}^{b_k} d\theta_k \right) \int_{\Theta_i} d\theta_i \int_{\Theta_j} d\theta_j \nonumber \\
+&= \frac{(b_i - a_i - |u_i|)(b_j - a_j - |v_j|)}{(b_i - a_i)(b_j - a_j)} \dot{\eta}(\alpha, \beta, \mathbf{u}, \mathbf{v}), \tag{22}
+\end{align}
+$$
+
+and from Eqn. 
(21) + +$$ +\eta(\alpha, \beta, \mathbf{u}, \mathbf{v} = \mathbf{u}) = \frac{(b_i - a_i - |u_i|)}{(b_i - a_i)} \dot{\eta}(\alpha, \beta, \mathbf{u}, \mathbf{v}), \quad (23) +$$ + +and + +$$ +\eta(\alpha, \beta, \mathbf{u}, \mathbf{v} = -\mathbf{u}) = \frac{(b_i - a_i - 2|u_i|)}{(b_i - a_i)} \dot{\eta}(\alpha, \beta, \mathbf{u}, \mathbf{v}). \quad (24) +$$ + +### 4.3. Analysis of $\eta(\alpha, \beta, \mathbf{u}, \mathbf{v})$ with a Gaussian prior + +Finally, one can mention that if the prior is now assumed to be Gaussian, i.e., $\theta_i \sim N(\mu_i, \sigma_i^2) \forall i$ and $\dot{\eta}_{\theta}(\alpha, \beta, \mathbf{u}, \mathbf{v})$ does not depend on $\theta$ one obtains after a straightforward calculation + +$$ +\begin{align} +\eta(\alpha, \beta, \mathbf{u}, \mathbf{v}) &= \dot{\eta}(\alpha, \beta, \mathbf{u}, \mathbf{v}) \int_{\mathbb{R}} \frac{p^{\alpha}(\theta_i + u_i)}{p^{\alpha-1}(\theta_i)} d\theta_i \int_{\mathbb{R}} \frac{p^{\beta}(\theta_j + v_j)}{p^{\beta-1}(\theta_j)} d\theta_j \\ +&= \dot{\eta}(\alpha, \beta, \mathbf{u}, \mathbf{v}) \exp \left( -\frac{1}{2} \left( \frac{\alpha(1-\alpha)u_i^2}{\sigma_i^2} + \frac{\beta(1-\beta)v_j^2}{\sigma_j^2} \right) \right), \tag{25} +\end{align} +$$ + +$$ +\begin{align} +\eta(\alpha, \beta, \mathbf{u}, \mathbf{v} = \mathbf{u}) &= \dot{\eta}(\alpha, \beta, \mathbf{u}, \mathbf{v}) \int_{\mathbb{R}} \frac{p^{\alpha+\beta}(\theta_i + u_i)}{p^{\alpha+\beta-1}(\theta_i)} d\theta_i \\ +&= \dot{\eta}(\alpha, \beta, \mathbf{u}, \mathbf{v}) \exp\left(-\frac{(\alpha+\beta)(1-\alpha-\beta)u_i^2}{2\sigma_i^2}\right), \tag{26} +\end{align} +$$ + +and + +$$ +\begin{align} +\eta(\alpha, \beta, \mathbf{u}, \mathbf{v} = -\mathbf{u}) &= \dot{\eta}(\alpha, \beta, \mathbf{u}, \mathbf{v}) \int_{\mathbb{R}} \frac{p^{\alpha}(\theta_i + u_i) p^{\beta}(\theta_i - u_i)}{p^{\alpha+\beta-1}(\theta_i)} d\theta_i \\ +&= \dot{\eta}(\alpha, \beta, \mathbf{u}, \mathbf{v}) \exp\left(-\frac{(\alpha + \beta - \alpha^2 - \beta^2 + 2\alpha\beta) 
u_i^2}{2\sigma_i^2}\right). \tag{27} +\end{align} +$$ + +## 5. Specific applications to array processing: DOA estimation + +We now consider the application of the Weiss-Weinstein bound in the particular context of source localization. Indeed, until now, the structure of the steering matrix $A(\theta)$ for a particular problem has not been used in the proposed (semi) closed-form expressions. Consequently, these previous results can be applied to a large class of estimation problems such as far-field and near-field sources localization, passive localization with polarized array of sensors, or radar (known waveforms). +---PAGE_BREAK--- + +Here, we want to focus on the direction-of-arrival estimation of a single source in the far-field area with narrow-band signal. In this case, the steering matrix $\mathbf{A}(\boldsymbol{\theta})$ becomes a steering vector denoted as $\mathbf{a}(\boldsymbol{\theta})$ (except for one preliminary result concerning the conditional model which will be given whatever the number of sources in Section 5.1.2). The structure of this vector will be specified by the analysis of two kinds of array geometry: the non-uniform linear array from which only one angle-of-arrival can be estimated ($\boldsymbol{\theta}$ becomes a scalar) and the arbitrary planar array from which both azimuth and elevation can be estimated ($\boldsymbol{\theta}$ becomes a $2 \times 1$ vector). In any cases, the array always consists of $M$ identical, omnidirectional sensors. Both models $\mathcal{M}_1$ and $\mathcal{M}_2$ will be considered and the noise will be assumed spatially uncorrelated: $\mathbf{R}_n = \sigma_n^2 \mathbf{I}$. Since we focus on the single source scenario, the variance of the source signal $s(t)$ is denoted $\sigma_s^2$ for the model $\mathcal{M}_1$. 
+ +The general structure of the $i^{th}$ element of the steering vector is as follows + +$$ \{\mathbf{a}(\boldsymbol{\theta})\}_i = \exp \left( j \frac{2\pi}{\lambda} \mathbf{r}_i^T \boldsymbol{\theta} \right), \quad i = 1, \dots, M \qquad (28) $$ + +where $\boldsymbol{\theta}$ represents the parameter vector, where $\lambda$ denotes the wavelength, and where $\mathbf{r}_i$ denotes the coordinate of the $i^{th}$ sensor position with respect to a given referential. In the following, $\mathbf{r}_i$ will be a scalar or a $2 \times 1$ vector depending on the context (linear array or planar array). + +## 5.1. Preliminary results + +Since our analysis is now reduced to the single source case, we give here some other closed-form expressions which will be useful when we will detail the specific linear and planar arrays. + +### 5.1.1. Unconditional observation model $\mathcal{M}_1$ + +In order to detail the set of functions $\eta_{\theta}$ given by Eqn. (16), one has to find closed-form expressions of the determinant $|\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta} + \mathbf{u})|$ and of determinants having the following structure: $|m_1\mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta}_1) + m_2\mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta}_2)|$ with $m_1 + m_2 = 1$ or $|m_1\mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta}_1) + m_2\mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta}_2) + m_3\mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta}_3)|$ with $m_1 + m_2 + m_3 = 1$. Under $\mathcal{M}_1$, the observation covariance matrix is now given by + +$$ \mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta}) = \sigma_s^2 \mathbf{a}(\boldsymbol{\theta}) \mathbf{a}^H(\boldsymbol{\theta}) + \sigma_n^2 \mathbf{I}_M. 
\qquad (29) $$ + +Concerning the calculation of $|\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta} + \mathbf{u})|$, it is easy to find + +$$ |\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta} + \mathbf{u})| = \sigma_n^{2M} \left( 1 + \frac{\sigma_s^2}{\sigma_n^2} \|\mathbf{a}(\boldsymbol{\theta} + \mathbf{u})\|^2 \right). \qquad (30) $$ + +Moreover, after calculation detailed in Appendix B.3, one obtains for the other determinants + +$$ |m_1 \mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta}_1) + m_2 \mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta}_2)| = \frac{1}{(\sigma_n^2)^M} \left( \begin{aligned}[t] & 1 - \varphi_1 m_1 \| \mathbf{a}(\boldsymbol{\theta}_1) \| ^2 + m_2 \varphi_2 \| \mathbf{a}(\boldsymbol{\theta}_2) \| ^2 \\ & - \varphi_1 m_1 \varphi_2 m_2 (\| \mathbf{a}^H(\boldsymbol{\theta}_1) \mathbf{a}(\boldsymbol{\theta}_2) \| ^2 - \| \mathbf{a}(\boldsymbol{\theta}_1) \| ^2 \| \mathbf{a}(\boldsymbol{\theta}_2) \| ^2) \end{aligned} \right) \qquad (31) $$ +---PAGE_BREAK--- + +and + +$$ +\begin{align} +|m_1 \mathbf{R}_{\mathbf{y}}^{-1}(\theta_1) + m_2 \mathbf{R}_{\mathbf{y}}^{-1}(\theta_2) + m_3 \mathbf{R}_{\mathbf{y}}^{-1}(\theta_3)| = & \nonumber \\ +& \frac{1}{(\sigma_n^2)^M} \left( 1 - \sum_{k=1}^3 m_k \varphi_k \| \mathbf{a}(\theta_k) \|^2 - \frac{1}{2} \sum_{k=1}^3 \sum_{\substack{k'=1 \\ k' \neq k}}^3 m_k \varphi_k m_{k'} \varphi_{k'} \left( \| \mathbf{a}^H(\theta_k) \mathbf{a}(\theta_{k'}) \|^2 - \| \mathbf{a}(\theta_k) \|^2 \| \mathbf{a}(\theta_{k'}) \|^2 \right) \right. \nonumber \\ +& \left. - \left( \prod_{k=1}^3 m_k \varphi_k \right) \left( \prod_{k=1}^3 \| \mathbf{a}(\theta_k) \|^2 - \frac{1}{2} \sum_{k=1}^3 \sum_{\substack{k'=1 \\ k' \neq k}}^3 \sum_{\substack{k''=1 \\ k'' \neq k'}}^3 \| \mathbf{a}^H(\theta_k) \mathbf{a}(\theta_{k''}) \|^2 \| \mathbf{a}(\theta_{k''}) \|^2 \right) \right. \nonumber \\ +& \left. 
+ \mathbf{a}^H(\theta_3) \mathbf{a}(\theta_2) \mathbf{a}^H(\theta_1) \mathbf{a}(\theta_3) \mathbf{a}^H(\theta_2) \mathbf{a}(\theta_1) + \mathbf{a}^H(\theta_3) \mathbf{a}(\theta_1) \mathbf{a}^H(\theta_1) \mathbf{a}(\theta_2) \mathbf{a}^H(\theta_2) \mathbf{a}(\theta_3) \right), \tag{32}
+\end{align}
+$$
+
+where
+
+$$
+\varphi_k = \frac{\sigma_s^2}{\sigma_s^2 \|\mathbf{a}(\theta_k)\|^2 + \sigma_n^2}, \quad k = 1, 2, 3. \tag{33}
+$$
+
+5.1.2. Conditional observation model $\mathcal{M}_2$
+
+Note that the results proposed here hold for any number of sources. Under the conditional
+model, the set of functions $\dot{\eta}_{\theta}$ given by Eqn. (17) is linked to the function $\zeta_{\theta}(\boldsymbol{\mu}, \boldsymbol{\rho})$ given by Eqn. (18). In
+this analysis, the vector $\boldsymbol{\mu}$ takes the value $\mu_i$ at the $i^{th}$ row and zero elsewhere and the vector $\boldsymbol{\rho}$ takes the
+value $\rho_j$ at the $j^{th}$ row and zero elsewhere (of course, one can have $i = j$). In Appendix B.4, the calculations
+of the following closed-form expressions for $\zeta_{\theta}(\boldsymbol{\mu}, \boldsymbol{\rho})$ are detailed. 
+
+• If $(m-1)p+1 \le i,j \le mp$, where $p$ denotes the number of parameters per source, then, we have
+
+$$
+\begin{equation}
+\begin{aligned}
+\zeta_{\theta}(\boldsymbol{\mu}, \boldsymbol{\rho}) = {}& \sum_{t=1}^{T} \| \{\mathbf{s}(t)\}_m \|^{2} \sum_{i=1}^{M} \sum_{j=1}^{M} \{\mathbf{R}_{\boldsymbol{n}}^{-1}\}_{i,j} \\
+& \times \left( \exp\left(-j\frac{2\pi}{\lambda}\mathbf{r}_{i}^{T}\boldsymbol{\mu}_{m}\right) - \exp\left(-j\frac{2\pi}{\lambda}\mathbf{r}_{i}^{T}\boldsymbol{\rho}_{m}\right) \right) \\
+& \times \left( \exp\left(j\frac{2\pi}{\lambda}\mathbf{r}_{j}^{T}\boldsymbol{\mu}_{m}\right) - \exp\left(j\frac{2\pi}{\lambda}\mathbf{r}_{j}^{T}\boldsymbol{\rho}_{m}\right) \right)
+\end{aligned}
+\tag{34}
+\end{equation}
+$$
+
+• Otherwise, if $(m-1)p+1 \le i \le mp$ and $(n-1)p+1 \le j \le np$, then we have
+
+$$
+\begin{equation}
+\begin{aligned}
+\zeta_{\theta}(\boldsymbol{\mu}, \boldsymbol{\rho}) = {}& \sum_{t=1}^{T} \| \{\mathbf{s}(t)\}_m \|^{2} \sum_{i=1}^{M} \sum_{j=1}^{M} \{\mathbf{R}_{\boldsymbol{n}}^{-1}\}_{i,j} \left( \exp\left(-j\frac{2\pi}{\lambda}\mathbf{r}_{i}^{T}\boldsymbol{\mu}_{m}\right) - 1 \right) \left( \exp\left(j\frac{2\pi}{\lambda}\mathbf{r}_{j}^{T}\boldsymbol{\mu}_{m}\right) - 1 \right) \\
+& + \sum_{t=1}^{T} \| \{\mathbf{s}(t)\}_n \|^{2} \sum_{i=1}^{M} \sum_{j=1}^{M} \{\mathbf{R}_{\boldsymbol{n}}^{-1}\}_{i,j} \left( \exp\left(-j\frac{2\pi}{\lambda}\mathbf{r}_{i}^{T}\boldsymbol{\rho}_{n}\right) - 1 \right) \left( \exp\left(j\frac{2\pi}{\lambda}\mathbf{r}_{j}^{T}\boldsymbol{\rho}_{n}\right) - 1 \right) \\
+& - 2 \operatorname{Re} \left( \sum_{t=1}^{T} \{\mathbf{s}(t)\}_m^* \{\mathbf{s}(t)\}_n \sum_{i=1}^{M} \sum_{j=1}^{M} \{\mathbf{R}_{\boldsymbol{n}}^{-1}\}_{i,j} \exp\left(j\frac{2\pi}{\lambda}(\mathbf{r}_{j}^{T}\boldsymbol{\theta}_{n} - \mathbf{r}_{i}^{T}\boldsymbol{\theta}_{m})\right) \right. \\
+& \qquad \left. \times \left( \exp\left(-j\frac{2\pi}{\lambda}\mathbf{r}_{i}^{T}\boldsymbol{\mu}_{m}\right) - 1 \right) \left( \exp\left(j\frac{2\pi}{\lambda}\mathbf{r}_{j}^{T}\boldsymbol{\rho}_{n}\right) - 1 \right) \right).
+\end{aligned}
+\tag{35}
+\end{equation}
+$$
+---PAGE_BREAK---
+
+In particular, if one assumes $\mathbf{R}_n = \sigma_n^2 \mathbf{I}$, then, several simplifications can be done:
+
+• If $(m-1)p+1 \le i,j \le mp$, then
+
+$$
+\zeta_{\theta}(\boldsymbol{\mu}, \boldsymbol{\rho}) = \frac{1}{\sigma_n^2} \sum_{i=1}^{M} \left\| \exp\left(-j \frac{2\pi}{\lambda} \mathbf{r}_i^T \boldsymbol{\mu}_m\right) - \exp\left(-j \frac{2\pi}{\lambda} \mathbf{r}_i^T \boldsymbol{\rho}_m\right) \right\|^2 \sum_{t=1}^{T} \| \{\mathbf{s}(t)\}_m \|^2, \quad (36)
+$$
+
+where we note that the function $\zeta_{\theta}(\boldsymbol{\mu}, \boldsymbol{\rho})$ does not depend on the parameter $\theta$.
+
+• Otherwise, if $(m-1)p+1 \le i \le mp$ and $(n-1)p+1 \le j \le np$, then
+
+$$
+\begin{equation}
+\begin{split}
+\zeta_{\theta}(\boldsymbol{\mu}, \boldsymbol{\rho}) &= \frac{1}{\sigma_n^2} \sum_{i=1}^{M} \left\| \exp\left(-j \frac{2\pi}{\lambda} \mathbf{r}_i^T \boldsymbol{\mu}_m\right) - 1 \right\|^2 \sum_{t=1}^{T} \| \{\mathbf{s}(t)\}_m \|^2 \\
+&\qquad + \frac{1}{\sigma_n^2} \sum_{i=1}^{M} \left\| \exp\left(-j \frac{2\pi}{\lambda} \mathbf{r}_i^T \boldsymbol{\rho}_n\right) - 1 \right\|^2 \sum_{t=1}^{T} \| \{\mathbf{s}(t)\}_n \|^2 \\
+&\quad - 2 \operatorname{Re} \left( \frac{1}{\sigma_n^2} \sum_{i=1}^{M} \exp\left(j \frac{2\pi}{\lambda} \mathbf{r}_i^T (\boldsymbol{\theta}_n - \boldsymbol{\theta}_m)\right) \left( \exp\left(-j \frac{2\pi}{\lambda} \mathbf{r}_i^T \boldsymbol{\mu}_m\right) - 1 \right) \left( \exp\left(j \frac{2\pi}{\lambda} \mathbf{r}_i^T \boldsymbol{\rho}_n\right) - 1 \right) \sum_{t=1}^{T} \{\mathbf{s}(t)\}_m^* \{\mathbf{s}(t)\}_n \right)
+\end{split}
+\tag{37}
+\end{equation}
+$$
+
+It is clear that the formulas proposed above for both the unconditional and the conditional models can be applied to any kind of array geometry and whatever the number of sources. 
However, they generally depend on the parameter vector $\theta$. This means that, in general, the calculation of the set of functions $\eta$ will have to be performed numerically (except if one is able to find a closed-form expression of Eqn. (11)). In the following we present a kind of array geometry where, fortunately, the set of functions $\eta_\theta$ will not depend on $\theta$ leading to a straightforward calculation of the bound. + +5.2. 3D Source localization with a planar array + +We first consider the problem of DOA estimation of a single narrow band source in the far field area by using an arbitrary planar array. In fact, we start by this general setting because the non-uniform linear array is clearly a particular case of this array. Without loss of generality, we assume that the sensors of this array lay on the $xOy$ plan with Cartesian coordinates (see Fig. .1). Therefore, the vector $\mathbf{r}_i$ contains the coordinate of the $i^{th}$ sensor position with respect to this referential, i.e., $\mathbf{r}_i = [d_{x_i} \ d_{y_i}]^T$, $i = 1, ..., M$. From (28), the steering vector is given by + +$$ +\mathbf{a}(\boldsymbol{\theta}) = \left[ \exp\left(j \frac{2\pi}{\lambda} (d_{x_1} u + d_{y_1} v)\right) \dots \exp\left(j \frac{2\pi}{\lambda} (d_{x_M} u + d_{y_M} v)\right) \right]^T, \quad (38) +$$ + +where, as in [18], the parameter vector of interest is $\boldsymbol{\theta} = [u \ v]^T$ where + +$$ +\begin{cases} +u = \sin \varphi \cos \phi, \\ +v = \sin \varphi \sin \phi, +\end{cases} +\tag{39} +$$ + +and where $\varphi$ and $\phi$ represent the elevation and azimuth angles of the source, respectively. The parameters space is such that $u \in [-1, 1]$ and $v \in [-1, 1]$. Therefore, we assume that they both follow a uniform distribution over $[-1, 1]$. Note that from a physical point of view, it should be more tempting to choose a uniform +---PAGE_BREAK--- + +prior for $\varphi$ and $\phi$. 
This will lead to probability density functions for $u$ and $v$ that are not uniform. To the best of our knowledge, this assumption has only been used in the context of lower bounds in [20]. Unfortunately, such a prior leads to an intractable expression of the bound (see Eqn. (21) of [20]). Consequently, other authors have generally not specified the prior, leading to semi closed-form expressions of the bounds (i.e., a numerical integration over the parameters remains to be performed) [20][37][22]. On the other hand, in order to obtain a closed-form expression, authors have generally used a simplified assumption, i.e., a uniform prior directly on $u$ and $v$ (see, for example, [21][38]). In this paper, we have followed the same approach, expecting only a slight modification of performance with respect to a more physical model and in order to be able to get closed-form expressions of the bound.
+
+We choose the matrix of test points such that
+
+$$ \mathbf{H} = [\mathbf{h}_u \quad \mathbf{h}_v] = \begin{bmatrix} h_u & 0 \\ 0 & h_v \end{bmatrix}. \qquad (40) $$
+
+Then, we have: $\theta + \mathbf{h}_u = [u + h_u \ v]^T$ and $\theta + \mathbf{h}_v = [u \ v + h_v]^T$. Moreover, we now have two elements $s_i \in [0, 1], i = 1, 2$, for which we will prefer the notation $s_u$ and $s_v$, respectively.
+
+### 5.2.1. Unconditional observation model $\mathcal{M}_1$
+
+Under $\mathcal{M}_1$, let us set $U_{SNR} = \frac{\sigma_s^4}{\sigma_n^2(M\sigma_s^2+\sigma_n^2)}$. 
The closed-form expressions of the elements of matrix $\mathbf{G} = [\begin{matrix} \{\mathbf{G}\}_{uu} & \{\mathbf{G}\}_{uv} \\ \{\mathbf{G}\}_{vu} & \{\mathbf{G}\}_{vv} \end{matrix}]$ are given by (see Appendix B.5 for the proof):
+
+$$ \{\mathbf{G}\}_{uu} = \frac{\left( \left(1 - \frac{|h_u|}{2}\right) \left(1 + 2s_u(1 - 2s_u)U_{\text{SNR}} \left(M^2 - \left\|\sum_{k=1}^{M} \exp(-j\frac{2\pi}{\lambda}d_{x_k}h_u)\right\|^2\right)\right)^{-T} + \left(1 - \frac{|h_u|}{2}\right) \left(1 + 2(1-s_u)(2s_u-1)U_{\text{SNR}} \left(M^2 - \left\|\sum_{k=1}^{M} \exp(-j\frac{2\pi}{\lambda}d_{x_k}h_u)\right\|^2\right)\right)^{-T} \right)}{\left(1 - \frac{|h_u|}{2}\right)^2 \left(1 + s_u(1-s_u)U_{\text{SNR}} \left(M^2 - \left\|\sum_{k=1}^{M} \exp(-j\frac{2\pi}{\lambda}d_{x_k}h_u)\right\|^2\right)\right)^{-2T}}, \quad (41) $$
+
+$$ \{\mathbf{G}\}_{vv} = \frac{\left( \left(1 - \frac{|h_v|}{2}\right) \left(1 + 2s_v(1 - 2s_v)U_{\text{SNR}} \left(M^2 - \left\|\sum_{k=1}^{M} \exp(-j\frac{2\pi}{\lambda}d_{y_k}h_v)\right\|^2\right)\right)^{-T} + \left(1 - \frac{|h_v|}{2}\right) \left(1 + 2(1-s_v)(2s_v-1)U_{\text{SNR}} \left(M^2 - \left\|\sum_{k=1}^{M} \exp(-j\frac{2\pi}{\lambda}d_{y_k}h_v)\right\|^2\right)\right)^{-T} \right)}{\left(1 - \frac{|h_v|}{2}\right)^2 \left(1 + s_v(1-s_v)U_{\text{SNR}} \left(M^2 - \left\|\sum_{k=1}^{M} \exp(-j\frac{2\pi}{\lambda}d_{y_k}h_v)\right\|^2\right)\right)^{-2T}}, \quad (42) $$
+---PAGE_BREAK---
+
+$$
+\begin{equation}
+\left\{
+\begin{aligned}
+& \left(
+  \begin{pmatrix}
+  s_u s_v \left( \left\| \sum_{k=1}^{M} \exp(-j \frac{2\pi}{\lambda} (d_{x_k} h_u - d_{y_k} h_v)) \right\|^2 - M^2 \right) \\
+  +s_u(1-s_u-s_v) \left( \left\| \sum_{k=1}^{M} \exp(-j \frac{2\pi}{\lambda} d_{x_k} h_u) \right\|^2 - M^2 \right) \\
+  +s_v(1-s_u-s_v) \left( \left\| \sum_{k=1}^{M} \exp(-j \frac{2\pi}{\lambda} d_{y_k} h_v) \right\|^2 - M^2 \right)
+  \end{pmatrix}
+  \right)^{-T} \\
+& \times \left(
+  \begin{pmatrix}
+  -s_u s_v (1-s_u-s_v) \frac{U_{SNR}^2 
R^{\sigma_n^2}}{\sigma_n^2} \cdots
$$

[The intermediate steps and the general expression of $\{\mathbf{G}\}_{uv}$ above were corrupted during extraction. The expression below is reconstructed from its visible coefficient fragments, from the structure of the conditional-model counterpart in Eqn. (50), and from the special case $s_u = s_v = 1/2$ given in Eqn. (47), to which it reduces.]

$$
\begin{align}
\{\mathbf{G}\}_{uv} = \frac{\begin{aligned}[t]
& \left(1 + U_{SNR} \left(\begin{aligned}[t]
  & s_u s_v \left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}(d_{x_k}h_u - d_{y_k}h_v)\right)\right\|^2\right) \\
  & - s_u(s_u+s_v-1) \left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right\|^2\right) \\
  & - s_v(s_u+s_v-1) \left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right\|^2\right)
\end{aligned}\right)\right)^{-T} \\
& + \left(1 + U_{SNR} \left(\begin{aligned}[t]
  & (1-s_u)(1-s_v) \left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}(d_{x_k}h_u - d_{y_k}h_v)\right)\right\|^2\right) \\
  & + (1-s_u)(s_u+s_v-1) \left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right\|^2\right) \\
  & + (1-s_v)(s_u+s_v-1) \left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right\|^2\right)
\end{aligned}\right)\right)^{-T} \\
& - \left(1 + U_{SNR} \left(\begin{aligned}[t]
  & s_u(1-s_v) \left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}(d_{x_k}h_u + d_{y_k}h_v)\right)\right\|^2\right) \\
  & + s_u(s_v-s_u) \left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right\|^2\right) \\
  & + (1-s_v)(s_v-s_u) \left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right\|^2\right)
\end{aligned}\right)\right)^{-T} \\
& - \left(1 + U_{SNR} \left(\begin{aligned}[t]
  & (1-s_u)s_v \left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}(d_{x_k}h_u + d_{y_k}h_v)\right)\right\|^2\right) \\
  & + (1-s_u)(s_u-s_v) \left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right\|^2\right) \\
  & + s_v(s_u-s_v) \left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right\|^2\right)
\end{aligned}\right)\right)^{-T}
\end{aligned}}
{\left(1 + s_u(1-s_u)U_{SNR}\left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right\|^2\right)\right)^{-T} \left(1 + s_v(1-s_v)U_{SNR}\left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right\|^2\right)\right)^{-T}}, \tag{43}
\end{align}
$$

---PAGE_BREAK---

and, of course, ${\mathbf{G}}_{uv} = {\mathbf{G}}_{vu}$. Consequently, the unconditional Weiss-Weinstein bound is a 2 × 2 matrix given by:

$$
\begin{align}
\mathbf{UWWB} &= \mathbf{HG}^{-1}\mathbf{H}^T \nonumber \\
&= \frac{1}{\{\mathbf{G}\}_{uu}\{\mathbf{G}\}_{vv} - \{\mathbf{G}\}_{uv}^2} \begin{bmatrix}
h_u^2 \{\mathbf{G}\}_{vv} & -h_u h_v \{\mathbf{G}\}_{uv} \\
-h_u h_v \{\mathbf{G}\}_{uv} & h_v^2 \{\mathbf{G}\}_{uu}
\end{bmatrix}, \tag{44}
\end{align}
$$

which has to be optimized over $s_u$, $s_v$, $h_u$, and $h_v$. Concerning the optimization over $s_u$ and $s_v$, several other works in the literature have suggested to simply use $s_u = s_v = 1/2$. Most of the time, numerical simulations of this simplified bound compared with the bound obtained after optimization over $s_u$ and $s_v$ lead to the same results, while there is no formal proof of this fact (see [5] page 41 footnote 17). Note that, thanks to the expressions obtained in the next Section concerning the linear array, we will be able to prove that $s = 1/2$ is a (maybe not unique) correct choice for any linear array. In the case of the planar array treated in this Section, we will only check this property by simulation.
In the particular case where $s_u = s_v = 1/2$ one obtains the following simplified expressions

$$
\begin{align}
\{\mathbf{G}\}_{uu} &= \frac{2\left(1-\frac{|h_u|}{2}\right) - 2(1-|h_u|)\left(1+\frac{U_{SNR}}{4}\left(M^2 - \left\|\sum_{k=1}^M \exp(-j\frac{4\pi}{\lambda}d_{x_k}h_u)\right\|^2\right)\right)^{-T}}{\left(1-\frac{|h_u|}{2}\right)^2 \left(1+\frac{U_{SNR}}{4}\left(M^2 - \left\|\sum_{k=1}^M \exp(-j\frac{2\pi}{\lambda}d_{x_k}h_u)\right\|^2\right)\right)^{-2T}}, \tag{45} \\
\{\mathbf{G}\}_{vv} &= \frac{2\left(1-\frac{|h_v|}{2}\right) - 2(1-|h_v|)\left(1+\frac{U_{SNR}}{4}\left(M^2 - \left\|\sum_{k=1}^M \exp(-j\frac{4\pi}{\lambda}d_{y_k}h_v)\right\|^2\right)\right)^{-T}}{\left(1-\frac{|h_v|}{2}\right)^2 \left(1+\frac{U_{SNR}}{4}\left(M^2 - \left\|\sum_{k=1}^M \exp(-j\frac{2\pi}{\lambda}d_{y_k}h_v)\right\|^2\right)\right)^{-2T}}, \tag{46}
\end{align}
$$

and

$$
\begin{equation}
\{\mathbf{G}\}_{uv} = \frac{2\left(1 + \frac{U_{SNR}}{4}\left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}(d_{x_k}h_u - d_{y_k}h_v)\right)\right\|^2\right)\right)^{-T} - 2\left(1 + \frac{U_{SNR}}{4}\left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}(d_{x_k}h_u + d_{y_k}h_v)\right)\right\|^2\right)\right)^{-T}}{\left(1 + \frac{U_{SNR}}{4}\left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right\|^2\right)\right)^{-T}\left(1 + \frac{U_{SNR}}{4}\left(M^2 - \left\|\sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right\|^2\right)\right)^{-T}}.
\tag{47}
\end{equation}
$$

Again, the Weiss-Weinstein bound is obtained by using the above expressions in Eqn. (44) and after an optimization over the test points.
The optimization over the test points can be done over a search grid or by using the ambiguity diagram of the array in order to reduce significantly the computational cost (see [14],[22], [30],[39]). +---PAGE_BREAK--- + +5.2.2. Conditional observation model $M_2$ + +Under $\mathcal{M}_2$, let us set $C_{SNR} = \frac{1}{\sigma_n^2} \sum_{t=1}^{T} \|s(t)\|^2$. The closed-form expressions of the elements of matrix **G** are given by (see Appendix .6 for the proof): + +$$ +\begin{align} +\{\mathbf{G}\}_{uu} &= \frac{\left( \begin{aligned}[c] + &\left(1 - \frac{|h_u|}{2}\right) \exp\left(4s_u(2s_u - 1)C_{SNR}\left(M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right)\right) \\ + + &\left(1 - \frac{|h_u|}{2}\right) \exp\left(4(2s_u - 1)(s_u - 1)C_{SNR}\left(M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right)\right) \\ + - &2(1 - |h_u|) \exp\left(2s_u(s_u - 1)C_{SNR}\left(M - \sum_{k=1}^{M} \cos\left(\frac{4\pi}{\lambda}d_{x_k}h_u\right)\right)\right) +\end{aligned} \right)}{\left(1 - \frac{|h_u|}{2}\right)^2 \exp\left(4s_u(s_u - 1)C_{SNR}\left(M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right)\right)}, \tag{48} +\end{align} +$$ + +$$ +\begin{equation} +\begin{split} +\{\mathbf{G}\}_{vv} = {}& \frac{\left( \begin{aligned}[t] + &\left(1 - \frac{|h_v|}{2}\right) \exp\left(4s_v(2s_v - 1)C_{SNR}\left(M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right)\right) \\ + + &\left(1 - \frac{|h_v|}{2}\right) \exp\left(4(2s_v - 1)(s_v - 1)C_{SNR}\left(M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right)\right) \\ + - &2(1 - |h_v|) \exp\left(2s_v(s_v - 1)C_{SNR}\left(M - \sum_{k=1}^{M} \cos\left(\frac{4\pi}{\lambda}d_{y_k}h_v\right)\right)\right) +\end{aligned} \right)}{\left(1 - \frac{|h_v|}{2}\right)^2 \exp\left(4s_v(s_v - 1)C_{SNR}\left(M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right)\right)}, +\end{split} +\tag{49} +\end{equation} +$$ + +$$ 
\begin{equation}
\begin{split}
\{\mathbf{G}\}_{uv} = {}& \frac{\begin{aligned}[t]
& \exp\left(\begin{aligned}[t]
  & 2s_u(s_u+s_v-1)C_{SNR}\left(M - \sum_{k=1}^{M}\cos\left(\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right) \\
  & + 2s_v(s_u+s_v-1)C_{SNR}\left(M - \sum_{k=1}^{M}\cos\left(\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right) \\
  & - 2s_u s_v C_{SNR}\left(M - \sum_{k=1}^{M}\cos\left(\frac{2\pi}{\lambda}(d_{x_k}h_u - d_{y_k}h_v)\right)\right)
\end{aligned}\right) \\
& + \exp\left(\begin{aligned}[t]
  & 2(s_u-1)(s_u+s_v-1)C_{SNR}\left(M - \sum_{k=1}^{M}\cos\left(\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right) \\
  & + 2(s_v-1)(s_u+s_v-1)C_{SNR}\left(M - \sum_{k=1}^{M}\cos\left(\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right) \\
  & - 2(1-s_u)(1-s_v)C_{SNR}\left(M - \sum_{k=1}^{M}\cos\left(\frac{2\pi}{\lambda}(d_{x_k}h_u - d_{y_k}h_v)\right)\right)
\end{aligned}\right) \\
& - \exp\left(\begin{aligned}[t]
  & 2s_u(s_u-s_v)C_{SNR}\left(M - \sum_{k=1}^{M}\cos\left(\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right) \\
  & + 2(1-s_v)(s_u-s_v)C_{SNR}\left(M - \sum_{k=1}^{M}\cos\left(\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right) \\
  & + 2s_u(s_v-1)C_{SNR}\left(M - \sum_{k=1}^{M}\cos\left(\frac{2\pi}{\lambda}(d_{x_k}h_u + d_{y_k}h_v)\right)\right)
\end{aligned}\right) \\
& - \exp\left(\begin{aligned}[t]
  & 2(s_u-1)(s_u-s_v)C_{SNR}\left(M - \sum_{k=1}^{M}\cos\left(\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right) \\
  & + 2s_v(s_v-s_u)C_{SNR}\left(M - \sum_{k=1}^{M}\cos\left(\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right) \\
  & + 2(s_u-1)s_v C_{SNR}\left(M - \sum_{k=1}^{M}\cos\left(\frac{2\pi}{\lambda}(d_{x_k}h_u + d_{y_k}h_v)\right)\right)
\end{aligned}\right)
\end{aligned}}
{\exp\left(2s_u(s_u-1)C_{SNR}\left(M - \sum_{k=1}^{M}\cos\left(\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right)\right) \exp\left(2s_v(s_v-1)C_{SNR}\left(M - \sum_{k=1}^{M}\cos\left(\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right)\right)},
\end{split}
\tag{50}
\end{equation}
$$

[Eqn. (50) was corrupted during extraction; it is reconstructed above from its visible fragments and verified to reduce to Eqn. (53) for $s_u = s_v = 1/2$.]

and $\{\mathbf{G}\}_{uv} = \{\mathbf{G}\}_{vu}$. Consequently, the conditional Weiss-Weinstein bound is a 2 × 2 matrix given by using the above equations in Eqn. (44). As for the unconditional case, if we set $s_u = s_v = 1/2$, one obtains the following simplified expressions
---PAGE_BREAK---

$$
\begin{align}
\{\mathbf{G}\}_{uu} &= \frac{2\left(1 - \frac{|h_u|}{2}\right) - 2(1 - |h_u|)\exp\left(-\frac{C_{SNR}}{2}\left(M - \sum_{k=1}^{M} \cos\left(\frac{4\pi}{\lambda}d_{x_k}h_u\right)\right)\right)}{\left(1 - \frac{|h_u|}{2}\right)^2 \exp\left(-C_{SNR}\left(M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right)\right)}, \tag{51} \\
\{\mathbf{G}\}_{vv} &= \frac{2\left(1 - \frac{|h_v|}{2}\right) - 2(1 - |h_v|)\exp\left(-\frac{C_{SNR}}{2}\left(M - \sum_{k=1}^{M} \cos\left(\frac{4\pi}{\lambda}d_{y_k}h_v\right)\right)\right)}{\left(1 - \frac{|h_v|}{2}\right)^2 \exp\left(-C_{SNR}\left(M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right)\right)}, \tag{52} \\
\{\mathbf{G}\}_{uv} &= \frac{\begin{pmatrix} 2 \exp\left(-\frac{C_{SNR}}{2}\left(M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}(d_{x_k}h_u - d_{y_k}h_v)\right)\right)\right) \\ -2 \exp\left(-\frac{C_{SNR}}{2}\left(M - 
\sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}(d_{x_k}h_u + d_{y_k}h_v)\right)\right)\right) \end{pmatrix}}{\exp\left(-\frac{C_{SNR}}{2}\left(2M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}d_{x_k}h_u\right) - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right)\right)}. \tag{53}
\end{align}
$$

By using the above expressions in Eqn. (44) and after an optimization over the test points, one obtains the Weiss-Weinstein bound.

5.3. Source localization with a non-uniform linear array

We now briefly consider the DOA estimation of a single narrow band source in the far field by using a non-uniform linear array antenna. Without loss of generality, let us assume that the linear array antenna lies on the Ox axis of the coordinate system (see Fig. .1); consequently, $d_{y_i} = 0, \forall i$. The sensor positions vector is denoted $[d_{x_1} ... d_{x_M}]$. By letting $\theta = \sin \varphi$, where $\varphi$ denotes the elevation angle of the source, the steering vector is then given by

$$
\mathbf{a}(\theta) = \left[ \exp \left( j \frac{2\pi}{\lambda} d_{x_1} \theta \right) \dots \exp \left( j \frac{2\pi}{\lambda} d_{x_M} \theta \right) \right]^T . \quad (54)
$$

We assume that the parameter $\theta$ follows a uniform distribution over $[-1, 1]$. As in Section 4.2 and since the parameter of interest is a scalar, matrix **H** of the test points becomes a scalar denoted $h_\theta$. In the same way, there is only one element $s_i \in [0, 1]$, which will be simply denoted *s*. The closed-form expressions given here are straightforwardly obtained from the aforementioned results on the planar array about the element denoted $\{\mathbf{G}\}_{uu}$. We will continue to use the previously introduced notations $U_{SNR} = \frac{\sigma_s^4}{\sigma_n^2 (M\sigma_s^2 + \sigma_n^2)}$ and $C_{SNR} = \frac{1}{\sigma_n^2} \sum_{t=1}^T \|s(t)\|^2$.
---PAGE_BREAK---

### 5.3.1.
Unconditional observation model $M_1$ + +The closed-form expression of the unconditional Weiss-Weinstein bound, denoted UWWB, is given by + +$$ \text{UWWB} = \frac{h_{\theta}^{2} \left(1 - \frac{|h_{\theta}|}{2}\right)^{2} \left(1 + s(1-s)U_{\text{SNR}} \left(M^{2} - \left\| \sum_{k=1}^{M} \exp(-j \frac{2\pi}{\lambda} d_{x_k} h_{\theta}) \right\|^2\right)\right)^{-2T}}{\left(1 - \frac{|h_{\theta}|}{2}\right) \left( \begin{aligned}[t] & \left(1 + 2s(1-2s)U_{\text{SNR}} \left(M^2 - \left\| \sum_{k=1}^{M} \exp(-j \frac{2\pi}{\lambda} d_{x_k} h_{\theta}) \right\|^2\right)\right)^{-T} \\ & + \left(1 + 2(1-s)(2s-1)U_{\text{SNR}} \left(M^2 - \left\| \sum_{k=1}^{M} \exp(-j \frac{2\pi}{\lambda} d_{x_k} h_{\theta}) \right\|^2\right)\right)^{-T} \end{aligned} \right) \\ & - 2(1-|h_{\theta}|) \left(1 + s(1-s)U_{\text{SNR}} \left(M^2 - \left\| \sum_{k=1}^{M} \exp(-j \frac{4\pi}{\lambda} d_{x_k} h_{\theta}) \right\|^2\right)\right)^{-T}} \tag{55} $$ + +In order to find one optimal value of $s$ that maximizes $\mathbf{HG}^{-1}\mathbf{H}^T$, $\forall h_\theta$ we have considered the derivative of $\mathbf{HG}^{-1}\mathbf{H}^T$ w.r.t. $s$. The calculation (not reported here) is straightforward and it is easy to see that $\left.\frac{\partial \mathbf{HG}^{-1}\mathbf{H}^T}{\partial s}\right|_{s=\frac{1}{2}} = 0$. Consequently, the Weiss-Weinstein bound has just to be optimized over $h_\theta$ and is simplified leading to + +$$ UWWB = \sup_{h_{\theta}} \frac{h_{\theta}^{2} \left(1 - \frac{|h_{\theta}|}{2}\right)^{2} \left(1 + \frac{U_{SNR}}{4} \left(M^{2} - \left\| \sum_{k=1}^{M} \exp(-j \frac{2\pi}{\lambda} d_{x_k} h_{\theta}) \right\|^2\right)\right)^{-2T}}{2 \left(1 - \frac{|h_{\theta}|}{2}\right) - 2(1-|h_{\theta}|) \left(1 + \frac{U_{SNR}}{4} \left(M^{2} - \left\| \sum_{k=1}^{M} \exp(-j \frac{4\pi}{\lambda} d_{x_k} h_{\theta}) \right\|^2\right)\right)^{-T}} . 
\tag{56} $$ + +In the classical case of a uniform linear array (i.e., $d_{x_k} = d$), this expression can be still simplified by +noticing that $\sum_{k=1}^{M} \exp(-j \frac{2\pi}{\lambda} d_{x_k} h_{\theta}) = M \exp(-j \frac{2\pi d}{\lambda} h_{\theta})$. + +### 5.3.2. Conditional observation model $M_2$ + +The closed-form expression of the conditional Weiss-Weinstein bound CWWB is given by + +$$ CWWB = \frac{h_{\theta}^{2} \left(1 - \frac{|h_{\theta}|}{2}\right)^{2} \exp \left(4s(s-1)C_{SNR} \left(M - \sum_{k=1}^{M} \cos \left(\frac{2\pi}{\lambda} d_{x_k} h_{\theta}\right)\right)\right)}{\left(1 - \frac{|h_{\theta}|}{2}\right) \left( \begin{aligned}[t] & \exp \left(4s(2s-1)C_{SNR} \left(M - \sum_{k=1}^{M} \cos \left(\frac{2\pi}{\lambda} d_{x_k} h_{\theta}\right)\right)\right) \\ & + \exp \left(4(2s-1)(s-1)C_{SNR} \left(M - \sum_{k=1}^{M} \cos \left(\frac{2\pi}{\lambda} d_{x_k} h_{\theta}\right)\right)\right) \\ & - 2(1-|h_{\theta}|) \exp \left(2s(s-1)C_{SNR} \left(M - \sum_{k=1}^{M} \cos \left(\frac{4\pi}{\lambda} d_{x_k} h_{\theta}\right)\right)\right) \end{aligned} \right)} . \tag{57} $$ + +Again, it is easy to check that $\left.\frac{\partial \mathbf{HG}^{-1}\mathbf{H}^T}{\partial s}\right|_{s=\frac{1}{2}} = 0$. Consequently, one optimal value of $s$ that maximizes $\mathbf{HG}^{-1}\mathbf{H}^T$, $\forall h_\theta$ is $s = \frac{1}{2}$. The Weiss-Weinstein bound is then simplified as follows + +$$ CWWB = \sup_{h_\theta} \frac{h_\theta^2 \left(1 - \frac{|h_\theta|}{2}\right)^2 \exp\left(-C_{SNR}\left(M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}d_{x_k}h_\theta\right)\right)\right)}{2\left(1 - \frac{|h_\theta|}{2}\right) - 2(1-|h_\theta|)\exp\left(-\frac{1}{2}C_{SNR}\left(M - \sum_{k=1}^{M} \cos\left(\frac{4\pi}{\lambda}d_{x_k}h_\theta\right)\right)\right)}. 
\tag{58} $$
---PAGE_BREAK---

In the classical case of a uniform linear array (i.e., $d_{x_k} = d$), this expression can be still simplified by noticing that $\sum_{k=1}^{M} \cos(\frac{2\pi}{\lambda}d_{x_k}h_{\theta}) = M \cos(\frac{2\pi d}{\lambda}h_{\theta})$.

**6. Simulation results and analysis**

As an illustration of the previously derived results, we first consider the scenario proposed in Fig. 5 of [18], i.e., the DOA estimation under the unconditional model using a uniform circular array consisting of $M = 16$ sensors with a half-wavelength inter-sensor spacing. The number of snapshots is $T = 100$. Since the array is symmetric, the performance estimation concerning the parameters $u$ and $v$ are the same, this is why only the performance with respect to the parameter $u$ is given in Fig. 2. The Weiss-Weinstein bound is computed using Eqn. (45), (46) and (47). The Ziv-Zakai bound is computed using Eqn. (24) in [18]. The empirical global mean square error (MSE) of the maximum *a posteriori* (MAP) estimator is obtained over 2000 Monte Carlo trials. As in the Fig. (1b) in [18], one observes that both the Weiss-Weinstein bound and the Ziv-Zakai bound are tight w.r.t. the MSE of the MAP and capture the SNR threshold. Note that, in the Fig. (1b) in [18], the Weiss-Weinstein bound was computed numerically only.

To the best of our knowledge, there are no closed-form expressions of the Ziv-Zakai bound for the conditional model available in the literature. In this case, we consider 3D source localization using a V-shaped array. Indeed, it has been shown that this kind of array is able to outperform other classical planar arrays, more particularly the uniform circular array [40]. This array is made from two branches of uniform linear arrays with 6 sensors located on each branch and one sensor located at the origin. We denote $\Delta$ the angle between these two branches. The sensors are equally spaced with a half-wavelength.
The number of snapshots is $T = 20$. Fig. 3 shows the behavior of the Weiss-Weinstein bound with respect to the opening angle $\Delta$. One can observe that when $\Delta$ varies, the estimation performance concerning the estimation of parameter $u$ varies slightly. On the contrary, the estimation performance concerning the estimation of parameter $v$ is strongly dependent on $\Delta$. When $\Delta$ increases from 0° to 90°, the Weiss-Weinstein bound of $v$ decreases, as well as the SNR threshold. Fig. 3 also shows that $\Delta = 90^\circ$ is the optimal value, which is different from the optimal value $\Delta = 53.13^\circ$ in [40] since the assumptions concerning the source signal are not the same.

**7. Conclusion**

In this paper, the Weiss-Weinstein bound on the mean square error has been studied in the array processing context. In order to analyze the unconditional and conditional signal source models, the structure of the bound has been detailed for both Gaussian observation models with parameterized mean or parameterized covariance matrix.
---PAGE_BREAK---

Appendix 1.
Closed-form expression of $\eta_{\theta}(\alpha, \beta, \mathbf{u}, \mathbf{v})$ under the Gaussian observation model with parameterized covariance + +Since $\mathbf{y}(t) \sim \mathcal{CN}(\mathbf{0}, \mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta}))$, one has, + +$$ \eta_{\theta}(\alpha, \beta, \mathbf{u}, \mathbf{v}) = \frac{|\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta})|^{T(\alpha+\beta-1)}}{\pi^{MT} |\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta}+\mathbf{u})|^{T\alpha} |\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta}+\mathbf{v})|^{T\beta}} \int_{\Omega} \exp \left( -\sum_{t=1}^{T} \mathbf{y}^H(t) \mathbf{\Gamma}^{-1} \mathbf{y}(t) \right) d\mathbf{Y}, \quad (1) $$ + +where $\mathbf{\Gamma}^{-1} = \alpha \mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta} + \mathbf{u}) + \beta \mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta} + \mathbf{v}) - (\alpha + \beta - 1)\mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta})$. Then, since + +$$ \int_{\Omega} \exp \left\{ -\sum_{t=1}^{T} \mathbf{y}^H(t) \mathbf{\Gamma}^{-1} \mathbf{y}(t) \right\} d\mathbf{Y} = \pi^{MT} |\mathbf{\Gamma}|^T, \quad (2) $$ + +one has + +$$ \eta_{\theta}(\alpha, \beta, \mathbf{u}, \mathbf{v}) = \frac{|\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta})|^{T(\alpha+\beta-1)} |\mathbf{\Gamma}|^T}{|\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta}+\mathbf{u})|^{T\alpha} |\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta}+\mathbf{v})|^{T\beta}} = \frac{|\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta})|^{T(\alpha+\beta-1)}}{|\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta}+\mathbf{u})|^{T\alpha} |\mathbf{R}_{\mathbf{y}}(\boldsymbol{\theta}+\mathbf{v})|^{T\beta} |\mathbf{\Gamma}^{-1}|^T} \quad (3) $$ + +Appendix 2. 
Closed-form expression of $\eta_{\theta}(\alpha, \beta, u, v)$ under the Gaussian observation model with parameterized mean + +Since $\mathbf{y}(t) \sim \mathcal{CN}(\mathbf{f}_t(\boldsymbol{\theta}), \mathbf{R}_{\mathbf{y}})$, one has + +$$ \eta_{\theta}(\alpha, \beta, u, v) = \frac{1}{\pi^{MT} |\mathbf{R}_{\mathbf{y}}|^T} \int_{\Omega} \exp \left( -\sum_{t=1}^{T} \xi(t) \right) d\mathbf{Y}, \quad (4) $$ + +with⁴ + +$$ +\begin{align*} +\xi(t) &= \alpha (\mathbf{y} - \mathbf{f}_t(\boldsymbol{\theta} + \mathbf{u}))^H \mathbf{R}_{\mathbf{y}}^{-1} (\mathbf{y} - \mathbf{f}_t(\boldsymbol{\theta} + \mathbf{u})) + \beta (\mathbf{y} - \mathbf{f}_t(\boldsymbol{\theta} + \mathbf{v}))^H \mathbf{R}_{\mathbf{y}}^{-1} (\mathbf{y} - \mathbf{f}_t(\boldsymbol{\theta} + \mathbf{v})) \\ +&\quad + (1 - \alpha - \beta) (\mathbf{y} - \mathbf{f}_t(\boldsymbol{\theta}))^H \mathbf{R}_{\mathbf{y}}^{-1} (\mathbf{y} - \mathbf{f}_t(\boldsymbol{\theta})) \\ +&= \mathbf{y}^H \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{y} + \alpha \mathbf{f}_t^H (\boldsymbol{\theta} + \mathbf{u}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta} + \mathbf{u}) + \beta \mathbf{f}_t^H (\boldsymbol{\theta} + \mathbf{v}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta} + \mathbf{v}) + (1 - \alpha - \beta) \mathbf{f}_t^H (\boldsymbol{\theta}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta}) \\ +&\quad - 2 \operatorname{Re}\{\mathbf{y}^H \mathbf{R}_{\mathbf{y}}^{-1} (\alpha \mathbf{f}_t (\boldsymbol{\theta} + \mathbf{u}) + \beta \mathbf{f}_t (\boldsymbol{\theta} + \mathbf{v}) + (1 - \alpha - \beta) \mathbf{f}_t (\boldsymbol{\theta}))\}. +\end{align*} +\quad (5) +$$ + +Let us set $\mathbf{x} = \mathbf{y} - (\alpha\mathbf{f}_t(\boldsymbol{\theta} + \mathbf{u}) + \beta\mathbf{f}_t(\boldsymbol{\theta} + \mathbf{v}) + (1-\alpha-\beta)\mathbf{f}_t(\boldsymbol{\theta}))$. 
Consequently, + +$$ +\begin{align} +\mathbf{x}^H \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{x} &= \mathbf{y}^H \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{y} - 2 \operatorname{Re}\{\mathbf{y}^H \mathbf{R}_{\mathbf{y}}^{-1} (\alpha \mathbf{f}_t (\boldsymbol{\theta} + \mathbf{u}) + \beta \mathbf{f}_t (\boldsymbol{\theta} + \mathbf{v}) + (1 - \alpha - \beta) \mathbf{f}_t (\boldsymbol{\theta}))\} \\ +&\quad + (\alpha \mathbf{f}_t^H (\boldsymbol{\theta} + \mathbf{u}) + \beta \mathbf{f}_t^H (\boldsymbol{\theta} + \mathbf{v}) + (1 - \alpha - \beta) \mathbf{f}_t^H (\boldsymbol{\theta})) \mathbf{R}_{\mathbf{y}}^{-1} (\alpha \mathbf{f}_t (\boldsymbol{\theta} + \mathbf{u}) + \beta \mathbf{f}_t (\boldsymbol{\theta} + \mathbf{v}) + (1 - \alpha - \beta) \mathbf{f}_t (\boldsymbol{\theta})) . +\end{align} +$$ + +And $\xi(t)$ can be rewritten as + +$$ +\xi(t) = \mathbf{x}^H \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{x} + \dot{\xi}(t), +$$ + +⁴For simplicity, the dependence on $t$ of $\mathbf{y}$ and $\mathbf{x}$ is not emphasized. +---PAGE_BREAK--- + +where + +$$ +\begin{align} +\dot{\xi}(t) ={}& \alpha (1-\alpha) \mathbf{f}_t^H (\boldsymbol{\theta} + \mathbf{u}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta} + \mathbf{u}) + \beta (1-\beta) \mathbf{f}_t^H (\boldsymbol{\theta} + \mathbf{v}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta} + \mathbf{v}) \nonumber \\ +& + (1-\alpha-\beta) (\alpha+\beta) \mathbf{f}_t^H (\boldsymbol{\theta}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta}) - 2 \operatorname{Re} \left\{ \alpha \beta \mathbf{f}_t^H (\boldsymbol{\theta} + \mathbf{u}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta} + \mathbf{v}) \right. \nonumber \\ +& \qquad \left. + \alpha (1-\alpha-\beta) \mathbf{f}_t^H (\boldsymbol{\theta} + \mathbf{u}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta}) + \beta (1-\alpha-\beta) \mathbf{f}_t^H (\boldsymbol{\theta} + \mathbf{v}) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{f}_t (\boldsymbol{\theta}) \right\}. 
\tag{.8} +\end{align} +$$ + +Note that $\dot{\xi}(t)$ is independent of $\mathbf{x}$. By defining $\mathbf{X} = [\mathbf{x}(1), \mathbf{x}(2), \dots, \mathbf{x}(T)]$, the function $\eta_{\theta}(\alpha, \beta, \mathbf{u}, \mathbf{v})$ becomes + +$$ +\eta_{\theta}(\alpha, \beta, \mathbf{u}, \mathbf{v}) = \frac{1}{\pi^{MT} |\mathbf{R}_{\mathbf{y}}|^T} \int_{\Omega} \exp \left( -\sum_{t=1}^{T} \left( \mathbf{x}^H(t) \mathbf{R}_{\mathbf{y}}^{-1} \mathbf{x}(t) + \dot{\xi}(t) \right) \right) d\mathbf{X} = \exp \left( -\sum_{t=1}^{T} \dot{\xi}(t) \right), \quad (.9) +$$ + +since $\frac{1}{\pi^{MT} |\mathbf{R_y}|^T} \int_{\Omega} \exp \left(-\sum_{t=1}^{T} \mathbf{x}^H(t) \mathbf{R_y}^{-1} \mathbf{x}(t)\right) d\mathbf{X} = 1$. + +Appendix .3. Closed-form expressions of $|m_1\mathbf{R}_y^{-1}(\theta_1) + m_2\mathbf{R}_y^{-1}(\theta_2)|$ and $|m_1\mathbf{R}_y^{-1}(\theta_1) + m_2\mathbf{R}_y^{-1}(\theta_2) + m_3\mathbf{R}_y^{-1}(\theta_3)|$ + +Note that this calculation is actually an extension of the result obtained in Appendix A of [22] in which $m_1 = m_2 = \frac{1}{2}$ and $m_3 = 0$, but follows the same method. The inverse of $\mathbf{R_y}$ can be deduced from the Woodbury formula + +$$ +\mathbf{R}_{\mathrm{y}}^{-1}(\boldsymbol{\theta}) = \frac{1}{\sigma_n^2} \left( \mathbf{I}_M - \frac{\sigma_s^2 \mathbf{a}(\boldsymbol{\theta}) \mathbf{a}^H(\boldsymbol{\theta})}{\sigma_s^2 \| \mathbf{a}(\boldsymbol{\theta}) \|^2 + \sigma_n^2} \right). +$$ + +Then, + +$$ +\sum_{k=1}^{3} m_k \mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta}_k) = \frac{1}{\sigma_n^2} \sum_{k=1}^{3} m_k \left( \mathbf{I}_M - \frac{\sigma_s^2 \mathbf{a}(\boldsymbol{\theta}_k) \mathbf{a}^H(\boldsymbol{\theta}_k)}{\sigma_s^2 \| \mathbf{a}(\boldsymbol{\theta}_k) \|^2 + \sigma_n^2} \right). 
\quad (.10) +$$ + +Since the rank of $\mathbf{a}(\boldsymbol{\theta}_k)\mathbf{a}^H(\boldsymbol{\theta}_k)$ is equal to 1 and since $\boldsymbol{\theta}_1 \neq \boldsymbol{\theta}_2 \neq \boldsymbol{\theta}_3$ (except for $\mathbf{h}_k = \mathbf{h}_l = \mathbf{0}$), the above matrix has $M-3$ eigenvalues equal to $\frac{1}{\sigma_n^2} \sum_{k=1}^{3} m_k$ and 3 eigenvalues corresponding to the eigenvectors made from the linear combination of $\mathbf{a}(\boldsymbol{\theta}_1)$, $\mathbf{a}(\boldsymbol{\theta}_2)$, and $\mathbf{a}(\boldsymbol{\theta}_3)$: $\mathbf{a}(\boldsymbol{\theta}_1) + p\mathbf{a}(\boldsymbol{\theta}_2) + q\mathbf{a}(\boldsymbol{\theta}_3)$. The determinant will then be the product of these $M$ eigenvalues⁵. Let us set + +$$ +\varphi_k = \frac{\sigma_s^2}{\sigma_s^2 \|\mathbf{a}(\boldsymbol{\theta}_k)\|^2 + \sigma_n^2}, \quad k = 1, 2, 3. \tag{.11} +$$ + +Then, the three aforementioned eigenvalues denoted $\lambda$ must satisfy: + +$$ +\left( \sum_{k=1}^{3} m_k \mathbf{R}_{\mathbf{y}}^{-1} (\boldsymbol{\theta}_k) \right) (\mathbf{a}(\boldsymbol{\theta}_1) + p\mathbf{a}(\boldsymbol{\theta}_2) + q\mathbf{a}(\boldsymbol{\theta}_3)) = \lambda (\mathbf{a}(\boldsymbol{\theta}_1) + p\mathbf{a}(\boldsymbol{\theta}_2) + q\mathbf{a}(\boldsymbol{\theta}_3)). \quad (.12) +$$ + +By using Eqn. (.10) in the above equation and after a factorization with respect to $\mathbf{a}(\boldsymbol{\theta}_1)$, $\mathbf{a}(\boldsymbol{\theta}_2)$, and $\mathbf{a}(\boldsymbol{\theta}_3)$ one obtains + +⁵Note that we are only interested in the eigenvalues. 
Consequently, the linear combination of $\mathbf{a}(\boldsymbol{\theta}_1)$, $\mathbf{a}(\boldsymbol{\theta}_2)$, and $\mathbf{a}(\boldsymbol{\theta}_3)$ can be written $\mathbf{a}(\boldsymbol{\theta}_1) + p\mathbf{a}(\boldsymbol{\theta}_2) + q\mathbf{a}(\boldsymbol{\theta}_3)$ instead of $r\mathbf{a}(\boldsymbol{\theta}_1) + p\mathbf{a}(\boldsymbol{\theta}_2) + q\mathbf{a}(\boldsymbol{\theta}_3)$. +---PAGE_BREAK--- + +$$ +\begin{align} +& \left( x - m_1 \varphi_1 \| \mathbf{a}(\boldsymbol{\theta}_1) \|^2 - p m_1 \varphi_1 \mathbf{a}^H(\boldsymbol{\theta}_1) \mathbf{a}(\boldsymbol{\theta}_2) - q m_1 \varphi_1 \mathbf{a}^H(\boldsymbol{\theta}_1) \mathbf{a}(\boldsymbol{\theta}_3) \right) \mathbf{a}(\boldsymbol{\theta}_1) \nonumber \\ +& + \left( -m_2 \varphi_2 \mathbf{a}^H(\boldsymbol{\theta}_2) \mathbf{a}(\boldsymbol{\theta}_1) + p (x - m_2 \varphi_2 \| \mathbf{a}(\boldsymbol{\theta}_2) \|^2) - q m_2 \varphi_2 \mathbf{a}^H(\boldsymbol{\theta}_2) \mathbf{a}(\boldsymbol{\theta}_3) \right) \mathbf{a}(\boldsymbol{\theta}_2) \nonumber \\ +& + \left( -m_3 \varphi_3 \mathbf{a}^H(\boldsymbol{\theta}_3) \mathbf{a}(\boldsymbol{\theta}_1) - m_3 \varphi_3 p \mathbf{a}^H(\boldsymbol{\theta}_3) \mathbf{a}(\boldsymbol{\theta}_2) + q (x - m_3 \varphi_3 \| \mathbf{a}(\boldsymbol{\theta}_3) \|^2) \right) \mathbf{a}(\boldsymbol{\theta}_3) = 0, \tag{.13} +\end{align} +$$ + +where⁶ + +$$ +x = 1 - \sigma_n^2 \lambda. \quad (.14) +$$ + +Consequently, the coefficients of $\mathbf{a}(\boldsymbol{\theta}_1)$, $\mathbf{a}(\boldsymbol{\theta}_2)$, and $\mathbf{a}(\boldsymbol{\theta}_3)$ are equal to zero, leading to a system of three equations with two unknowns ($p$ and $q$). 
Solving the first two equations to find⁷ $p$ and $q$, and applying the solution into the last equation, one obtains the following polynomial equation in $x$ + +$$ +\begin{equation} +\begin{split} +& x^3 - x^2 \sum_{k=1}^{3} m_k \varphi_k \| \mathbf{a}(\boldsymbol{\theta}_k) \|^2 - \frac{x}{2} \sum_{k=1}^{3} \sum_{\substack{k'=1 \\ k' \neq k}}^{3} m_k \varphi_k m_{k'} \varphi_{k'} \left( \left| \mathbf{a}^H(\boldsymbol{\theta}_k) \mathbf{a}(\boldsymbol{\theta}_{k'}) \right|^2 - \| \mathbf{a}(\boldsymbol{\theta}_k) \|^2 \| \mathbf{a}(\boldsymbol{\theta}_{k'}) \|^2 \right) \\ +& - m_1 m_2 m_3 \varphi_1 \varphi_2 \varphi_3 \Big( \| \mathbf{a}(\boldsymbol{\theta}_1) \|^2 \| \mathbf{a}(\boldsymbol{\theta}_2) \|^2 \| \mathbf{a}(\boldsymbol{\theta}_3) \|^2 - \left| \mathbf{a}^H(\boldsymbol{\theta}_2) \mathbf{a}(\boldsymbol{\theta}_3) \right|^2 \| \mathbf{a}(\boldsymbol{\theta}_1) \|^2 \\ +& - \left| \mathbf{a}^H(\boldsymbol{\theta}_1) \mathbf{a}(\boldsymbol{\theta}_2) \right|^2 \| \mathbf{a}(\boldsymbol{\theta}_3) \|^2 - \left| \mathbf{a}^H(\boldsymbol{\theta}_3) \mathbf{a}(\boldsymbol{\theta}_1) \right|^2 \| \mathbf{a}(\boldsymbol{\theta}_2) \|^2 \\ +& + 2 \operatorname{Re} \left\{ \mathbf{a}^H(\boldsymbol{\theta}_3) \mathbf{a}(\boldsymbol{\theta}_2) \mathbf{a}^H(\boldsymbol{\theta}_2) \mathbf{a}(\boldsymbol{\theta}_1) \mathbf{a}^H(\boldsymbol{\theta}_1) \mathbf{a}(\boldsymbol{\theta}_3) \right\} \Big) = 0. +\end{split} +\end{equation} +$$ + +Since we are only interested in the product of the three eigenvalues, we do not have to solve this polynomial in $x$ and only the opposite of the last term is required. This leads to Eqn. (31) with $\sum_{k=1}^{3} m_k = 1$. 
Of course, the closed-form expression of $|m_1\mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta}_1) + m_2\mathbf{R}_{\mathbf{y}}^{-1}(\boldsymbol{\theta}_2)|$ is obtained by letting $m_3 = 0$ and $\sum_{k=1}^{2} m_k = 1$ in Eqn. (32). + +Appendix .4. Closed-form expressions of $\zeta_\theta (\mu, \rho)$ + +Recall that the function $\zeta_{\theta} (\mu, \rho)$ is defined by Eqn. (18). Let us define $p$ as the number of parameters per source (assumed to be the same for each source). Then, without loss of generality, the full parameter vector $\theta$ can be decomposed as $\theta = [\theta_1^T ... \theta_N^T]^T$ where $\theta_i = [\theta_{i,1} ... \theta_{i,p}]^T$, $i = 1, ..., N$ with $q = Np$. Recall that $\mu = [0... \mu_i ... 0]^T$ and $\rho = [0... \rho_j ... 0]^T$. There exist two distinct cases to study: when both indices $i$ and $j$ are such that $(m-1)p+1 \le i \le mp$, $m=1,...,N$ and $(m-1)p+1 \le j \le mp$ or when + +$^{6}$Note that, from Eqn. (16), $\sum_{k=1}^{3} m_k = 1$. + +$^7p$ and $q$ are given by + +$$ +p = \frac{m_2\varphi_2\mathbf{a}^H(\boldsymbol{\theta}_2)(m_1\varphi_1\mathbf{a}(\boldsymbol{\theta}_1)\mathbf{a}^H(\boldsymbol{\theta}_1) + (x-m_1\varphi_1\|\mathbf{a}(\boldsymbol{\theta}_1)\|^2)\mathbf{I})\mathbf{a}(\boldsymbol{\theta}_3)}{m_1\varphi_1\mathbf{a}^H(\boldsymbol{\theta}_1)(m_2\varphi_2\mathbf{a}(\boldsymbol{\theta}_2)\mathbf{a}^H(\boldsymbol{\theta}_2) + (x-m_2\varphi_2\|\mathbf{a}(\boldsymbol{\theta}_2)\|^2)\mathbf{I})\mathbf{a}(\boldsymbol{\theta}_3)}, \quad (.15) +$$ + +and + +$$ +q = \frac{(x - m_1 \varphi_1 \|\mathbf{a}(\boldsymbol{\theta}_1)\|^2)(x - m_2 \varphi_2 \|\mathbf{a}(\boldsymbol{\theta}_2)\|^2) - m_1 \varphi_1 m_2 \varphi_2 \left|\mathbf{a}^H(\boldsymbol{\theta}_1)\mathbf{a}(\boldsymbol{\theta}_2)\right|^2}{m_1 \varphi_1 \mathbf{a}^H(\boldsymbol{\theta}_1)\left(m_2 \varphi_2 \mathbf{a}(\boldsymbol{\theta}_2)\mathbf{a}^H(\boldsymbol{\theta}_2) + (x - m_2 \varphi_2 \|\mathbf{a}(\boldsymbol{\theta}_2)\|^2)\mathbf{I}\right)\mathbf{a}(\boldsymbol{\theta}_3)} . \quad (.16) +$$ +---PAGE_BREAK--- + +$(m-1)p+1 \le i \le mp, m=1,\dots,N$ and $(n-1)p+1 \le j \le np, n=1,\dots,N$ with $m \ne n$. Therefore let us denote: + +$$ +\left\{ +\begin{array}{l} +\boldsymbol{\mu}_m = [0 \cdots 0 \quad h_i \quad 0 \cdots 0]^T \in \mathbb{R}^p \\ +\boldsymbol{\rho}_m = [0 \cdots 0 \quad h_j \quad 0 \cdots 0]^T \in \mathbb{R}^p +\end{array} +\right. +\quad \text{if } (m-1)p+1 \le i,j \le mp +\qquad (.17) +$$ + +and + +$$ +\left\{ +\begin{array}{ll} +\boldsymbol{\mu}_m = [0 \cdots 0 & h_i \quad 0 \cdots 0]^T \in \mathbb{R}^p, \\ +\boldsymbol{\rho}_n = [0 \cdots 0 & h_j \quad 0 \cdots 0]^T \in \mathbb{R}^p, +\end{array} +\right. +\quad +\text{if } +\left\{ +\begin{array}{l} +(m-1)p+1 \le i \le mp, \\ +(n-1)p+1 \le j \le np, +\end{array} +\right. +\quad +\text{with } m \ne n. +\tag{.18} +$$ + +Appendix .4.1. The case where $(m-1)p+1 \le i, j \le mp$ + +In this case, one has: + +$$ +\mathbf{A}(\boldsymbol{\theta} + \boldsymbol{\mu}) - \mathbf{A}(\boldsymbol{\theta} + \boldsymbol{\rho}) = [\mathbf{0} \cdots \mathbf{0} \quad \mathbf{a}(\boldsymbol{\theta}_m + \boldsymbol{\mu}_m) - \mathbf{a}(\boldsymbol{\theta}_m + \boldsymbol{\rho}_m) \quad \mathbf{0} \cdots \mathbf{0}] \in \mathbb{C}^{M \times N}, \quad (.19) +$$ + +and consequently, + +$$ +\zeta_{\theta}(\boldsymbol{\mu}, \boldsymbol{\rho}) = \| \mathbf{R}_{\mathrm{n}}^{-1/2} (\mathbf{a}(\boldsymbol{\theta}_{m}+\boldsymbol{\mu}_{m}) - \mathbf{a}(\boldsymbol{\theta}_{m}+\boldsymbol{\rho}_{m})) \|^{2} \sum_{t=1}^{T} \| \{\mathbf{s}(t)\}_{m} \|^{2}. \quad (.20) +$$ + +Due to Eqn. 
(28), one has + +$$ +\|\mathbf{R}_{\mathrm{n}}^{-1/2} (\mathbf{a}(\boldsymbol{\theta}_m + \boldsymbol{\mu}_m) - \mathbf{a}(\boldsymbol{\theta}_m + \boldsymbol{\rho}_m))\|^2 = +\sum_{i=1}^{M} \sum_{j=1}^{M} \left\{ \mathbf{R}_{\mathrm{n}}^{-1} \right\}_{i,j} \exp \left( j \frac{2\pi}{\lambda} (\mathbf{r}_j^T - \mathbf{r}_i^T) \boldsymbol{\theta}_m \right) \left( \exp(-j \frac{2\pi}{\lambda} \mathbf{r}_i^T \boldsymbol{\mu}_m) - \exp(-j \frac{2\pi}{\lambda} \mathbf{r}_i^T \boldsymbol{\rho}_m) \right) \\ +\times \left( \exp(j \frac{2\pi}{\lambda} \mathbf{r}_j^T \boldsymbol{\mu}_m) - \exp(j \frac{2\pi}{\lambda} \mathbf{r}_j^T \boldsymbol{\rho}_m) \right). \tag{.21} +$$ + +In particular, in the case where $\mathbf{R}_n = \sigma_n^2 \mathbf{I}$ one obtains + +$$ +\| \mathbf{R}_{\mathrm{n}}^{-1/2} (\mathbf{a}(\boldsymbol{\theta}_m + \boldsymbol{\mu}_m) - \mathbf{a}(\boldsymbol{\theta}_m + \boldsymbol{\rho}_m)) \|^{2} = \frac{1}{\sigma_n^2} \sum_{i=1}^{M} \left| \exp(-j \frac{2\pi}{\lambda} \mathbf{r}_i^T \boldsymbol{\mu}_m) - \exp(-j \frac{2\pi}{\lambda} \mathbf{r}_i^T \boldsymbol{\rho}_m) \right|^{2}. \quad (.22) +$$ + +Appendix .4.2. The case where $(m-1)p+1 \le i \le mp$ and where $(n-1)p+1 \le j \le np$ + +Without loss of generality, we assume that $n > m$. 
Then, + +$$ +\begin{align*} +\mathbf{A}(\boldsymbol{\theta} + \boldsymbol{\mu}) - \mathbf{A}(\boldsymbol{\theta} + \boldsymbol{\rho}) &= [\mathbf{a}(\boldsymbol{\theta}_1) - \mathbf{a}(\boldsymbol{\theta}_1) \cdots \mathbf{a}(\boldsymbol{\theta}_m + \boldsymbol{\mu}_m) - \mathbf{a}(\boldsymbol{\theta}_m) \cdots \mathbf{a}(\boldsymbol{\theta}_n) - \mathbf{a}(\boldsymbol{\theta}_n + \boldsymbol{\rho}_n) \cdots \mathbf{a}(\boldsymbol{\theta}_N) - \mathbf{a}(\boldsymbol{\theta}_N)] \\ +&= [\mathbf{0} \cdots \mathbf{0} \quad \mathbf{a}(\boldsymbol{\theta}_m + \boldsymbol{\mu}_m) - \mathbf{a}(\boldsymbol{\theta}_m) \quad \mathbf{0} \cdots \mathbf{0} \quad \mathbf{a}(\boldsymbol{\theta}_n) - \mathbf{a}(\boldsymbol{\theta}_n + \boldsymbol{\rho}_n) \quad \mathbf{0} \cdots \mathbf{0}], \tag{.23} +\end{align*} +$$ + +and consequently, + +$$ +\zeta_{\theta}(\boldsymbol{\mu}, \boldsymbol{\rho}) = \sum_{t=1}^{T} \left\| \mathbf{R}_{\mathrm{n}}^{-1/2} \left( (\mathbf{a}(\boldsymbol{\theta}_{m}+\boldsymbol{\mu}_{m}) - \mathbf{a}(\boldsymbol{\theta}_{m})) \{\mathbf{s}(t)\}_{m} + (\mathbf{a}(\boldsymbol{\theta}_{n}) - \mathbf{a}(\boldsymbol{\theta}_{n}+\boldsymbol{\rho}_{n})) \{\mathbf{s}(t)\}_{n} \right) \right\|^{2}. \tag{.24} +$$ +---PAGE_BREAK--- + +Let us set $\varkappa = \mathbf{R}_n^{-1/2}(\mathbf{a}(\boldsymbol{\theta}_m+\boldsymbol{\mu}_m)-\mathbf{a}(\boldsymbol{\theta}_m))$ and $\boldsymbol{\varrho} = \mathbf{R}_n^{-1/2}(\mathbf{a}(\boldsymbol{\theta}_n)-\mathbf{a}(\boldsymbol{\theta}_n+\boldsymbol{\rho}_n))$. 
Then, $\zeta_{\boldsymbol{\theta}}(\boldsymbol{\mu}, \boldsymbol{\rho})$ can be rewritten + +$$ +\begin{align*} +\zeta_{\boldsymbol{\theta}}(\boldsymbol{\mu}, \boldsymbol{\rho}) &= \sum_{t=1}^{T} \| \varkappa \{\mathbf{s}(t)\}_{m} + \boldsymbol{\varrho} \{\mathbf{s}(t)\}_{n} \|^2 \\ +&= \sum_{t=1}^{T} \left( \varkappa^H \varkappa \| \{\mathbf{s}(t)\}_{m} \|^2 + \varkappa^H \boldsymbol{\varrho} \{\mathbf{s}(t)\}_{m}^* \{\mathbf{s}(t)\}_{n} + \boldsymbol{\varrho}^H \varkappa \{\mathbf{s}(t)\}_{m} \{\mathbf{s}(t)\}_{n}^* + \boldsymbol{\varrho}^H \boldsymbol{\varrho} \| \{\mathbf{s}(t)\}_{n} \|^2 \right) \\ +&= \varkappa^H \varkappa \sum_{t=1}^{T} \| \{\mathbf{s}(t)\}_{m} \|^2 + \boldsymbol{\varrho}^H \boldsymbol{\varrho} \sum_{t=1}^{T} \| \{\mathbf{s}(t)\}_{n} \|^2 + 2 \operatorname{Re} \left( \varkappa^H \boldsymbol{\varrho} \sum_{t=1}^{T} \{\mathbf{s}(t)\}_{m}^* \{\mathbf{s}(t)\}_{n} \right). \tag{25} +\end{align*} +$$ + +By using the structure of the steering matrix **A**, it leads to + +$$ +\left\{ +\begin{aligned} +\varkappa^H \varkappa &= \sum_{i=1}^{M} \sum_{j=1}^{M} \{\mathbf{R}_n^{-1}\}_{i,j} \exp(j \frac{2\pi}{\lambda} (\mathbf{r}_j^T - \mathbf{r}_i^T) \boldsymbol{\theta}_m) \exp(-j \frac{2\pi}{\lambda} \mathbf{r}_i^T \boldsymbol{\mu}_m) \exp(j \frac{2\pi}{\lambda} \mathbf{r}_j^T \boldsymbol{\mu}_m), \\ +\boldsymbol{\varrho}^H \boldsymbol{\varrho} &= \sum_{i=1}^{M} \sum_{j=1}^{M} \{\mathbf{R}_n^{-1}\}_{i,j} \exp(j \frac{2\pi}{\lambda} (\mathbf{r}_j^T - \mathbf{r}_i^T) \boldsymbol{\theta}_n) \exp(-j \frac{2\pi}{\lambda} \mathbf{r}_i^T \boldsymbol{\rho}_n) \exp(j \frac{2\pi}{\lambda} \mathbf{r}_j^T \boldsymbol{\rho}_n), \\ +\varkappa^H \boldsymbol{\varrho} &= -\sum_{i=1}^{M} \sum_{j=1}^{M} \{\mathbf{R}_n^{-1}\}_{i,j} \exp(j \frac{2\pi}{\lambda} (\mathbf{r}_j^T \boldsymbol{\theta}_n - \mathbf{r}_i^T \boldsymbol{\theta}_m)) \exp(-j \frac{2\pi}{\lambda} \mathbf{r}_i^T \boldsymbol{\mu}_m) \exp(j \frac{2\pi}{\lambda} \mathbf{r}_j^T \boldsymbol{\rho}_n). 
+\end{aligned} +\right. +\quad (.26) +$$ + +Appendix .5. Proof of Eqn. (41), (42) and (43) + +In fact, one only has to prove Eqn. (43) since Eqn. (41) and (42) can be obtained by letting $h_u = h_v$ and $s_u = s_v$ in Eqn. (43) and by using $(h_u, s_u)$ for Eqn. (41) and $(h_v, s_v)$ for Eqn. (42). By plugging Eqn. (30) and (32) into Eqn. (16), and by considering the following expressions + +$$ +\begin{align*} +\mathbf{a}^H(\boldsymbol{\theta} + \mathbf{h}_u)\mathbf{a}(\boldsymbol{\theta} + \mathbf{h}_v) &= \sum_{i=1}^{M} \exp(j\frac{2\pi}{\lambda}(d_{y_i}h_v - d_{x_i}h_u)) = (\mathbf{a}^H(\boldsymbol{\theta} + \mathbf{h}_v)\mathbf{a}(\boldsymbol{\theta} + \mathbf{h}_u))^*, \\ +\mathbf{a}^H(\boldsymbol{\theta} \pm \mathbf{h}_u)\mathbf{a}(\boldsymbol{\theta}) &= \sum_{i=1}^{M} \exp(\mp j\frac{2\pi}{\lambda}d_{x_i}h_u), \text{ and } +\mathbf{a}^H(\boldsymbol{\theta} + \mathbf{h}_u)\mathbf{a}(\boldsymbol{\theta} - \mathbf{h}_u) = \sum_{i=1}^{M} \exp(-j\frac{4\pi}{\lambda}d_{x_i}h_u), +\end{align*} +$$ + +one obtains the closed-form expressions for the set of functions $\eta_{\theta}(\alpha, \beta, \mathbf{u}, \mathbf{v})$ + +$$ +\begin{aligned} +\eta_{\theta}(s_u, s_v, \mathbf{h}_u, \mathbf{h}_v) = & \\ +& \Bigg( 1 - U_{SNR} \left( \begin{array}{@{}l@{}} s_u s_v \left( \left\| \sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}(d_{x_k}h_u - d_{y_k}h_v)\right) \right\|^2 - M^2 \right) \\ + s_u(1-s_u-s_v) \left( \left\| \sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{x_k}h_u\right) \right\|^2 - M^2 \right) \\ + s_v(1-s_u-s_v) \left( \left\| \sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{y_k}h_v\right) \right\|^2 - M^2 \right) \end{array} \right) \\ +& - s_u s_v (1-s_u-s_v) \frac{U_{SNR}^2 \sigma_n^2}{\sigma_s^2} \times \\ +& \times \left( \begin{array}{@{}l@{}} \sum_{k=1}^{M} \exp\left(-j\frac{2\pi d_{y_k} h_v}{\lambda}\right) \sum_{k=1}^{M} \exp\left(j\frac{2\pi d_{x_k} h_u}{\lambda}\right) \sum_{k=1}^{M} \exp\left(-j\frac{2\pi (d_{x_k} h_u - d_{y_k} h_v)}{\lambda}\right) \\ + \sum_{k=1}^{M} \exp\left(j\frac{2\pi d_{y_k} h_v}{\lambda}\right) \sum_{k=1}^{M} \exp\left(-j\frac{2\pi d_{x_k} h_u}{\lambda}\right) \sum_{k=1}^{M} \exp\left(j\frac{2\pi (d_{x_k} h_u - d_{y_k} h_v)}{\lambda}\right) \\ - M \left\| \sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda} d_{y_k} h_v\right) \right\|^2 - M \left\| \sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda} d_{x_k} h_u\right) \right\|^2 \\ - M \left\| \sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}(d_{x_k} h_u - d_{y_k} h_v)\right) \right\|^2 + M^3 \end{array} \right) \Bigg)^{-T} +\end{aligned} +. (.27) +$$ +---PAGE_BREAK--- + +$$ +\begin{aligned} +\eta_{\theta}(1 - s_u, 1 - s_v, -\mathbf{h}_u, -\mathbf{h}_v) = & \\ +& \left( 1 - U_{SNR} \left( \begin{array}{@{}l@{}} (1-s_u)(1-s_v) \left( \left\| \sum_{k=1}^{M} \exp \left(j \frac{2\pi}{\lambda} (d_{x_k} h_u - d_{y_k} h_v)\right) \right\|^2 - M^2 \right) \\ + (1-s_u)(s_u+s_v-1) \left( \left\| \sum_{k=1}^{M} \exp \left(j \frac{2\pi}{\lambda} d_{x_k} h_u\right) \right\|^2 - M^2 \right) \\ + (1-s_v)(s_u+s_v-1) \left( \left\| \sum_{k=1}^{M} \exp \left(j \frac{2\pi}{\lambda} d_{y_k} h_v\right) \right\|^2 - M^2 \right) \end{array} \right)^{-T} \\ +& - (1-s_u)(1-s_v)(s_u+s_v-1) \frac{U_{SNR}^2 \sigma_n^2}{\sigma_s^2} \times \\ +& \times \left( \begin{array}{@{}l@{}} \sum_{k=1}^{M} \exp \left(j \frac{2\pi d_{y_k} h_v}{\lambda}\right) \sum_{k=1}^{M} \exp \left(-j \frac{2\pi d_{x_k} h_u}{\lambda}\right) \sum_{k=1}^{M} \exp \left(j \frac{2\pi (d_{x_k} h_u - d_{y_k} h_v)}{\lambda}\right) \\ + \sum_{k=1}^{M} \exp \left(-j \frac{2\pi d_{y_k} h_v}{\lambda}\right) \sum_{k=1}^{M} \exp \left(j \frac{2\pi d_{x_k} 
h_u}{\lambda}\right) \sum_{k=1}^{M} \exp \left(-j \frac{2\pi (d_{x_k} h_u - d_{y_k} h_v)}{\lambda}\right) \\ - M \left\| \sum_{k=1}^{M} \exp \left(-j \frac{2\pi}{\lambda} d_{y_k} h_v\right) \right\|^2 - M \left\| \sum_{k=1}^{M} \exp \left(-j \frac{2\pi}{\lambda} d_{x_k} h_u\right) \right\|^2 \\ - M \left\| \sum_{k=1}^{M} \exp \left(-j \frac{2\pi}{\lambda} (d_{x_k} h_u - d_{y_k} h_v)\right) \right\|^2 + M^3 \end{array} \right) +\end{aligned} +. (.28) +$$ + +$$ +\begin{aligned} +\eta_{\theta}(s_u, 1 - s_v, \mathbf{h}_u, -\mathbf{h}_v) = & \\ +& \Bigg( 1 - U_{SNR} \left( \begin{array}{@{}l@{}} s_u(1-s_v) \left( \left\| \sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}(d_{x_k}h_u + d_{y_k}h_v)\right)\right\|^2 - M^2 \right) \\ + s_u(s_v-s_u) \left( \left\| \sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{x_k}h_u\right)\right\|^2 - M^2 \right) \\ + (1-s_v)(s_v-s_u) \left( \left\| \sum_{k=1}^{M} \exp\left(j\frac{2\pi}{\lambda}d_{y_k}h_v\right)\right\|^2 - M^2 \right) \end{array} \right) \\ +& - s_u(1-s_v)(s_v-s_u) \frac{U_{SNR}^2 \sigma_n^2}{\sigma_s^2} \times \\ +& \times \left( \begin{array}{@{}l@{}} \sum_{k=1}^{M} \exp\left(j\frac{2\pi d_{y_k}h_v}{\lambda}\right) \sum_{k=1}^{M} \exp\left(j\frac{2\pi d_{x_k}h_u}{\lambda}\right) \sum_{k=1}^{M} \exp\left(-j\frac{2\pi(d_{x_k}h_u+d_{y_k}h_v)}{\lambda}\right) \\ + \sum_{k=1}^{M} \exp\left(-j\frac{2\pi d_{y_k}h_v}{\lambda}\right) \sum_{k=1}^{M} \exp\left(-j\frac{2\pi d_{x_k}h_u}{\lambda}\right) \sum_{k=1}^{M} \exp\left(j\frac{2\pi(d_{x_k}h_u+d_{y_k}h_v)}{\lambda}\right) \\ - M \left\| \sum_{k=1}^{M} \exp\left(j\frac{2\pi}{\lambda}d_{y_k}h_v\right) \right\|^2 - M \left\| \sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}d_{x_k}h_u\right) \right\|^2 \\ - M \left\| \sum_{k=1}^{M} \exp\left(-j\frac{2\pi}{\lambda}(d_{x_k}h_u+d_{y_k}h_v)\right) \right\|^2 + M^3 \end{array} \right) \Bigg)^{-T} +\end{aligned} +. (.29) +$$ + +$$ +\eta_\theta(s_u, 0, h_u, 0) = \left( 1 + s_u(1-s_u)U_{SNR} \left( M^2 - \left\| \sum_{k=1}^{M} \exp(-j \frac{2\pi}{\lambda} d_{x_k} h_u) \right\|^2 \right) \right)^{-T}. 
(.30) +$$ + +$$ +\eta_\theta(0, s_v, 0, h_v) = \left( 1 + s_v(1 - s_v)U_{SNR} \left( M^2 - \left\| \sum_{k=1}^{M} \exp(-j\frac{2\pi}{\lambda}d_{y_k}h_v) \right\|^2 \right) \right)^{-T}. \quad (.31) +$$ +---PAGE_BREAK--- + +$$ +\begin{aligned} +\eta_{\theta}(1 - s_u, s_v, -\mathbf{h}_u, \mathbf{h}_v) = & \\ +& \Bigg( 1 - U_{SNR} \left( \begin{array}{@{}l@{}} s_v(1-s_u) \left( \left\| \sum_{k=1}^{M} \exp(-j\frac{2\pi}{\lambda}(d_{x_k}h_u + d_{y_k}h_v)) \right\|^2 - M^2 \right) \\ + s_v(s_u-s_v) \left( \left\| \sum_{k=1}^{M} \exp(-j\frac{2\pi}{\lambda}d_{x_k}h_u) \right\|^2 - M^2 \right) \\ + (1-s_u)(s_u-s_v) \left( \left\| \sum_{k=1}^{M} \exp(-j\frac{2\pi}{\lambda}d_{y_k}h_v) \right\|^2 - M^2 \right) \end{array} \right) \\ +& - s_v(1-s_u)(s_u-s_v) \frac{U_{SNR}^2 \sigma_n^2}{\sigma_s^2} \times \\ +& \times \left( \begin{array}{@{}l@{}} \sum_{k=1}^{M} \exp(j\frac{2\pi d_{y_k} h_v}{\lambda}) \sum_{k=1}^{M} \exp(j\frac{2\pi d_{x_k} h_u}{\lambda}) \sum_{k=1}^{M} \exp(-j\frac{2\pi(d_{x_k}h_u+d_{y_k}h_v)}{\lambda}) \\ + \sum_{k=1}^{M} \exp(-j\frac{2\pi d_{y_k} h_v}{\lambda}) \sum_{k=1}^{M} \exp(-j\frac{2\pi d_{x_k} h_u}{\lambda}) \sum_{k=1}^{M} \exp(j\frac{2\pi(d_{x_k}h_u+d_{y_k}h_v)}{\lambda}) \\ - M \left\| \sum_{k=1}^{M} \exp(-j\frac{2\pi}{\lambda}d_{y_k}h_v) \right\|^2 - M \left\| \sum_{k=1}^{M} \exp(-j\frac{2\pi}{\lambda}d_{x_k}h_u) \right\|^2 \\ - M \left\| \sum_{k=1}^{M} \exp(-j\frac{2\pi}{\lambda}(d_{x_k}h_u+d_{y_k}h_v)) \right\|^2 + M^3 \end{array} \right) \Bigg)^{-T} +\end{aligned} +\tag{.32} +$$ + +One notices that the set of functions $\eta_\theta(\alpha, \beta, u, v)$ does not depend on $\theta$. Consequently, it is also easy to obtain the Weiss-Weinstein bound (through the set of functions $\eta(\alpha, \beta, u, v)$) by using the results of Section 4.2 whatever the considered prior on $\theta$ (only the integral $\int_\Theta \frac{p^{\alpha+\beta}(\theta+u)}{p^{\alpha+\beta-1}(\theta)} d\theta$ has to be calculated or computed numerically). 
In our case of a uniform prior, the results are straightforward and lead to Eqn. (41), (42) and (43). + +Appendix .6. *Proof of Eqn. (48), (49) and (50)* + +The set of functions $\zeta_\theta(\boldsymbol{\mu}, \boldsymbol{\rho})$ is obtained from Eqn. (18). Since $\mathbf{R}_n = \sigma_n^2 \mathbf{I}$, one obtains $\zeta_\theta(\mathbf{h}_u, \mathbf{0}) = \zeta_\theta(-\mathbf{h}_u, \mathbf{0}) = 2C_{SNR} \left(M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}d_{x_k}\mathbf{h}_u\right)\right)$, +$$ +\begin{align*} +\zeta_\theta(\mathbf{h}_v, \mathbf{0}) &= \zeta_\theta(-\mathbf{h}_v, \mathbf{0}) = 2C_{SNR} \left( M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}d_{y_k}\mathbf{h}_v\right) \right), & \zeta_\theta(\mathbf{h}_u, -\mathbf{h}_u) &= \zeta_\theta(-\mathbf{h}_u, \mathbf{h}_u) = 2C_{SNR} \left( M - \sum_{k=1}^{M} \cos\left(\frac{4\pi}{\lambda}d_{x_k}\mathbf{h}_u\right) \right), \\ +\zeta_\theta(\mathbf{h}_v, -\mathbf{h}_v) &= \zeta_\theta(-\mathbf{h}_v, \mathbf{h}_v) = 2C_{SNR} \left( M - \sum_{k=1}^{M} \cos\left(\frac{4\pi}{\lambda}d_{y_k}\mathbf{h}_v\right) \right), & \zeta_\theta(\mathbf{h}_u, \mathbf{h}_v) &= \zeta_\theta(-\mathbf{h}_u, -\mathbf{h}_v) = 2C_{SNR} \left( M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}(d_{x_k}\mathbf{h}_u - d_{y_k}\mathbf{h}_v)\right) \right), \\ +\zeta_\theta(\mathbf{h}_u, -\mathbf{h}_v) &= \zeta_\theta(-\mathbf{h}_u, \mathbf{h}_v) = 2C_{SNR} \left( M - \sum_{k=1}^{M} \cos\left(\frac{2\pi}{\lambda}(d_{x_k}\mathbf{h}_u + d_{y_k}\mathbf{h}_v)\right) \right), & \zeta_\theta(\boldsymbol{\mu}, \boldsymbol{\mu}) &= 0 \text{ for any } \boldsymbol{\mu}. +\end{align*} +$$ + +Again, since the set of functions $\zeta_\theta(\mu, \rho)$ does not depend on $\theta$, the set of functions $\eta_\theta(\alpha, \beta, u, v)$ is given by plugging the above equations into Eqn. (17) and does not depend on $\theta$. 
Consequently, as in unconditional case, the set of functions $\eta(\alpha, \beta, u, v)$ is obtained by using the results of Section 4.2 whatever the considered prior on $\theta$. In our case of a uniform prior, the results are straightforward and leads to Eqn. (48), (49) and (50). +---PAGE_BREAK--- + +References + +[1] R. J. McAulay and L. P. Seidman, "A useful form of the Barankin lower bound and its application to PPM threshold analysis," *IEEE Transactions on Information Theory*, vol. 15, no. 2, pp. 273-279, Mar. 1969. + +[2] R. J. McAulay and E. M. Hofstetter, "Barankin bounds on parameter estimation," *IEEE Transactions on Information Theory*, vol. 17, no. 6, pp. 669-676, Nov. 1971. + +[3] E. Chaumette, J. Galy, A. Quinlan, and P. Larzabal, "A new Barankin bound approximation for the prediction of the threshold region performance of maximum likelihood estimators," *IEEE Transactions on Signal Processing*, vol. 56, no. 11, pp. 5319-5333, Nov. 2008. + +[4] K. Todros and J. Tabrikian, "General classes of performance lower bounds for parameter estimation - part I: non-Bayesian bounds for unbiased estimators," *IEEE Transactions on Information Theory*, vol. 56, no. 10, pp. 5045-5063, Oct. 2010. + +[5] H. L. Van Trees and K. L. Bell, Eds., *Bayesian Bounds for Parameter Estimation and Nonlinear Filtering/Tracking*. New-York, NY, USA: Wiley/IEEE Press, Sep. 2007. + +[6] J. Ziv and M. Zakai, "Some lower bounds on signal parameter estimation," *IEEE Transactions on Information Theory*, vol. 15, no. 3, pp. 386-391, May 1969. + +[7] S. Bellini and G. Tartara, "Bounds on error in signal parameter estimation," *IEEE Transactions on Communications*, vol. 22, no. 3, pp. 340-342, Mar. 1974. + +[8] K. L. Bell, Y. Steinberg, Y. Ephraim, and H. L. Van Trees, "Extended Ziv-Zakaï lower bound for vector parameter estimation," *IEEE Transactions on Information Theory*, vol. 43, no. 2, pp. 624-637, Mar. 1997. + +[9] A. J. Weiss and E. 
Weinstein, "A lower bound on the mean square error in random parameter estimation," *IEEE Transactions on Information Theory*, vol. 31, no. 5, pp. 680-682, Sep. 1985. + +[10] I. Rapoport and Y. Oshman, "Weiss-Weinstein lower bounds for markovian systems. part I: Theory," *IEEE Transactions on Signal Processing*, vol. 55, no. 5, pp. 2016-2030, May 2007. + +[11] A. Renaux, P. Forster, P. Larzabal, C. D. Richmond, and A. Nehorai, "A fresh look at the Bayesian bounds of the Weiss-Weinstein family," *IEEE Transactions on Signal Processing*, vol. 56, no. 11, pp. 5334-5352, Nov. 2008. + +[12] K. Todros and J. Tabrikian, "General classes of performance lower bounds for parameter estimation - part II: Bayesian bounds," *IEEE Transactions on Information Theory*, vol. 56, no. 10, pp. 5064-5082, Oct. 2010. + +[13] Y. Rockah and P. Schultheiss, "Array shape calibration using sources in unknown locations-part I: Far-field sources," *IEEE Transactions on Acoustics, Speech, and Signal Processing*, vol. 35, no. 3, pp. 286-299, Mar. 1987. + +[14] I. Reuven and H. Messer, "A Barankin-type lower bound on the estimation error of a hybrid parameter vector," *IEEE Transactions on Information Theory*, vol. 43, no. 3, pp. 1084-1093, May 1997. + +[15] S. Bay, B. Geller, A. Renaux, J.-P. Barbot, and J.-M. Brossier, "On the hybrid Cramér-Rao bound and its application to dynamical phase estimation," *IEEE Signal Processing Letters*, vol. 15, pp. 453-456, 2008. + +[16] H. L. Van Trees, *Detection, Estimation and Modulation Theory*. New-York, NY, USA: John Wiley & Sons, 1968, vol. 1. + +[17] B. Ottersten, M. Viberg, P. Stoica, and A. Nehorai, "Exact and large sample maximum likelihood techniques for parameter estimation and detection in array processing," in *Radar Array Processing*, S. S. Haykin, J. Litva, and T. J. Shepherd, Eds. Berlin: Springer-Verlag, 1993, ch. 4, pp. 99-151. + +[18] K. L. Bell, Y. Ephraim, and H. L. 
Van Trees, "Explicit Ziv-Zakaï lower bound for bearing estimation," *IEEE Transactions on Signal Processing*, vol. 44, no. 11, pp. 2810-2824, Nov. 1996. + +[19] T. J. Nohara and S. Haykin, "Application of the Weiss-Weinstein bound to a two dimensional antenna array," *IEEE Transactions on Acoustics, Speech, and Signal Processing*, vol. 36, no. 9, pp. 1533-1534, Sep. 1988. + +[20] H. Nguyen and H. L. Van Trees, "Comparison of performance bounds for DOA estimation," in Proc. of IEEE Workshop on Statistical Signal and Array Processing (SSAP), vol. 1, Jun. 1994, pp. 313-316. +---PAGE_BREAK--- + +[21] F. Athley, "Optimization of element positions for direction finding with sparse arrays," in *Proc. of IEEE Workshop on Statistical Signal Processing (SSP)*, vol. 1, 2001, pp. 516–519. + +[22] W. Xu, A. B. Baggeroer, and C. D. Richmond, "Bayesian bounds for matched-field parameter estimation," *IEEE Transactions on Signal Processing*, vol. 52, no. 12, pp. 3293–3305, Dec. 2004. + +[23] A. Renaux, "Weiss-Weinstein bound for data aided carrier estimation," *IEEE Signal Processing Letters*, vol. 14, no. 4, pp. 283–286, Apr. 2007. + +[24] D. T. Vu, A. Renaux, R. Boyer, and S. Marcos, "Closed-form expression of the Weiss-Weinstein bound for 3D source localization: the conditional case," in *Proc. of IEEE Workshop on Sensor Array and Multi-channel Processing (SAM)*, vol. 1, Kibutz Ma'ale Hahamisha, Israel, Oct. 2010, pp. 125–128. + +[25] S. M. Kay, *Fundamentals of Statistical Signal Processing: Estimation Theory*. Upper Saddle River, NJ, USA: Prentice-Hall, Inc., Mar. 1993, vol. 1. + +[26] H. L. Van Trees, *Detection, Estimation and Modulation theory: Optimum Array Processing*. New-York, NY, USA: John Wiley & Sons, Mar. 2002, vol. 4. + +[27] Z. Ben Haim and Y. Eldar, "A comment on the Weiss-Weinstein bound for constrained parameter sets," *IEEE Transactions on Information Theory*, vol. 54, no. 10, pp. 4682–4684, Oct. 2008. + +[28] P. Stoica and A. 
Nehorai, "Performances study of conditional and unconditional direction of arrival estimation," *IEEE Transactions on Acoustics, Speech, and Signal Processing*, vol. 38, no. 10, pp. 1783–1795, Oct. 1990. + +[29] K. L. Bell, Y. Ephraim, and H. L. Van Trees, "Explicit Ziv-Zakaï lower bounds for bearing estimation using planar arrays," in *Proc. of Workshop on Adaptive Sensor Array Processing (ASAP)*. Lexington, MA, USA: MIT Lincoln Laboratory, Mar. 1996. + +[30] I. Reuven and H. Messer, "The use of the Barankin bound for determining the threshold SNR in estimating the bearing of a source in the presence of another," in *Proc. of IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)*, vol. 3, Detroit, MI, USA, May 1995, pp. 1645–1648. + +[31] J. Li and R. T. Compton, "Maximum likelihood angle estimation for signals with known waveforms," *IEEE Transactions on Signal Processing*, vol. 41, no. 9, pp. 2850–2862, Sep. 93. + +[32] M. Cedervall and R. L. Moses, "Efficient maximum likelihood DOA estimation for signals with known waveforms in presence of multipath," *IEEE Transactions on Signal Processing*, vol. 45, no. 3, pp. 808–811, Mar. 1997. + +[33] J. Li, B. Halder, P. Stoica, and M. Viberg, "Computationally efficient angle estimation for signals with known waveforms," *IEEE Transactions on Signal Processing*, vol. 43, no. 9, pp. 2154–2163, Sep. 1995. + +[34] E. Weinstein and A. J. Weiss, "A general class of lower bounds in parameter estimation," *IEEE Transactions on Information Theory*, vol. 34, no. 2, pp. 338–342, Mar. 1988. + +[35] P. S. La Rosa, A. Renaux, A. Nehorai, and C. H. Muravchik, "Barankin-type lower bound on multiple change-point estimation," *IEEE Transactions on Signal Processing*, vol. 58, no. 11, pp. 5534–5549, Nov. 2010. + +[36] H. L. Van Trees, *Detection, Estimation and Modulation Theory: Radar-Sonar Signal Processing and Gaussian Signals in Noise*. New-York, NY, USA: John Wiley & Sons, Sep. 2001, vol. 3. + +[37] K. L. 
Bell, "Performance bounds in parameter estimation with application to bearing estimation," Ph.D. dissertation, George Mason University, Fairfax, VA, USA, 1995. + +[38] W. Xu, A. B. Baggeroer, and K. L. Bell, "A bound on mean-square estimation error with background parameter mismatch," *IEEE Transactions on Information Theory*, vol. 50, no. 4, pp. 621–632, Apr. 2004. + +[39] J. Tabrikian and J. L. Krolik, "Barankin bounds for source localization in an uncertain ocean environment," *IEEE Transactions on Signal Processing*, vol. 47, no. 11, pp. 2917–2927, Nov. 1999. + +[40] H. Gazzah and S. Marcos, "Cramér-Rao bounds for antenna array design," *IEEE Transactions on Signal Processing*, vol. 54, no. 1, pp. 336–345, Jan. 2006. +---PAGE_BREAK--- + +Figure .1: 3D source localization using a planar array antenna. +---PAGE_BREAK--- + +Figure .2: Ziv-Zakai bound, Weiss-Weinstein bound and empirical MSE of the MAP estimator: unconditional case. +---PAGE_BREAK--- + +Figure 3: Weiss-Weinstein bounds of the V-shaped array w.r.t. the opening angle $\Delta$. \ No newline at end of file diff --git a/samples_new/texts_merged/4523932.md b/samples_new/texts_merged/4523932.md new file mode 100644 index 0000000000000000000000000000000000000000..725598f7ec852cf6d132a840b877b7a4d781547a --- /dev/null +++ b/samples_new/texts_merged/4523932.md @@ -0,0 +1,952 @@ + +---PAGE_BREAK--- + +# Differential Evolution - A simple and efficient adaptive scheme for global optimization over continuous spaces + +by Rainer Storn¹) and Kenneth Price²) + +TR-95-012 + +March 1995 + +## Abstract + +A new heuristic approach for minimizing possibly nonlinear and non differentiable continuous space functions is presented. By means of an extensive testbed, which includes the De Jong functions, it will be demonstrated that the new method converges faster and with more certainty than Adaptive Simulated Annealing as well as the Annealed Nelder&Mead approach, both of which have a reputation for being very powerful. 
The new method requires few control variables, is robust, easy to use and lends itself very well to parallel computation. + +¹) International Computer Science Institute, 1947 Center Street, Berkeley, CA 94704-1198, Suite 600, Fax: 510-643-7684. E-mail: storn@icsi.berkeley.edu. On leave from Siemens AG, ZFE T SN 2, Otto-Hahn-Ring 6, D-81739 Muenchen, Germany. Fax: 01149-636-44577, E-mail:rainer.storn@zfe.siemens.de. + +²) 836 Owl Circle, Vacaville, CA 95687, kprice@solano.community.net. +---PAGE_BREAK--- + +# Introduction + +Problems which involve global optimization over continuous spaces are ubiquitous throughout the scientific community. In general, the task is to optimize certain properties of a system by pertinently choosing the system parameters. For convenience, a system's parameters are usually represented as a vector. The standard approach to an optimization problem begins by designing an objective function that can model the problem's objectives while incorporating any constraints. Especially in the circuit design community, methods are in use which do not need an objective function [1], [2], [3]. Although these methods can make formulating a problem simpler, they are usually inferior to techniques which make full use of an objective function. Consequently, we restrict ourselves to optimization methods which fully use the objective function. In most cases, the objective function is designed to transform the optimization problem into a minimization task. To this end, we will limit our investigation in the following to minimization problems. + +When the objective function is nonlinear and non differentiable, direct search approaches are the methods of choice. The best known of these are the algorithms by Nelder&Mead [4], by Hooke&Jeeves [4], genetic algorithms [5], and evolutionary algorithms [6], [7] with the latter being truly continuous counterparts of genetic algorithms. 
At the heart of every direct search method is a strategy that generates variations of the parameter vectors. Once a variation is generated, a decision must be made whether or not to accept the newly derived parameters. All basic direct search methods use the greedy criterion to make this decision. Under the greedy criterion, a new parameter vector is accepted if and only if it reduces the value of the objective function. Although the greedy decision process converges fairly fast, it runs the risk of becoming trapped by a local minimum. Inherently parallel search techniques like genetic and evolutionary algorithms have some built-in safeguards to forestall misconvergence. By running several vectors simultaneously, superior parameter configurations can help other vectors escape local minima. Another method which can extricate a parameter vector from a local minimum is Simulated Annealing [8], [9], [10]. Annealing relaxes the greedy criterion by occasionally permitting an uphill move. Such moves potentially allow a parameter vector to climb out of a local minimum. As the number of iterations increases, the probability of accepting an uphill move decreases. In the long run, this leads to the greedy criterion. While all direct search methods lend themselves to annealing, it has mostly been used just for the Random Walk, which itself is the simplest case of an evolutionary algorithm [6]. Nevertheless, attempts have been made to anneal other direct searches like the method of Nelder&Mead [10] and genetic algorithms [8], [11]. + +Users generally demand that a practical optimization technique should fulfill three requirements. First, the method should find the true global minimum, regardless of the initial system parameter values. Second, convergence should be fast. Third, the program should have a minimum of control parameters so that it will be easy to use. 
In our search for a fast and easy to use "sure fire" technique, we developed a method which is not only astonishingly simple, but also performs extremely well on a wide variety of test problems. It is inherently parallel and hence lends itself to computation via a network of computers or processors. The basic strategy employs the difference of two randomly selected parameter vectors as the source of random variations for a third parameter vector. In the following, we present a more rigorous description of the new optimization method which we call Differential Evolution. +---PAGE_BREAK--- + +**Problem Formulation** + +Consider a system with the real valued properties + +$$g_m; m = 0, 1, 2, \dots, P-1 \tag{1}$$ + +which constitute the objectives of the system to be optimized. + +Additionally, there may be real valued constraints + +$$g_m; m = P, P+1, \dots, P+C-1 \tag{2}$$ + +which describe properties of the system that need not be optimized but neither shall be degraded. For example, one may wish to design a mobile phone with the dual objectives of maximizing the transmission power $g_1$ and minimizing the noise $g_2$ of the audio amplifier while simultaneously keeping the battery life $g_3$ above a certain threshold. The properties $g_1$ and $g_2$ represent objectives to be optimized whereas $g_3$ is a constraint. Let all properties of the system be dependent on the real valued parameters + +$$x_j; j = 0, 1, 2, \dots, D-1. \tag{3}$$ + +In the case of the mobile phone the parameters could be resistor and capacitor values. For most technical systems realizability requires + +$$x_j \in [x_{jI}, x_{jH}] . \tag{4}$$ + +Usually, restrictions on the $x_j$ will be incorporated into the collection $g_m$, $m \ge P$, of constraints. Optimization of the system means to vary the D-dimensional parameter vector + +$$\underline{x} = (x_0, x_1, \dots, x_{D-1})^T \tag{5}$$ + +until the properties $g_m$ are optimized and the constraints $g_m$, $m \ge P$, are met. 
An optimization task can always be reformulated as the minimization problem + +$$\min f_m(\underline{x}) \tag{6}$$ + +where $f_m(\underline{x})$ represents the function by which the property $g_m$ is calculated and its optimization or constraint preservation is represented as the minimization of $f_m(\underline{x})$. All functions $f_m(\underline{x})$ can be combined into a single objective function $z(\underline{x})$ [2], [12], which usually is computed either via the weighted sum + +$$z(\underline{x}) = \sum_{m=0}^{P+C-1} w_m \cdot f_m(\underline{x}) \tag{7}$$ + +or via + +$$z(\underline{x}) = \max(w_m \cdot f_m(\underline{x})) \tag{8}$$ + +with + +$$w_m > 0. \tag{9}$$ + +The weighting factors $w_m$ are used to define the importance associated with the different objectives and constraints as well as to normalize different physical units. The optimization task can now be restated as + +$$\min z(\underline{x}) \tag{10}$$ + +The min-max formulation (8) and (10) guarantees that all local minima, the Pareto critical points, including the possibly multiple global minima, the Pareto points, can at least theoretically be found [2], [12]. For the objective function (7) and (10) this is true only if the region of realizability of $\underline{x}$ is convex [1], [2], which in general does not apply in most technical problems. +---PAGE_BREAK--- + +# The Method of Differential Evolution + +Differential Evolution (DE) is a novel parallel direct search method which utilizes NP parameter vectors + +$$ \underline{x}_{i,G}, i = 0, 1, 2, \dots, \text{NP-1}. \qquad (11) $$ + +as a population for each generation G. NP doesn't change during the minimization process. The initial population is chosen randomly if nothing is known about the system. As a rule, we will assume a uniform probability distribution for all random decisions unless otherwise stated. 
In case a preliminary solution is available, the initial population is often generated by adding normally distributed random deviations to the nominal solution $\underline{x}_{\text{nom},0}$. The crucial idea behind DE is a new scheme for generating trial parameter vectors. DE generates new parameter vectors by adding the weighted difference vector between two population members to a third member. If the resulting vector yields a lower objective function value than a predetermined population member, the newly generated vector replaces the vector with which it was compared. The comparison vector can, but need not be part of the generation process mentioned above. In addition the best parameter vector $\underline{x}_{\text{best},G}$ is evaluated for every generation G in order to keep track of the progress that is made during the minimization process. + +Extracting distance and direction information from the population to generate random deviations results in an adaptive scheme with excellent convergence properties. We tried several variants of DE, the two most promising of which we subsequently present in greater detail. + +## Scheme DE1 + +Our first variant of DE works as follows: for each vector $\underline{x}_{i,G}$, $i = 0,1,2,\dots,\text{NP}-1$, a trial vector $\underline{v}$ is generated according to + +$$ \underline{v} = \underline{x}_{r_1,G} + F \cdot (\underline{x}_{r_2,G} - \underline{x}_{r_3,G}), \qquad (12) $$ + +with + +$$ r_1, r_2, r_3 \in [0, \text{NP} - 1], \text{ integer and mutually different, and } F > 0. \qquad (13) $$ + +The integers $r_1, r_2$ and $r_3$ are chosen randomly from the interval $[0, \text{NP}-1]$ and are different from the running index $i$. $F$ is a real and constant factor which controls the amplification of the differential variation $(\underline{x}_{r_2,G} - \underline{x}_{r_3,G})$. Fig. 1 shows a two dimensional example that illustrates the different vectors which play a part in DE1. 
+---PAGE_BREAK--- + +Fig.1: Two dimensional example of an objective function showing its contour lines and the process for generating **v** in scheme DE1. + +In order to increase the diversity of the parameter vectors, the vector + +$$ \underline{u} = (u_1, u_2, \dots, u_D)^T \qquad (14) $$ + +with + +$$ u_j = \begin{cases} v_j & \text{for } j = \langle n \rangle_D, \langle n+1 \rangle_D, \dots, \langle n+L-1 \rangle_D \\ (x_{i,G})_j & \text{otherwise} \end{cases} \qquad (15) $$ + +is formed where the acute brackets $\langle \rangle_D$ denote the modulo function with modulus D. + +I.e. a certain sequence of the vector elements of $\underline{u}$ are identical to the elements of $\underline{v}$, the other elements of $\underline{u}$ acquire the original values of $x_{i,G}$. Choosing a subgroup of parameters for mutation is similar to a process known as crossover in evolution theory. This idea is illustrated in Fig. 2 for D=7, n=2 and L=3. The starting index *n* in (15) is a randomly chosen integer from the interval [0, D-1]. The integer *L* is drawn from the interval [0, D-1] with the probability Pr(L=v) = (CR)^v. CR ∈ [0,1] is the crossover probability and constitutes a control variable for the DE1-scheme. The random decisions for both *n* and *L* are made anew for each trial vector *v*. +---PAGE_BREAK--- + +Fig. 2: Illustration of the crossover process for D=7, n=2 and L=3. + +In order to decide whether the new vector **u** shall become a population member of generation G+1, it will be compared to **x**_{i,G}. If vector **u** yields a smaller objective function value than **x**_{i,G}, **x**_{i,G+1} is set to **u**, otherwise the old value **x**_{i,G} is retained. 
+ +## Scheme DE2 + +Basically, scheme DE2 works the same way as DE1 but generates the vector **v** according to + +$$ \underline{v} = \underline{x}_{i,G} + \lambda \cdot (\underline{x}_{best,G} - \underline{x}_{i,G}) + F \cdot (\underline{x}_{r2,G} - \underline{x}_{r3,G}), \quad (16) $$ + +introducing an additional control variable $\lambda$. The idea behind $\lambda$ is to provide a means to enhance the greediness of the scheme by incorporating the current best vector $\underline{x}_{best,G}$. This feature can be useful for non-critical objective functions. Fig. 3 illustrates the vector-generation process defined by (16). The construction of $\underline{u}$ from $\underline{v}$ and $\underline{x}_{i,G}$ as well as the decision process are identical to DE1. +---PAGE_BREAK--- + +Fig.3: Two dimensional example of an objective function showing its contour lines and the process for generating *v* in scheme DE2. + +**Competing minimization methods** + +In order to compare the DE method with other global minimizing strategies, we looked for approaches where the source code is readily available, which are known to be powerful and which are capable of coping with nonlinear and non differentiable functions. Two methods in particular piqued our interest. The first was the annealed version of the Nelder&Mead strategy (ANM) [10] which is appealing because of its adaptive scheme for generating random parameter deviations. When the annealing part is switched off, a fast converging direct search method remains which is especially useful for non-critical objective functions. The basic control variables in ANM are T, the starting temperature, TF, the temperature reduction factor and NV, the number of random variations at a given temperature level. + +The second method of interest was Adaptive Simulated Annealing (ASA) [8] which claims to converge very quickly and to outperform genetic algorithms on the De Jong test suite [9]. 
Although ASA provides more than a dozen control variables, it turned out that just two of them, TEMPERATURE Ratio SCALE (TRS) and TEMPERATURE_ANNEAL_SCALE (TAS), had significant impact on the minimization process. We will compare both ANM and ASA to DE1 and DE2. During our research we also wrote an annealed version of the Hooke&Jeeves method [5] and tested two Monte Carlo methods [3] one of which used NP parallel vectors and the differential mutation scheme of DE. Although these approaches all worked, they quickly turned out not to be competitive. + +**The Testbed** + +Our function testbed contains the De Jong test functions as presented in [9] plus some additional +functions which present further distinctive difficulties for a global minimizer: +---PAGE_BREAK--- + +1) First De Jong function (sphere) + +$$f_1(\underline{x}) = \sum_{j=0}^{2} x_j^2; \qquad x_j \in [-5.12, 5.12] \tag{17}$$ + +$f_1(\underline{x})$ is considered to be a very simple task for every serious minimization method. The minimum is +$f_1(0) = 0$. + +2) Second De Jong function (Rosenbrock's saddle) + +$$f_2(\underline{x}) = 100 \cdot (x_0^2 - x_1)^2 + (1 - x_0)^2; \qquad x_j \in [-2.048, 2.048] \tag{18}$$ + +Although $f_2(\underline{x})$ has just two parameters, it has the reputation of being a difficult minimization problem. The minimum is $f_2(\underline{1})=0$. + +3) Third De Jong function (step) + +$$f_3(\underline{x}) = 30 + \sum_{j=0}^{4} \lfloor x_j \rfloor; \qquad x_j \in [-5.12, 5.12] \tag{19}$$ + +For $f_3(\underline{x})$ it is necessary to incorporate the constraints imposed on the $x_j$ into the objective function. +We implemented this according to the min-max formulation (8). The minimum is +$f_3(-5-\epsilon)=0$ where $\epsilon \in [0,0.12]$. The step function exhibits many plateaus which pose a considerable +problem for many minimization algorithms. 
+ +4) Modified fourth De Jong function (quartic) + +$$f_4(\underline{x}) = \sum_{j=0}^{29} (x_j^4 \cdot (j+1) + \eta); \qquad x_j \in [-1.28, 1.28] \tag{20}$$ + +This function is designed to test the behavior of a minimization algorithm in the presence of noise. +In the original De Jong function, $\eta$ is a random variable produced by Gaussian noise having the +distribution N(0,1). According to [9], this function appears to be flawed as no definite global +minimum exists. In response to the problem, we followed the suggestion given in [9] and chose $\eta$ to +be a random variable with uniform distribution and bounded by [0,1). In contrast to the original +version of De Jong's quartic function, we also included $\eta$ inside the summation instead of just +adding $\eta$ to the summation result. This change makes $f_4(\underline{x})$ more difficult to minimize. The +functional minimum is $f_4(0) \le 30 \cdot E[\eta] = 15$, where $E[\eta]$ is the expectation of $\eta$. + +5) Fifth De Jong function (Shekel's Foxholes) + +$$f_5(\underline{x}) = \frac{1}{0.002 + \sum_{i=0}^{24} \frac{1}{i + \sum_{j=0}^{1}(x_j - a_{ij})^6}}; \qquad x_j \in [-65.536, 65.536] \tag{21}$$ + +with $a_{i0}=\{-32, -16, 0, 16, 32\}$ for $i = 0,1,2,3,4$ and $a_{i0}=a_{i \bmod 5, 0}$ for $i > 4$, + +as well as $a_{i1}=\{-32, -16, 0, 16, 32\}$ for $i = 0,5,10,15,20$ and $a_{i+k,1}=a_{i,1}$, $k=1,2,3,4$ + +The global minimum for this function is $f_5(-32,-32) \approx 0.998004$. +---PAGE_BREAK--- + +6) Corana's parabola [8], [13] + +$$ +f_6(\underline{x}) = \sum_{j=0}^{3} \begin{cases} 0.15(z_j - 0.05 \cdot \operatorname{sgn}(z_j))^2 \cdot d_j & \text{if } |x_j - z_j| < 0.05 \\ d_j \cdot x_j^2 & \text{otherwise} \end{cases}; x_j \in [-1000, 1000] \quad (22) +$$ + +with +$$ +z_j = \left\lfloor \frac{|x_j|}{0.2} + 0.49999 \right\rfloor \cdot \operatorname{sgn}(x_j) \cdot 0.2 +$$ + +and +$d_j = \{1,1000,10,100\}$ + +$f_6(\underline{x})$ defines a paraboloid whose axes are parallel to the coordinate axes. 
It is riddled with a set of holes that increase in depth the closer one approaches the origin. Any minimization algorithm that goes strictly downhill will almost always be captured by the holes. The minimum here is $f_6(\underline{x}) = 0$, with $|\underline{x}_j|<0.05$, $j=0,1,2,3$. + +7) Griewangk's function [14] + +$$ +f_7(\underline{x}) = \sum_{j=0}^{9} \frac{x_j^2}{4000} - \prod_{j=0}^{9} \cos\left(\frac{x_j}{\sqrt{j+1}}\right) + 1; \quad x_j \in [-400, 400] \qquad (23) +$$ + +Like test function f₆(x), f₇(x) has many local minima so that it is very difficult to find the true +minimum f₇(0) = 0. + +8) Zimmermann's problem [15] + +$$ +f_8(\underline{x}) = 9 - x_0 - x_1; \qquad x_j > 0, j=1,2 \tag{24} +$$ + +with +$$ +(x_0 - 3)^2 + (x_1 - 2)^2 \leq 16 \tag{25} +$$ + +and +$$ +x_0 \cdot x_1 \leq 14 \tag{26} +$$ + +Finding the minimum $f_8(7,2)=0$ poses a special problem, because the minimum is located at the corner of the constrained region defined by (24), (25) and (26). + +9) Polynomial fitting problem + +$$ +f_9(\underline{x}, z) = \sum_{j=0}^{2k} x_j \cdot z^j, k \text{ integer and } >0, \qquad (27) +$$ + +is a polynomial of degree 2k in z with the coefficients x_j such that + +$$ +f_9(\underline{x}, z) \in [-1,1] \quad \text{for} \quad z \in [-1,1] \tag{28} +$$ + +and +$f_9(\underline{x},z) \ge T_{2k}(1.2)$ for $z = \pm 1.2$ +(29) + +with $T_{2k}(z)$ being a Chebychev Polynomial of degree 2k. The Chebychev Polynomials are defined recursively according to the difference equation $T_{n+1}(z) = 2z \cdot T_n(z) - T_{n-1}(z)$, $n$ integer and $> 0$, with the initial conditions $T_0(z)=1$ and $T_1(z)=z$. The solution to the polynomial fitting problem is, of course, $f_9(\underline{x}, z) = T_{2k}(z)$, a polynomial which oscillates between -1 and 1 when its argument $z$ is between -1 and 1. Outside this "tube" the polynomial rises steeply in direction of high positive ordinate values. 
The polynomial fitting problem has its roots in electronic filter design [16] and +---PAGE_BREAK--- + +challenges an optimization procedure by forcing it to find parameter values with grossly different magnitudes, something very common in technical systems. In our test suite we employed + +$$T_8(z) = 1 - 32z^2 + 160z^4 - 256z^6 + 128z^8 \quad (30)$$ + +with + +$$T_8(1.2) \approx 72.6606669 \quad (31)$$ + +as well as + +$$T_{16}(z) = 1 - 128z^2 + 2688z^4 - 21504z^6 + 84480z^8 - \\ +180224z^{10} + 212992z^{12} - 131072z^{14} + 32768z^{16} \quad (32)$$ + +with + +$$T_{16}(1.2) \approx 10558.1450229. \quad (33)$$ + +and used the weighted sum (7) of squared errors in order to transform the above constrained optimization problem into an objective function to be minimized. The starting values for the parameters were drawn randomly from the interval [-100,100] for (30), (31) and [-1000,1000] for (32), (33). + +## Test Results + +We tried to optimize each of the four algorithms by experimenting to find the control settings which provided fastest and smoothest convergence. Table I contains our choice of control variable settings for each minimization algorithm and each test function along with the averaged number of function evaluations (nfe) which were required to find the global minimum. + +221671280421161·10-6\n
fi(x)ANMASADE1DE2 (F=1)
TTFNVnfeTRSTASnfeNPFCRnfeNPλCRnfe
10n.a.1951·10-510397100.50.349060.950.5392
20n.a.11061·10-5100001127560.950.574660.950.5615
33000.9920902581·10-7100354100.80.3915200.950.21300
43000.9830-1·10-51004812100.750.52378100.950.22873
530000.99550-1·10-51001379150.90.3735200.950.2828
65·1060.995100-1·10-51003581100.40.2834100.90.21125
7100.9950-1·10-50.1-301.0.3200.990.2
850.955
\tbody>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
fi(x)(x)T(x)TF(x)NV(x)nfe(x)TRS(x)TAS(x)nfe(x)NP(x)F(x)CR(x)nfe(x)F(x)λ(x)CR(x)nfe(x)λ(x) + + + + + + + + + + + +
iTTFNVnfeTAS
TRS
zs-4/3/6/8/9/.../i-4/.../i-6/.../i-8/.../i-9/.../i-.../i-8/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/.../i-.../i-4/.../i-6/.../i-4/...//... +
TAS
TRS
zs=3/s=6/s=9/s=12/s=15/s=.../zs=3/s=6/s=9/s=12/s=15/s=.../zs=3/s=6/s=9/s=12/s=.../zs=3/s=6/s=9/s=.../zs=3/s=6/s=.../zs=3/s=.../zs=.../zs=3/s=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=.../zs=... +
TAS
TRS
zi+3/s+6/s+9/s+12/s+15/s+.../zi+3/s+6/s+9/s+12/s+15/s+.../zi+3/s+6/s+9/s+.../zi+3/s+6/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+.../zi+3/s+... +
TAS
TRS
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/z
s/zbr> + +
+ + + + + + + + + + + + + + + + + + + + + +
f(1)f(2)f(3)f(4)f(5)f(6)f(7)f(8)f(9)f(10)f(11)f(12)f(13)f(14)f(15)f(16)
+

Averaged number of function evaluations (nfe) required for finding the global minimum. A hyphen indicates misconvergence and n.a. stands for "not applicable".                                                                                                                      A hyphen indicates misconvergence and n.a. stands for "not applicable". 

+ +
+ + + + + + + + + + + + + + + +
f(7)f(8)f(9)f(10)f(11)f(12)f(13)f(14)f(15)f(16)
+ +
+ + + + + + + + + + + + + + + +
f(7)f(8)f(9)f(10)f(11)f(12)f(13)f(14)f(15)f(16)
+ +
+ + + + + + + + + + + + + + + +
f(7)f(8)f(9)f(10)f(11)f(12)f(13)f(14)f(15)f(16)
+ +
+ + + + + + + + + + + + + + + +
f(7)f(8)f(9)f(10)f(11)f(12)f(13)f(14)f(15)f(16)
+ +
+ + + + + + + + + + + + + + + +
f(7)f(8)f(9)f(10)f(11)f(12)f(13)f(14)f(15)f(16)
+ +
+ f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. +
+ +
+ f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. +
+ +
+ f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. +
+ +
+ f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. +
+ +
+ f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. +
+ +
+ f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. + f(i) is the i-th function value. +
+ +
+ \begin{tabular}{@{}l@{}} + \hline + \multicolumn{8}{c}{\textbf{Table I:}} \\ + \hline + \multicolumn{8}{c}{Averaged number of function evaluations (nfe) required for finding the global minimum. A hyphen indicates misconvergence and n.a. stands for "not applicable".} \\ + \hline + \end{tabular} +
+ +
+ \end{document} +---PAGE_BREAK--- + +If the corresponding field for the number of function evaluations contains a hyphen, the global minimum could not be found. If the number is enclosed in parentheses, not all of the test runs provided the global minimum. We executed ten test runs with randomly chosen initial parameter vectors for each test function and each minimization. 
+ +When the global minimum was 0, we defined the minimization task to be completed once the final value was obtained with an accuracy better than $10^{-6}$. For $f_4(x)$, we chose a value less than 15 to indicate the global minimum and a value less than 0.998004 in the case of $f_5(x)$. + +## Conclusion + +The Differential Evolution method (DE) for minimizing continuous space functions has been introduced and shown to be superior to Adaptive Simulated Annealing (ASA) [8] as well as the Annealed Nelder&Mead approach (ANM) [10]. DE was the only technique to converge for all of the functions in our test function suite. For those problems where ASA or ANM could find the minimum, DE usually converged faster, especially in the more difficult cases. Since DE is inherently parallel, a further significant speedup can be obtained if the algorithm is executed on a parallel machine or a network of computers. This is especially true for real world problems where computing the objective function requires a significant amount of time. + +Despite these already promising results, DE is still in its infancy and can most probably be improved. Further research might include a mathematical convergence proof like the one that exists for Simulated Annealing. A theoretically sound analysis to determine why DE converges so well would also be of great interest. Whether or not an annealed version of DE, or the combination of DE with other optimization approaches is of practical use, is still unanswered. Finally, it is important for practical applications to gain more knowledge on how to choose the control variables for DE. +---PAGE_BREAK--- + +References + +1. Brayton, H., Hachtel, G. and Sangiovanni-Vincentelli, A., A Survey of Optimization Techniques for Integrated Circuit Design, Proc. IEEE 69, 1981, pp. 1334 - 1362. + +2. Lueder, E., Optimization of Circuits with a Large Number of Parameters, Archiv f. Elektr. u. Uebertr., Band 44, Heft 2, 1990, pp 131 - 138. + +3. 
Storn, R., Constrained Optimization, Dr. Dobb's Journal, May 1995, pp. 119 - 123. + +4. Bunday, B.D. and Garside G.R., Optimisation Methods in Pascal, Edward Arnold Publ., 1987. + +5. Goldberg, D.E., Genetic Algorithms in Search, Optimization & Machine Learning, Addison-Wesley, 1989. + +6. Rechenberg, I., Evolutionsstrategie: Optimierung technischer Systeme nach Prinzipien der biologischen Evolution. Frommann-Holzboog, Stuttgart, 1973. + +7. Voigt, H. M., Fuzzy Evolutionary Algorithms, Technical Report TR-92-038 at ICSI, ftp.icsi.berkeley.edu, 1992. + +8. Ingber, L., Simulated Annealing: Practice Versus Theory, J. Mathl. Comput. Modelling, Vol. 18, No. 11, 1993, pp. 29 - 57. + +9. Ingber, L. and Rosen, B., Genetic Algorithms and Very Fast Simulated Annealing: A Comparison, J. Mathl. Comput. Modelling, Vol. 16, No. 11, 1992, pp. 87 - 100. + +10. Press, W.H., Teukolsky, S.A., Vetterling, W.T. and Flannery, B.P., Numerical Recipes in C, Cambridge University Press, 1992. + +11. Price, K., Genetic Annealing, Dr. Dobb's Journal, Oct. 1994, pp. 127 - 132. + +12. Moebus, D., Algorithmen zur Optimierung von Schaltungen und zur Loesung nichtlinearer Differentialgleichungen, Diss. am Inst. fuer Netzwerk- und Systemtheorie der Univ. Stuttgart, 1990. + +13. Corana, A., Marchesi, M., Martini, C. and Ridella, S., Minimizing Multimodal Functions of Continuous Variables with the "Simulated Annealing Algorithm", ACM Trans. Mathl. Software, March 1987, pp. 272 - 280. + +14. Griewangk, A.O., Generalized Descent for Global Optimization, JOTA, vol. 34, 1981, pp. 11 - 39. + +15. Zimmermann, W., Operations Research, Oldenbourg, 1990. + +16. Rabiner, L.R. and Gold, B., Theory and Applications of Digital Signal Processing, Prentice-Hall, Englewood Cliffs, N.J., 1975. 
\ No newline at end of file diff --git a/samples_new/texts_merged/4753802.md b/samples_new/texts_merged/4753802.md new file mode 100644 index 0000000000000000000000000000000000000000..fe1e8e14c6bd7ef2011b258190d4827c02d84520 --- /dev/null +++ b/samples_new/texts_merged/4753802.md @@ -0,0 +1,646 @@ + +---PAGE_BREAK--- + +On the irreducibility of certain polynomials +with coefficients as products of terms in an +arithmetic progression + +by + +CARRIE E. FINCH (Lexington, VA) and N. SARADHA (Mumbai) + +1. Introduction. In 1929, Schur [10] used prime ideals in algebraic number fields to prove that the Taylor polynomials for the exponential function, with some possible variations in the coefficients, are irreducible. + +THEOREM 1. Let $m$ be a positive integer and let $a_0, \dots, a_m$ be arbitrary integers with $|a_0| = |a_m| = 1$. Then + +$$ (1.1) \qquad a_m \frac{x^m}{m!} + a_{m-1} \frac{x^{m-1}}{(m-1)!} + \dots + a_1 x + a_0 $$ + +is irreducible over the rationals. + +Filaseta [7] used Newton polygons to obtain Schur's result, and also strengthened the result by allowing more possible values for the leading coefficient than just those of absolute value 1. + +THEOREM 2. Let $m$ be a positive integer and let $a_0, \dots, a_m$ be arbitrary integers with $|a_0| = 1$ and $0 < |a_m| < m$. Then the polynomial given in (1.1) is irreducible over the rationals except when + +$$ a_m = \pm 5 \text{ and } m = 6 \quad \text{or} \quad a_m = \pm 7 \text{ and } m = 10. $$ + +Filaseta's use of Newton polygons to demonstrate the irreducibility of the polynomials in (1.1) is based on the theorem of Dumas [3] regarding the construction of the Newton polygon of the product of two polynomials. In particular, Filaseta employs the following useful lemma, from [6]. + +LEMMA 1. Let $k$ and $l$ be integers with $k > l \ge 0$. 
Suppose $g(x) = \sum_{j=0}^{n} b_j x^j \in \mathbb{Z}[x]$ and $p$ is a prime such that $p \nmid b_n$, $p \mid b_j$ for all $j \in \{0, 1, \dots, n-l-1\}$, and the rightmost edge of the Newton polygon for $g(x)$ + +2010 Mathematics Subject Classification: Primary 12E05; Secondary 12D05. +Key words and phrases: arithmetic progressions, greatest prime factor, irreducibility of polynomials, Newton polygons. +---PAGE_BREAK--- + +with respect to $p$ has slope $< 1/k$. Then for any integers $a_0, a_1, \ldots, a_n$ with +$|a_0| = |a_n| = 1$, the polynomial $f(x) = \sum_{j=0}^n a_j b_j x^j$ cannot have a factor +with degree in the interval $[l+1, k]$. + +From this lemma, we see that using Newton polygons to eliminate the +possibility of factors of a particular degree hinges on finding primes that +divide certain coefficients of the polynomial. To obtain Theorem 2, Filaseta +appeals to a result of Ecklund, Eggleton, Erdős and Selfridge [5] on prime +divisors of binomial coefficients. We refer to [7] for details. Moreover, using +the same tools, Allen and Filaseta [1], [2] proved the following result. + +**THEOREM 3.** Let $m > 1$ and $a_0, \dots, a_m$ denote arbitrary integers with $|a_0| = 1$. + +(i) Suppose $m + 1 = k'2^u$ with $k'$ odd and $(m + 1)m = k''2^v3^w$ with $\text{gcd}(k'', 6) = 1$. Let $0 < |a_m| < \min\{k', k''\}$. Then + +$$ +(1.2) \quad a_m \frac{x^m}{(m+1)!} + a_{m-1} \frac{x^{m-1}}{m!} + \cdots + a_2 \frac{x^2}{2!} + a_1 x + a_0 +$$ + +is irreducible over the rationals. + +(ii) Suppose $0 < |a_m| < 2m - 1$. Then + +$$ +(1.3) \quad a_m \frac{x^{2m}}{1 \cdot 3 \cdots (2m-1)} + a_{m-1} \frac{x^{2m-2}}{1 \cdot 3 \cdots (2m-3)} + \dots \\ +\phantom{(1.3) \quad a_m \frac{x^{2m}}{1 \cdot 3 \cdots (2m-1)} + } + a_2 \frac{x^4}{1 \cdot 3} + a_1 \frac{x^2}{1} + a_0 +$$ + +is irreducible over the rationals. 
+ +We observe that the common thread among the polynomials in (1.1), +(1.2), and (1.3) is that the denominators of the coefficients are products +of integers in an arithmetic progression; in the case of (1.1) and (1.2), we +see an arithmetic progression with initial term 1 and common difference 1, +and in (1.3) we see an arithmetic progression with initial term 1 and com- +mon difference 2. In this paper, we prove analogous results by considering +denominators which are again products of integers in an arithmetic progres- +sion with initial term an odd integer *a* and common difference 2. Let + +$$ +(1.4) \quad f(x) = \frac{x^m}{a(a+2)\cdots(a+2(m-1))} + \cdots + \frac{x^2}{a(a+2)} + \frac{x}{a} + 1, +$$ + +$$ +(1.5) \quad g(x) = a_m \frac{x^m}{a(a+2)\cdots(a+2(m-1))} \\ +\qquad + a_{m-1} \frac{x^{m-1}}{a(a+2)\cdots(a+2(m-2))} + \cdots + a_2 \frac{x^2}{a(a+2)} + a_1 \frac{x}{a} + a_0. +$$ + +Letting $P(n)$ denote the greatest prime factor of the positive integer $n$ +(putting $P(1) = 1$), we prove the following results. +---PAGE_BREAK--- + +**THEOREM 4.** Let $a \ge 1$ be an odd integer, $\max\{a, 110\} \le k \le m/2$ and $a_0, \dots, a_m$ be arbitrary integers with $P(a_0a_m) \le 2k+a$. Then $f(x)$ and $g(x)$ do not have a factor of degree $k$. + +Now we restrict to $1 \le a < 29$. We have + +**THEOREM 5.** Let $a$ be an odd integer with $1 \le a < 29$, and let $m > 1$ and $a_0, \dots, a_m$ be arbitrary integers with $P(a_0a_m) \le a + 4$. Then + +(i) $f(x)$ has no factor of degree $\ge 2$, + +(ii) $g(x)$ has no factor of degree $\ge 3$, and + +(iii) $g(x)$ has no factor of degree 2 except perhaps when + +$$ (a,m) \in \{(21,4), (19,59), (5,121), (19,114), (21,113), (21,163), (21,554)\}. $$ + +Further if there exists a prime $p \ge a+2$ dividing $a+2(m-1)$, then $f(x)$ has no linear factor. Also if such a prime $p$ does not divide $a_0a_m$, then $g(x)$ has no linear factor. 
+ +When $(a, m) = (21, 4)$, by choosing $a_0 = a_4 = 1$, $a_1 = -15$, $a_2 = -140$, $a_3 = 35$, we see that + +$$ g(x) = \frac{x^4}{21 \cdot 23 \cdot 25 \cdot 27} + \frac{35x^3}{21 \cdot 23 \cdot 25} - \frac{140x^2}{21 \cdot 23} - \frac{15x}{21} + 1 $$ + +$$ = \frac{1}{326025} (x^2 - 90x - 315)(x^2 + 1035x - 1035). $$ + +We thank the referee for providing this example. As in the proofs of Theorems 2 and 3, our method also depends on the use of Dumas' theorem on Newton polygons for the irreducibility of polynomials. On the other hand, we do not use results from Ecklund, Eggleton, Erdős, and Selfridge [5]. Instead, we establish a Sylvester type result on the greatest prime factor of a product of several consecutive terms in an arithmetic progression. This result is of interest independent of its application to establish Theorems 4 and 5. We show for instance that for any $k \ge 2$, + +$$ P(n(n+2)\cdots(n+2(k-1))) > 2k+a $$ + +if $n \ge 2k+a$, where $n$ is odd and $a$ is a positive odd integer less than 29, except for an explicitly given set of values of $(n,k,a)$. This result depends on a result of Lehmer [8] and several computations. The above assertion is also true for any odd $a$ provided $k$ is large; see Lemma 5. As an application of Theorem 5 we give another criterion for the irreducibility of (1.3). + +**COROLLARY 6.** Let $P(a_0a_m) \le 5$. Suppose there exists a prime $p$ such that + +$$ p \mid (2m-1) \quad \text{and} \quad p \nmid a_0a_m. $$ + +Then the polynomial given in (1.3) is irreducible over the rationals. +---PAGE_BREAK--- + +Let $a$ be an even integer equal to $2b$, say. Then $f(x)$ and $g(x)$ can be +transformed into a polynomial of the form + +$$ +a'_{m} \frac{x^m}{b(b+1)\cdots(b+m-1)} + \cdots + a'_{1} \frac{x}{b} + a'_{0} +$$ + +with $a'_m = 1, a'_0, \dots, a'_{m-1}$ integers. The case $b=1$ and $|a'_0| = 1$ is Schur's +polynomial given in (1.1). For some results on the factors of such polynomials, +we refer to [11]. 
We will not deal with this case in the present paper. + +The remainder of this paper is organized as follows. In Section 2, we prove the result about the greatest prime factor of a product of consecutive terms in arithmetic progression. In Section 3, we use Newton polygons to exclude some factors of the polynomials in question. In particular cases, all factors of degree ≥ 2 are excluded. Theorem 4 comes out as a consequence of Lemmas 5 and 11. In Section 4 we discuss linear factors and prove Theorem 5 from Theorems 8 and 9. + +**2. Greatest prime factor of a product of integers in arithmetic progression.** The letters $n, d, k$ denote positive integers with $\gcd(n, d) = 1$. Set $\Delta = n(n+d)\cdots(n+(k-1)d)$. Let $\pi(n)$ denote the number of primes $\le n$ and $\pi_d(n)$ be the number of primes $\le n$ that are co-prime to $d$. Let $\nu_p(n)$ denote the power of the prime $p$ in $n$ and $p_i$ denote the $i$th prime. In this section, we obtain lower bounds on $P(\Delta)$, and conclude the section with Theorem 8, a particular result for $P(\Delta)$ when $d=2$. + +We state without proof our first lemma. See [9] for details. + +LEMMA 2. For $0 \le i < k$, suppose $P(n + id) \le c_0k$. Let $S = \{n, n + d, \dots, n + (k-1)d\}$. For every prime $p \le c_0k$ with $p \nmid d$, choose $n + ipd \in S$ such that $p$ does not appear to a higher power in the factorization of any other element of $S$. Let $S_1$ be the subset of $S$ obtained by deleting from $S$ all $n + ipd$ with $p \le c_0k$ and $p \nmid d$. Then + +$$ +\prod_{n+id \in S_1} (n+id) \le (k-1)! \prod_{p|d} p^{-\nu_p((k-1)!)}. +$$ + +When $d=1$, the product on the right hand side is taken as 1. In the next +lemma, inequality (i) is an easy consequence of the formula of Legendre on +the $\nu_p((k-1)!)$. The estimate for $\pi(x)$ in (ii) is due to Dusart [4]. + +LEMMA 3. + +(i) $\nu_p((k-1)!) \ge \frac{k-p}{p-1} - \frac{\log(k-1)}{\log p}$. 
+ +(ii) $\pi(x) \le \frac{x}{\log x} \left( 1 + \frac{1.2762}{\log x} \right)$ for $x > 1$. +---PAGE_BREAK--- + +LEMMA 4. Let $k \ge 2$, $c_0 > 1$, $c_1 > 0$, $d \ge 1$ and $k - \pi_d(c_0 k) \ge 1$. Suppose $n \ge c_1 k d$ and $P(\Delta) \le c_0 k$. Then + +$$ (2.1) \qquad (c_1 d)^{k-\pi_d(c_0 k)} \le k^{\pi_d(c_0 k)} \prod_{p|d} p^{-\nu_p((k-1)!)}. $$ + +*Proof.* Observe that $\Delta$ is not divisible by primes dividing $d$ and that every prime $> k$ may divide only one term of $\Delta$. Hence there are at least $k - \pi_d(c_0k) + \pi_d(k)$ terms which are divisible only by primes $\le k$. By deleting a term in which a prime $p \le k$, $p\nmid d$ appears to the maximum power, using the notation from Lemma 2, we see that $|S_1| \ge k - \pi_d(c_0k) \ge 1$. We set $t := |S_1| - 1$. We arrange the elements of $S_1$ as + +$$ n + i_0 d < n + i_1 d < \dots < n + i_t d. $$ + +Then by Lemma 2, + +$$ \prod_{v=0}^{t} (n + i_v d) \le (k-1)! \prod_{p|d} p^{-\nu_p((k-1)!)}. $$ + +This gives + +$$ n^{k-\pi_d(c_0k)} \le n(n+d)\cdots(n+(k-\pi_d(c_0k)-1)d) \le (k-1)! \prod_{p|d} p^{-\nu_p((k-1)!)}. $$ + +Since $n \ge c_1kd$, we get + +$$ (c_1 d)^{k-\pi_d(c_0 k)} k^{k-\pi_d(c_0 k)} \le k^k \prod_{p|d} p^{-\nu_p((k-1)!)}, $$ + +which gives the assertion of the lemma. ■ + +Putting together the inequalities from Lemma 3 with the result in Lemma 4 and observing that $\pi_d(c_0k) \le \pi(c_0k)$, we obtain the following result. + +COROLLARY 7. Let $k \ge 2$, $c_0 > 1$, $c_1 > 0$, $d = p$ prime and $k - \pi_p(c_0k) \ge 1$. Suppose that $n \ge c_1kp$ and $P(\Delta) \le c_0k$. Let + +$$ f(k,p) = \begin{cases} 0 & \text{if } p \ge k, \\ \displaystyle \frac{1}{p-1} - \frac{p}{k(p-1)} - \frac{\log(k-1)}{k \log p} & \text{otherwise.} \end{cases} $$ + +Then + +$$ p \le \exp \left[ \frac{c_0 + \frac{1.2762c_0}{\log c_0 k} - \left(1 - \frac{c_0}{\log c_0 k} - \frac{1.2762c_0}{(\log c_0 k)^2}\right) \log c_1}{1 - \frac{c_0}{\log c_0 k} - \frac{1.2762c_0}{(\log c_0 k)^2} + f(k,p)} \right]. 
$$ +---PAGE_BREAK--- + +For the rest of this section, we restrict our attention to arithmetic progressions with common difference $d = 2$. We denote + +$$ \Delta_2 = n(n+2)\cdots(n+2(k-1)) \quad \text{with } n \text{ odd.} $$ + +Before we state the next lemma, we note that Allen and Filaseta [1] showed that for every $n \ge 213$, there exists a prime $p \in (n, 1.05n]$. We will use this result in the next lemma. + +**LEMMA 5.** Let $a \ge 1$ be an odd integer, $k \ge \max\{a, 110\}$ and $n \ge 2k+a$. Then $P(\Delta_2) > 2k+a$. + +*Proof.* Suppose $P(\Delta_2) \le 2k+a$. First assume that $n \ge 40(k-1)$. Note that $2+a/k \le 3$ since $a \le k$. We apply Corollary 7 with $c_0 = 3$ and $c_1 = 19.5$. Note that $\pi(c_0k) < k$ since $k \ge 110$. We find that the right hand side of the inequality in Corollary 7 is a decreasing function of $k$ since each term involving $k$ is a decreasing function of $k$. Hence if the inequality is not valid for some $k=k_0$, then it is not valid for any $k > k_0$. We check that the inequality is not valid for $k_0=110$. This proves the assertion of the lemma for $n \ge 40(k-1)$. + +Next we assume that $n < 40(k-1)$. Note that $n \ge 213$. Then there exists a prime $p$ in $\{n+2, \dots, n+2(k-1)\}$ since the interval $(n, 1.05n]$ is contained in $(n, n+2(k-1)]$ as $n < 40(k-1)$. Further this prime exceeds $n \ge 2k+a$, by assumption. Thus $P(\Delta_2) > 2k+a$. $\blacksquare$ + +Now we restrict to odd $a < 29$. + +**LEMMA 6.** Let $1 \le a < 29$, *a odd*, $k \ge 31$ and $n \ge 2k+a$. Then $P(\Delta_2) > 2k + 29$. + +*Proof.* Suppose $P(\Delta_2) \le 2k+29$. We follow the argument as in Lemma 5. First let $n \ge 40(k-1)$. We apply Corollary 7 with $c_0 = 2+29/k$ and $c_1 = 19.5$. We check that the inequality in Corollary 7 is not valid for $k_0 = 100$. Thus we may assume that $k \le 99$. Now we check that the inequality (2.1) with actual values of the $\pi$-function is invalid for all $31 \le k \le 99$. 
+ +Next we assume that $213 \le n < 40(k-1)$. Since now $(n, 1.05n] \subset (n, n+2(k-1)]$, there exists a prime $\ge 2k+a$ dividing $\Delta_2$. Hence we may assume that $n < 213$. Then we need only consider $2k+a \le n < 213$ with $n$ odd. For these finitely many values of $n$ and $k$, we check directly that the assertion of the lemma is true. $\blacksquare$ + +Let $T$ be the set of all integers $M \ge 1$ with $P(M(M+2)) \le 31$. Table 1 below shows 101 such integers put in groups according to the largest prime factor of $M(M+2)$. It follows from Lehmer's work [8] that if $n > 1$ is an +---PAGE_BREAK--- + +integer, then $P(n(n+2)) \ge 37$ except when $n = M$ with $M$ given by Table 1. +Thus Table 1 gives all the integers of $T$. + +**Table 1** + +
| $p$ | Integers $M$ with $P(M(M+2)) = p$ |
|---|---|
| 3 | 1 |
| 5 | 3, 25 |
| 7 | 5, 7, 243 |
| 11 | 9, 33, 75 |
| 13 | 11, 13, 63, 273, 845, 1573 |
| 17 | 15, 49, 117, 119, 187, 1375 |
| 19 | 17, 19, 55, 133, 169, 245, 323, 361, 625, 663, 1615, 3211, 3969 |
| 23 | 21, 23, 115, 207, 253, 297, 343, 1125, 1309, 2185, 2275, 2873, 3703, 6875, 8073, 9315, 18513, 41743, 57475, 1128125, 1447873 |
| 29 | 27, 85, 143, 145, 375, 435, 493, 665, 2871, 8379, 9945, 12673, 14875, 16443, 24563, 41325, 45617, 87723, 184875 |
| 31 | 29, 31, 91, 93, 153, 341, 403, 525, 527, 713, 897, 1083, 1519, 1953, 2695, 3625, 4123, 5423, 7161, 19435, 22475, 86273, 130975, 203203, 2509045, 3322053, 287080365 |
+ +The next three lemmas deal with the complementary case of Lemma 6 when $k \le 30$. + +LEMMA 7. Let $2 \le k \le 30$. Suppose no $M \in T$ is of the form $n + 4j$ for any $j$ with $0 \le j \le (k-2)/2$. Then $P(\Delta_2) \ge 2k + 29$. + +*Proof.* We divide the integers $n, n + 2, \dots, n + 2(k-1)$ into pairs + +$$ (2.2) \qquad (n, n+2), (n+4, n+6), \dots $$ + +Note that there are at least $[k/2]$ pairs. By hypothesis, none of these pairs coincides with $(M, M+2)$ for any $M \in T$. Then the product of integers in each pair in (2.2) has a prime factor $\ge 37$. Since these integers are in a block of length at most 30, we see that each pair in (2.2) must have a distinct prime $\ge 37$ dividing their product. Thus $\Delta_2$ is divisible by at least $[k/2]$ primes $\ge 37$. Hence + +$$ P(\Delta_2) \ge p_{\lfloor k/2 \rfloor+11}. $$ + +We check that $p_{\lfloor k/2 \rfloor+11} \ge 2k+29$ for $2 \le k \le 30$, which completes the proof of the lemma. $\blacksquare$ + +LEMMA 8. Let $3 \le k \le 30$ and $n \ge 2k+29$. Then $P(\Delta_2) \ge 2k+29$ except when $(n,k)$ is one of the following ten pairs: + +(91, 3), (115, 3), (115, 4), (117, 3), (143, 3), (243, 3), (341, 3), +(525, 3), (663, 3), (2871, 3). +---PAGE_BREAK--- + +*Proof.* By Lemma 7, we need only consider $n$ such that + +$$M = n + 4j \quad \text{for some } M \in T \text{ and some } j \text{ with } 0 \le j \le (k-2)/2.$$ + +Then we find that + +$$ (2.3) \qquad 2k+29 \le n \le M \le n+2(k-2). $$ + +Let $p$ be the largest prime $\le M$ and $q$ the smallest prime $\ge \max\{2k+29, M\}$. If $p \ge n$, then (2.3) implies that $p$ divides $\Delta_2$. If $p < n$ and $q \le p+2k$, then $q$ divides $\Delta_2$. Thus for any $k \ge (q-p)/2$, the product $\Delta_2$ is divisible by either $p$ or $q$. So the assertion of the lemma is true provided $p \ge 2k+29$. Thus we may assume that either + +$$ k < \frac{q-p}{2} \quad \text{or} \quad k > \frac{p-29}{2}. 
$$ + +Combining with (2.3), we have + +$$ (2.4) \qquad \begin{array}{l} \max\{2k+29, M-2(k-2)\} \le n \le M, \\ k < \min\left\{31, \frac{q-p}{2}\right\} \quad \text{or} \quad \frac{p-29}{2} < k \le 30. \end{array} $$ + +Thus for each $M \in T$, we check for the finitely many values of $(n,k)$ in (2.4) whether $P(\Delta) \ge 2k+29$. We illustrate the above procedure with an example. Let $M = 243$. Then $p=241$ and $q=251$. Hence + +$$ k < 5 \quad \text{and} \quad 239 \le n \le 243, \quad n \text{ odd.} $$ + +In these cases we check directly that $P(\Delta) \ge 2k+29$, the only exception being $(n,k) = (243,3)$. By the above procedure we find only the 10 exceptions listed in the statement of the lemma. $\blacksquare$ + +Finally we show + +LEMMA 9. Let $3 \le k \le 30$ and $n \ge 2k+a$ with $1 \le a < 29$, $a$ odd. +Assume that $\Delta_2$ is not equal to any of the ten products in Lemma 8. Then + +$$ P(\Delta_2) > 2k + a $$ + +except when $(n,k,a) \in \{(23,3,17), (31,3,25)\}$. + +*Proof.* By Lemmas 7 and 8, we need to check the assertion only when + +$$ 2k + a \le n < 2k + 29, \quad 1 \le a < 29, \quad a \text{ odd}, \quad 3 \le k \le 30, $$ + +which is done by direct computation. $\blacksquare$ + +For each odd $a \in [1,27]$, let $T(a)$ be the set of $M \in T$ for which +$P(M(M+2)) \le a + 4 \le M$. For example, when $a = 1$, $T(a) = \{25\}$; +when $a = 3$, $T(a) = \{7,25,243\}$. Let $k$ be given and $n \ge 2k + a$ with +---PAGE_BREAK--- + +$1 \le a < 29$, $a$ odd. We denote by $a^*$ the smallest $a$ such that + +$$P(\Delta_2) \le 2k + a.$$ + +With the above notation, we combine Lemmas 6 and 9 to obtain the following theorem. + +**THEOREM 8.** Let $k \ge 2$ and $n \ge 2k+a$ with $1 \le a < 29$, $a$ odd. 
Then + +$$ (2.5) \qquad P(\Delta_2) > 2k + a $$ + +except for the following values of $n, k$ and $a$: + +$$ k = 2, 1 \le a < 29 \text{ with } n \in T(a), $$ + +$$ k = 3, (n, a) = (23, 17), (31, 25), $$ + +$$ (2.6) \qquad \begin{aligned} k = 3, (n, a^*) &= (91, 25), (115, 17), (117, 11), (143, 23), (243, 13), \\ & \phantom{k=3,} (341, 25), (525, 25), (663, 23), (2871, 23), \end{aligned} $$ + +$$ k = 4, (n, a^*) = (115, 15). $$ + +*Proof.* Let $k \ge 3$. Suppose $(n, k, a) \in \{(23, 3, 17), (31, 3, 25)\}$. Then $P(\Delta_2) \le 2k+a$ and these exceptions are listed in (2.6). Now assume that $(n, k, a) \notin \{(23, 3, 17), (31, 3, 25)\}$. Then by Lemmas 6 and 9 we find that $P(\Delta_2) > 2k+a$ except possibly when $(n,k)$ equals any of the ten pairs in Lemma 8. Let us take $(n,k) = (91,3)$. Then $P(\Delta_2) = 31 > 2k+a$ except when $a=25,27$. Thus $a^*=25$. Similarly $a^*$ for other pairs in Lemma 8 are found and listed in (2.6). + +Now we take $k=2$. Then + +$$ P(n(n+2)) \ge 37 > 2k+a $$ + +for all $n$ except those $n=M$ listed in $T$. For any given odd $a$, $1 \le a < 29$, by our notation $T(a)$ denotes the values of $n \ge 2k+a$ for which (2.5) does not hold. Hence $T(a)$ gives the set of exceptional values of $n$. This proves the theorem. $\blacksquare$ + +**3. Newton polygons.** As mentioned in the Introduction, a result of Dumas [3], from 1906, led Filaseta [6] to Lemma 1. Filaseta also remarks in [6] that this lemma may be strengthened by only requiring that $p$ not divide $a_0a_m$ in place of the condition that $|a_0| = |a_m| = 1$; we make use of this stronger version of the lemma here. In [11], Shorey and Tijdeman gave a refinement of Lemma 1 using the notion of Newton function. Let $f$ be any polynomial of degree $n$ in $\mathbb{Z}[x]$. The *Newton function* $Nf_p(x)$ with respect to a prime $p$ is a real valued function on the interval $[0,n]$ which has the Newton polygon of $f$ with respect to $p$ as its graph. 
We shall give below a slightly modified version of their lemma. +---PAGE_BREAK--- + +LEMMA 10. Let $k$ and $l$ be integers with $k > l \ge 0$. Suppose $u(x) = \sum_{j=0}^n b_j x^j \in \mathbb{Z}[x]$ and $p$ is a prime such that $p \nmid b_n$ and $p | b_j$ for all $j \in \{0, 1, \dots, n-l-1\}$. Let $a_0, a_1, \dots, a_n$ be integers with $p \nmid a_0 a_n$. Put $v(x) = \sum_{j=0}^n a_j b_j x^j$. Then for any factor $h(x)$ of $v(x)$ having degree $k > l$, we have + +$$Nu_p(y) \le Nh_p(y), \quad Nu_p(n) - Nu_p(n-k+y) \ge Nh_p(k) - Nh_p(y)$$ + +for any $y \in [0, k]$ where $Nh_p(k)$ is a positive integer. + +*Proof.* Suppose + +$$v(x) = h(x)w(x)$$ + +with $\deg h(x) = k > l$. If the leftmost edge of the Newton polygon of $v(x)$ is of slope 0, then its $x$-length is $\le l$. Since $h(x)$ is of degree $k > l$, the Newton polygon of $h(x)$ has at least one edge of non-zero slope. Thus $Nh_p(k)$ is a positive integer. + +From the hypothesis it is clear that + +$$ (3.1) \qquad \begin{cases} Nu_p(0) = Nv_p(0) = 0, & Nu_p(n) = Nv_p(n), \\ Nu_p(x) \le Nv_p(x) & \text{for } x \in (0, n). \end{cases} $$ + +Further, by Dumas' theorem, we have + +$$ (3.2) \qquad \begin{cases} Nv_p(y) \le Nh_p(y) & \text{for } y \in [0, k], \\ Nv_p(n) - Nv_p(n-k+y) \le Nu_p(n) - Nu_p(n-k+y) & \text{for } y \in [0, k]. \end{cases} $$ + +Let us translate parallel to *xy*-axes the Newton polygon of *h* defined on the interval $[0, k]$ so that the point $(k, Nh_p(k))$ coincides with $(n, Nv_p(n))$. Then the origin is shifted to $(n-k, Nv_p(n)-Nh_p(k))$ and any $(x, Nh_p(x))$ for $x \in [0, k]$ goes to $(n-k+x, Nh_p(x)+Nv_p(n)-Nh_p(k))$. Thus the shifted Newton polygon of *h* goes from $(n-k, Nv_p(n)-Nh_p(k))$ to $(n, Nv_p(n))$ and it lies on or above the Newton polygon of *v* in the interval $[n-k, n]$, by Dumas' theorem. 
Hence for any $x \in [0, k]$, we have + +$$Nh_p(x) + Nv_p(n) - Nh_p(k) \ge Nv_p(n-k+x)$$ + +or + +$$Nv_p(n) - Nv_p(n-k+x) \ge Nh_p(k) - Nh_p(x).$$ + +Thus from (3.1) and (3.2) we get the assertion of the lemma. $\blacksquare$ + +Note that Lemma 1 follows from the above lemma, since, when the last edge of the Newton polygon of *g* has slope $< 1/k$, then $Ng_p(n) - Ng_p(n-k) < 1$, by taking $u = g$ in the above lemma, from which we get $Nh_p(k) = 0$, a contradiction. + +LEMMA 11. Let $a, m$ and $k$ be positive integers with $a$ odd and $k \le m/2$. Assume that there exists a prime $p > 2k + a$ dividing + +$$(a+2(m-k))\cdots(a+2(m-1)).$$ +---PAGE_BREAK--- + +Then the polynomial + +$$F(x) = x^m + (a + 2(m-1))x^{m-1} + \cdots + (a + 2(m-1))(a+2(m-2))\cdots a$$ + +has no factor of degree $k$. Also the polynomial + +$$G(x) = a_m x^m + a_{m-1}(a+2(m-1))x^{m-1} + \cdots + a_0(a+2(m-1))(a+2(m-2))\cdots a$$ + +has no factor of degree $k$, where $a_0, \dots, a_m$ are integers with $p \nmid a_0 a_m$. + +*Proof.* Write $F(x) = x^m + c_{m-1}x^{m-1} + \cdots + c_1x + c_0$ and $G(x) = a_m x^m + a_{m-1}c_{m-1}x^{m-1} + \cdots + a_1c_1x + a_0c_0$, where + +(3.3) $\quad c_j = (a+2j)\cdots(a+2(m-1))$ for $0 \le j < m$ and $c_m = 1$. + +By assumption, there exists a prime $p > 2k + a$ dividing $c_0, c_1, \ldots, c_{m-k}$. +From Lemma 1 we see that it suffices to show that the slope of the rightmost +edge of the Newton polygon of $F(x)$ with respect to the prime $p$ is $< 1/k$. +The slope of the rightmost edge is + +$$ (3.4) \qquad L = \max_{1 \le j \le m} \left\{ \frac{\nu_p(c_0) - \nu_p(c_j)}{j} \right\}. $$ + +Therefore $L$ equals + +$$ \nu_p(a(a+2)\cdots(a+2(j-1)))/j \quad \text{for some } 1 \le j \le m. $$ + +Let $j \le k$. Since $p > 2k + a$, we see that + +$$ \nu_p(a(a+2)\cdots(a+2(j-1)))/j = 0 < 1/k. $$ + +Let $j > k$. Since $p \ge 2k + a + 2$, we have + +$$ \nu_p(a(a+2)\cdots(a+2(j-1))) \le \nu_p((a+2j)!) < \frac{a+2j}{p-1} \le \frac{a+2j}{a+1+2k} < \frac{j}{k}. 
$$ + +Thus, $L < 1/k$, as desired, completing the proof. $\blacksquare$ + +*Proof of Theorem 4.* The assertion is a direct consequence of Lemmas 5 and 11. $\blacksquare$ + +We combine Theorem 8 and Lemma 11 to obtain the following theorem. + +**THEOREM 9.** Let $a \ge 1$ be an odd integer less than 29, and let $m$ be any integer $\ge 2$. Assume that $2m \ne n + 2k - a$ for any $(n,k,a)$ given by (2.6). Then the polynomial $F(x)$ in Lemma 11 has no factor of degree $\ge 2$. Further the polynomial $G(x)$ also has no factor of degree $\ge 2$ provided $a_0$ and $a_m$ are composed of primes $\le a + 4$. + +**4. Linear factors of $F(x)$ and $G(x)$.** In this section we deal with the linear factors of $F(x)$ and $G(x)$. Again using Lemma 11, we show +---PAGE_BREAK--- + +LEMMA 12. Let $a \ge 1$ be an odd integer and let $m \ge 2$. Assume that there exists a prime $p$ such that + +$$p \nmid a, \quad p \mid (a+2(m-1)), \quad p^{1-1/(p-1)} \ge (a+2)^{1/2}.$$ + +Then the polynomial $F(x)$ has no linear factor. Also the polynomial $G(x)$ +has no linear factor if such a prime $p$ does not divide $a_0a_m$. + +*Proof*. Since *p* divides *a* + 2(*m* − 1), we see that *p* | *c**j* for 0 ≤ *j* < *m* where c_j is given by (3.3). Further we require that L < 1 where L is given by (3.4). Thus we need + +$$ +(4.1) \qquad \nu_p(a(a+2)\cdots(a+2(j-1))) < j \quad \text{for } 1 \le j < m. +$$ + +Note that $\nu_p(a) = 0$. Hence we may assume that $j > 1$. Also we may suppose +that $p \le a + 2(j-1)$, otherwise (4.1) holds since the left hand side of the +inequality is 0. Suppose $p^\alpha \le a + 2(j-1) < p^{\alpha+1}$. Then by taking blocks of +$p, p^2, \dots, p^\alpha$ successive terms we see that + +$$ +\begin{align*} +\nu_p(a(a+2)\cdots(a+2(j-1))) &\le \left(\left\lfloor \frac{j}{p} \right\rfloor + 1\right) + \cdots + \left(\left\lfloor \frac{j}{p^\alpha} \right\rfloor + 1\right) \\ +&< \frac{j}{p-1} + \alpha \le \frac{j}{p-1} + \frac{\log(a+2(j-1))}{\log p}. 
+\end{align*} +$$ + +Thus (4.1) is valid if + +$$ +p^{j(1-\frac{1}{p-1})} \geq a + 2(j-1), +$$ + +which is true for $j \ge 2$ by the assumption on $p$. Now the result follows by +Lemma 1. $\blacksquare$ + +**5. Proof of Theorem 5.** Suppose $g(x)$ has a factor of degree $k$ with $2 \le k \le m/2$. By Theorem 9 we need only consider the exceptional values given in (2.6) with $n = M = a+2(m-k) \ge a+2k$. To exclude these cases, we use Lemma 1. This requires finding a suitable prime so that + +$$ +(5.1) \qquad p \mid c_0, \dots, c_{m-k} \quad \text{and} \quad L < 1/k +$$ + +where *L* is given by (3.4). For this, we use the following procedure. + +Let $p$ be a prime such that + +$$ +(5.2) \qquad p | c_{m-k} +$$ + +and $p \mid (a + 2(m - \delta))$ with $\delta \ge 1$ chosen as small as possible. + +Suppose $a+2h$ is the least integer in $\{a, a+2, \dots, a+2(m-1)\}$ divisible by $p$. Let $\nu_p(a+2(h+ip)) = \theta_i$ for $0 \le i \le r = (m-\delta-h)/p$. Let + +$$ +L' = \max \left\{ \frac{\theta_0}{h+1}, \frac{\theta_0 + \theta_1}{h+p+1}, \dots, \frac{\theta_0 + \dots + \theta_r}{h+rp+1} \right\}. +$$ +---PAGE_BREAK--- + +Then we see that + +$$ +\begin{align*} +\nu_p(c_0) &= \dots = \nu_p(c_h), \\ +\nu_p(c_{h+1}) &= \dots = \nu_p(c_{h+p}) = \nu_p(c_0) - \theta_0, \\ +&\vdots \\ +\nu_p(c_{h+(r-1)p+1}) &= \dots = \nu_p(c_{h+rp}) = \nu_p(c_0) - \theta_0 - \dots - \theta_{r-1}, \\ +\nu_p(c_{h+rp+1}) &= \dots = \nu_p(c_{m-1}) = \nu_p(c_0) - \theta_0 - \dots - \theta_r. +\end{align*} +$$ + +Since $\theta_r > 0$, we have $L' > 0$. Thus by definition, $L = L'$. + +Assume that + +$$ +(5.3) \quad \frac{\theta_0}{h+1} < \frac{1}{k} \quad \text{and} \quad \theta_i < \frac{p}{k} \quad \text{for } 1 \le i \le r. 
+$$ + +Then + +$$ +\begin{align*} +\frac{\theta_0 + \theta_1}{h+p+1} &= \frac{\theta_0}{h+1} \frac{h+1}{h+p+1} + \frac{\theta_1}{h+p+1} \\ +&< \frac{h+1}{k(h+p+1)} + \frac{p}{k(h+p+1)} = \frac{1}{k} +\end{align*} +$$ + +and by induction, we see that + +$$ +\frac{\theta_0 + \cdots + \theta_s}{h + sp + 1} < \frac{1}{k} \quad \text{for } 1 < s \le r. +$$ + +Thus $L < 1/k$, which is required in (5.1). Thus we need only satisfy (5.3). +Since + +$$ +p^{\theta_i} \le a + 2(h + ip) \le a + 2(m - 1) = M + 2(k - 1), +$$ + +condition (5.3) is satisfied if + +$$ +(5.4) \qquad \frac{\theta_0}{h+1} < \frac{1}{k} \quad \text{and} \quad M+2(k-1) < p^{p/k}. +$$ + +Thus we need only choose a prime *p* satisfying (5.2) and (5.4). In Tables 2 and 3, we give a choice of *p* for most of the values of (*M*; *a*) listed in (2.6). Note that the choice of *p* is not unique. When *k* = 4, we have (*M*; *a*) = (115; 15–27). We exclude this case by taking *p* = 13 so that *a* + 2*h* = 39 giving *6* ≤ *h* ≤ 12 and *θ*₀ = 1, which satisfy (5.2) and (5.4). + +Table 2 (k = 3) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ p + + (M; a) +
+ 7 + + (31; 25) +
+ 11 + + (117; 13), (143; 23–27), (341; 25–27), (2871; 23–27) +
+ 13 + + (91; 25–27), (115; 17–27), (117; 15–27), (243; 15–27), (663; 23–27) +
+ 17 + + (117; 11), (525; 25–27) +
+ 19 + + (243; 13) +
+ 23 + + (23; 17) +
+---PAGE_BREAK--- + +**Table 3** ($k=2$) + +
p(M; a)
5(5; 1), (13; 9), (25; 1, 7, 9, 11, 17)
7(7; 3), (19; 15), (33; 9–17), (49; 13–17), (63; 9–17), (75; 9–17), (117; 13–17), (133; 15–17, 23–27), (145; 25–27), (243; 3, 9–17, 23–27), (273; 9–17), (343; 23–27), (845; 9–17), (1125; 23–27)
11(31; 27), (33; 7, 19–27), (55; 15–27), (75; 7, 19–27), (119; 13–27), (143; 25–27), (187; 13–27), (207; 19–27), (253; 19–27), (273; 19–27), (297; 19–27), (341; 27), (361; 15–27), (493; 25–27), (625; 15–27), (713; 27), (845; 19–27), (1309; 19–27), (1375; 13–27), (1573; 13–27), (1615; 15–27), (2275; 19–27), (2695; 27), (2871; 25–27), (3969; 15–27), (4123; 27), (5423; 27), (6875; 19–27), (7161; 27), (9315; 19–27), (16443; 25–27), (18513; 19–27), (19435; 27), (24563; 25–27), (41325; 25–27), (41743; 19–27), (45617; 25–27), (57475; 19–27), (86273; 27), (87723; 25–27), (130975; 27), (184875; 25–27), (203203; 27)
13(63; 19–27), (91; 27), (115; 19–27), (117; 19–27), (169; 15–27), (245; 15–27), (323; 15–27), (375; 25–27), (403; 27), (663; 15–27), (897; 27), (1519; 27), (1573; 9), (2873; 19–27), (3211; 15–27), (3625; 27), (3703; 19–27), (8073; 19–27), (9945; 25–27), (12673; 25–27), (22475; 27), (1128125; 19–27), (1447873; 19–27)
17(49; 19–27), (85; 25–27), (153; 27), (525; 27), (527; 27), (1953; 27), (8379; 25–27), (14875; 25–27), (2509045; 27), (3322053; 27)
19(93; 27), (133; 21), (435; 25–27), (665; 25–27), (1083; 27), (2185; 21–27), (287080365; 27)
23(23; 19), (343; 19), (1125; 19), (2185; 19)
 + +For the choices of $p$ given in Tables 2 and 3, conditions (5.2) and (5.4) are satisfied and thus all these values are excluded. When $k=2$ and $(M;a) = (243; 7)$, $(1573; 11)$, we take $p=5, 7$, respectively, and compute $L'$ to get $L < 1/k$. Hence these cases are also excluded. Thus all values of $(M;a)$ given in (2.6) are excluded except when + +$$ +(5.5) \quad (M;a) \in \{(25; 3, 5, 13, 15, 19, 21),\ (133; 19),\ (243; 5, 19, 21),\\ +\phantom{(5.5) \quad} (343; 21),\ (1125; 21)\}. +$$ + +Next we illustrate the application of Lemma 10 with an example. Let $(M;a) = (25;3)$. Then $m=13$ and $u(x) = x^{13} + 27x^{12} + \dots + 3 \cdot 5 \cdot \dots \cdot 27$. The vertices of the Newton polygon of $u(x)$ with respect to the prime $p=3$ are + +$$ +(0,0) - (9,5) - (12,7) - (13,8). +$$ + +By Lemma 10, any quadratic factor $h(x)$ satisfies $Nh_3(2) \ge Nu_3(2) = 10/9$ +---PAGE_BREAK--- + +implying $Nh_3(2) \ge 2$. Also we have + +$$Nu_3(13) - Nu_3(11) = 8 - 19/3 = 5/3 \ge Nh_3(2),$$ + +which is a contradiction. Thus the case $(M;a) = (25;3)$ is excluded. Below we give the values of $(M;a, p)$ together with the vertices of the corresponding Newton polygon which are excluded by Lemma 10. + +$$ (25; 5, 3): \quad (0,0) - (9,5) - (12,7), $$ + +$$ (25; 13, 3): \quad (0,0) - (8,5), $$ + +$$ (25; 15, 3): \quad (0,0) - (6,4) - (7,5), $$ + +$$ (25; 19, 3): \quad (0,0) - (5,4). $$ + +As already noted, in the case $(M;a) = (25;21)$ there are reducible polynomials. Thus we are left with six undecided cases in (5.5). + +In these cases including $(M;a) = (25;21)$ we check directly with MATHEMATICA that the resulting polynomials $f(x)$ do not factor. Thus $f(x)$ has no factors of degree $\ge 2$. This completes the proof of the theorem. $\blacksquare$ + +**Acknowledgments.** The authors would like to thank Professor Michael Filaseta for many helpful discussions. 
The second author also wishes to thank him for his kind hospitality during her visit to the University of South Carolina in May-June, 2007. We thank Professors T. N. Shorey and R. Tijdeman for providing us with the preprint of their paper [11]. We also owe our sincere thanks to the referee for his/her helpful comments and for pointing out a rectifiable error in the earlier version of the paper. + +**References** + +[1] M. Allen and M. Filaseta, *A generalization of a second irreducibility theorem of I. Schur*, Acta Arith. 109 (2003), 65–79. + +[2] —, —, *A generalization of a third irreducibility theorem of I. Schur*, ibid. 114 (2004), 183–197. + +[3] G. Dumas, *Sur quelques cas d'irréductibilité des polynômes à coefficients rationnels*, J. Math. Pures Appl. 2 (1906), 191–258. + +[4] P. Dusart, *Autour de la fonction qui compte le nombre de nombres premiers*, Ph.D. thesis, Université de Limoges, 1998. + +[5] E. F. Ecklund, Jr., R. B. Eggleton, P. Erdős and J. L. Selfridge, *On the prime factorization of binomial coefficients*, J. Austral. Math. Soc. Ser. A 26 (1978), 257–269. + +[6] M. Filaseta, *The irreducibility of all but finitely many Bessel polynomials*, Acta Math. 174 (1995), 383–397. + +[7] —, *A generalization of an irreducibility theorem of I. Schur*, in: Analytic Number Theory, Proc. Internat. Conf. in Honor of Heini Halberstam, Vol. 1, B. C. Berndt, H. G. Diamond and A. J. Hildebrand (eds.), Birkhäuser, Boston, 1996, 371–395. + +[8] D. H. Lehmer, *On a problem of Störmer*, Illinois J. Math. 8 (1964), 57–79. +---PAGE_BREAK--- + +[9] N. Saradha and T. N. Shorey, *Almost perfect powers in arithmetic progression*, Acta Arith. 99 (2001), 363–388. + +[10] I. Schur, *Einige Sätze über Primzahlen mit Anwendungen auf Irreduzibilitätsfragen, I, II*, Sitzungsber. Preuss. Akad. Wiss. Berlin Phys.-Math. Kl. 1929, 125–136, 370–391. + +[11] T. N. Shorey and R. Tijdeman, *Generalizations of some irreducibility results by Schur*, preprint. + +Carrie E. 
Finch +Mathematics Department +Washington and Lee University +Lexington, VA 24450, U.S.A. +E-mail: finchc@wlu.edu + +N. Saradha +School of Mathematics +Tata Institute of Fundamental Research +Homi Bhabha Road +Mumbai, 400 005, India +E-mail: saradha@math.tifr.res.in + +*Received on 24.5.2008* +*and in revised form on 13.1.2010* + +(5712) \ No newline at end of file diff --git a/samples_new/texts_merged/4971236.md b/samples_new/texts_merged/4971236.md new file mode 100644 index 0000000000000000000000000000000000000000..67a5bfe30606bcea700bbf157e3578a4b6179399 --- /dev/null +++ b/samples_new/texts_merged/4971236.md @@ -0,0 +1,562 @@ + +---PAGE_BREAK--- + +# Parameter Estimation of Bernoulli Distribution using Maximum Likelihood and Bayesian Methods + +Nurmaita Hamsyiah¹), Khoirin Nisa¹), & Warsono¹) + +¹) Department of Mathematics, Faculty of Mathematics and Science, University of Lampung +Jl. Prof. Dr. Sumantri Brojonegoro No. 1 Bandar Lampung +Phone Number +62 721 701609 Fax +62 721 702767 +E-mail: itamath98@gmail.com + +## ABSTRACT + +The term parameter estimation refers to the process of using sample data to estimate the parameters of the selected distribution. There are several methods that can be used to estimate distribution parameter(s). In this paper, the maximum likelihood and Bayesian methods are used for estimating parameter of Bernoulli distribution, i.e. θ, which is undefined as the probability of success event for two possible outcomes. The maximum likelihood and Bayesian estimators of Bernoulli parameter are derived, for the Bayesian estimator the Beta prior is used. The analytical calculation shows that maximum likelihood estimator is unbiased while Bayesian estimator is asymptotically unbiased. However, empirical analysis by Monte Carlo simulation shows that the mean square errors (MSE) of the Bayesian estimator are smaller than maximum likelihood estimator for large sample sizes. 
+ +**Keywords:** Bernoulli distribution, beta distribution, conjugate prior, parameter estimation. + +## 1. PENDAHULUAN + +Parameter estimation is a way to predict the characteristics of a population based on the sample taken. In general, parameter estimation is classified into two types, namely point estimation and interval estimation. The point estimation of a parameter is a value obtained from the sample and is used as a parameter estimator whose value is unknown. + +Several point estimation methods are used to calculate the estimator, such as moment method, maximum likelihood method, and Bayesian method. The moment method predicts the parameters by equating the values of sample moments to the population moment and solving the resulting equation system [1]. The maximum likelihood (ML) method uses differential calculus to determine the maximum of the likelihood function to obtain the parameters estimates. The Bayesian method differs from the traditional methods by introducing a frequency function for the parameter being estimated namely prior distribution. The Bayesian method combines the prior distribution and sample distribution. The prior distribution is the initial distribution that provides information about the parameters. The sample distribution combined with the prior distribution provides a new distribution i.e. the posterior distribution that expresses a degree of confidence regarding the location of the parameters after the sample is observed [2]. + +Researches on parameter estimation using various methods of various distributions have been done, for example: Bayesian estimation of exponential distribution [3], [4], ML and Bayesian estimations of Poisson distribution [5], Bayesian estimation of Poisson-Exponential distribution [6], and Bayesian estimation of Rayleigh distribution [7]. 
+ +The difference between the ML and the Bayesian methods is that the ML method considers that the parameter is +---PAGE_BREAK--- + +an unknown quantity of fixed value and the inference is based only on the information in the sample; while the Bayesian method considers the parameter as a variable that describes the initial knowledge of the parameters before the observation is performed and expressed in a distribution called the prior distribution. After the observation is performed, the information in the prior distribution is combined with the sample data information through Bayesian theorem, and the result is expressed in a distribution form called the posterior distribution, which further becomes the basis for inference in the Bayesian method [8]. + +The Bayesian method has advantages over other methods, one of which is the Bayesian method can be used for drawing conclusions in complicated or extreme cases that cannot be handled by other methods, such as in complex hierarchical models. In addition, if the prior information does not indicate complete and clear information about the distribution of the prior, appropriate assumptions may be given to its distribution characteristics. Thus, if the prior distribution can be determined, then a posterior distribution can be obtained which may require mathematical computation [8]. + +This paper examines the parameter estimation of Bernoulli distribution using ML and Bayesian methods. A review of Bernoulli distribution and Beta distribution is presented in Section 2. The research methodology is described in Section 3. Section 4 provides the results and discussion. Finally, the conclusion is given in Section 5. + +# 2. THEORETICAL FRAMEWORK + +## 2.1 Bernoulli Distribution + +Bernoulli distribution was introduced by Swiss mathematician Jacob Bernoulli (1654-1705). It is the probability distribution resulting from two outcomes or events in a given experiment, i.e. 
success ($X = 1$) and fail ($X = 0$), with the probability of the success is $\theta$ and the probability of failure is $1 - \theta$. + +*Definition* + +A random variable X is called a Bernoulli random variable (or X is Bernoulli distributed) if and only if its probability distribution is given by + +$$f(x; \theta) = \theta^x (1-\theta)^{1-x}, \text{ for } x = 0,1.$$ + +*Proposition 1* + +Bernoulli distribution $f(x; \theta)$ has mean and variance as follows: + +$$\mu = \theta \text{ and } \sigma^2 = \theta(1 - \theta).$$ + +*Proof:* + +The mean of Bernoulli random variable X is +---PAGE_BREAK--- + +$$ +\begin{align*} +\mu &= E(X) \\ +&= \sum_{x=0}^{1} x f(x; \theta) \\ +&= \sum_{x=0}^{1} x \theta^x (1 - \theta)^{1-x} \\ +&= 0 \cdot \theta^0 (1 - \theta)^{1-0} + 1 \cdot \theta^1 (1 - \theta)^{1-1} = \theta. +\end{align*} +$$ + +The variance, i.e. $\sigma^2 = E(X - \mu)^2 = E(X^2) - [E(X)]^2$, of Bernoulli distribution is obtained as follows: + +$$ +\begin{align*} +E(X^2) &= \sum_{x=0}^{1} x^2 f(x; \theta) \\ +&= \sum_{x=0}^{1} x^2 \theta^x (1 - \theta)^{1-x} \\ +&= 0^2 \cdot \theta^0 (1 - \theta)^{1-0} + 1^2 \cdot \theta^1 (1 - \theta)^{1-1} = \theta. +\end{align*} +$$ + +Then, + +$$ +\sigma^2 = E(X - \mu)^2 = \theta - \theta^2 = \theta(1 - \theta). +$$ + +## 2.2. Beta Distribution + +### Definition + +A random variable X is called a beta random variable with parameters a and b if the density function of X is given by + +$$ +f(x) = \begin{cases} \frac{1}{B(a,b)} x^{a-1} (1-x)^{b-1}, & 0 < x < 1 \\ 0, & \text{otherwise} \end{cases} +$$ + +where $B(a, b)$ is the beta function defined as + +$$ +B(a, b) = \int_{0}^{1} x^{a-1} (1-x)^{b-1} dx ; a > 0, b > 0. \quad (1) +$$ + +### Proposition 2 + +The beta function and gamma function are connected by + +$$ +B(a, b) = \frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)}. 
\qquad (2) +$$ + +Proof: + +$$ +\Gamma(a)\Gamma(b) = \int_{x=0}^{\infty} x^{a-1} e^{-x} dx \cdot \int_{y=0}^{\infty} y^{b-1} e^{-y} dy +$$ +---PAGE_BREAK--- + +$$ = \int_{y=0}^{\infty} \int_{x=0}^{\infty} x^{a-1} y^{b-1} e^{-x-y} dxdy. $$ + +Let $x = f(z,t) = zt$ and $y = g(z,t) = z(1-t)$, + +$$ +\begin{aligned} +\Gamma(a)\Gamma(b) &= \int_{z=0}^{\infty} \int_{t=0}^{1} (zt)^{a-1}[z(1-t)]^{b-1}e^{-z} |J(z,t)| dt dz \\ +&= \int_{z=0}^{\infty} \int_{t=0}^{1} (zt)^{a-1}[z(1-t)]^{b-1}e^{-z} zdtdz \\ +&= \int_{z=0}^{\infty} \int_{t=0}^{1} z^{a-1+b-1+1}e^{-z} t^{a-1}(1-t)^{b-1} dt dz \\ +&= \int_{z=0}^{\infty} z^{a+b-1}e^{-z} dz \cdot \int_{t=0}^{1} t^{a-1}(1-t)^{b-1} dt \\ +&= \Gamma(a+b)B(a,b). +\end{aligned} +$$ + +Then, + +$$ B(a, b) = \frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)}. $$ + +Proposition 3 + +The mean and variance of beta distribution with parameters *a* and *b* are + +$$ \mu = \frac{a}{a+b} \quad \text{and} \quad \sigma^2 = \frac{ab}{(a+b+1)(a+b)^2}. $$ + +Proof: + +The proposition can be proved by using the moment of beta distribution as follows: + +$$ +\begin{aligned} +E(X^n) &= \frac{1}{B(a, b)} \int_0^1 x^n x^{a-1} (1-x)^{b-1} dx \\ +&= \frac{1}{B(a, b)} \int_0^1 x^{(a+n)-1} (1-x)^{b-1} dx. +\end{aligned} +$$ + +From equations (1) and (2) we obtain + +$$ +\begin{aligned} +E(X^n) &= \frac{B(a + n, b)}{B(a, b)} \\ +&= \frac{\displaystyle\frac{\Gamma(a+n)\Gamma(b)}{\Gamma(a+b+n)}}{\displaystyle\frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)}} +\end{aligned} +$$ +---PAGE_BREAK--- + +$$ +\begin{align*} +&= \frac{\Gamma(a+n)\Gamma(b)}{\Gamma(a+b+n)} \times \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} \\ +&= \frac{\Gamma(a+n)\Gamma(a+b)}{\Gamma(a+b+n)\Gamma(a)}. 
\tag{3} +\end{align*} +$$ + +Thus the mean and variance of beta distribution will be obtained by substituting $n = 1$ and $n = 2$ to equation (3), then + +$$ +\begin{align*} +\text{Mean}(X) &= E(X^1) = \frac{\Gamma(a+1)\Gamma(a+b)}{\Gamma(a+b+1)\Gamma(a)} \\ +&= \frac{a\Gamma(a)\Gamma(a+b)}{(a+b)\Gamma(a+b)\Gamma(a)} \\ +&= \frac{a}{a+b} +\end{align*} +$$ + +and $Var(X) = \sigma^2 = E(X^2) - [E(X)]^2$. + +Since + +$$ +\begin{align*} +E(X^2) &= \frac{\Gamma(a+2)\Gamma(a+b)}{\Gamma(a+b+2)\Gamma(a)} \\ +&= \frac{(a+1)\Gamma(a+1)\Gamma(a+b)}{(a+b+1)\Gamma(a+b+1)\Gamma(a)} \\ +&= \frac{(a+1)a\Gamma(a)\Gamma(a+b)}{(a+b+1)(a+b)\Gamma(a+b)\Gamma(a)} \\ +&= \frac{(a+1)a}{(a+b+1)(a+b)}, +\end{align*} +$$ + +then + +$$ +\begin{align*} +\mathrm{Var}(X) &= \frac{(a+1)a}{(a+b+1)(a+b)} - \left(\frac{a}{a+b}\right)^2 \\ +&= \frac{(a+1)a}{(a+b+1)(a+b)} - \frac{a^2}{(a+b)^2} \\ +&= \frac{(a+b)(a^2+a) - a^2(a+b+1)}{(a+b)^2(a+b+1)} \\ +&= \frac{a^3 + a^2b + a^2 + ab - a^3 - a^2b - a^2}{(a+b)^2(a+b+1)} \\ +&= \frac{ab}{(a+b)^2(a+b+1)}. +\end{align*} +$$ + +# 3. RESEARCH METHOD + +The research method for estimating the parameter of Bernoulli distribution in this paper can be described as follows. For ML estimation, the parameter estimation is done by differentiating partially the log of the likelihood +---PAGE_BREAK--- + +function and equation it by zero, + +$$ \frac{\partial \ln L(\theta)}{\partial \theta} = 0 $$ + +to obtain ML estimator($\hat{\theta}_{ML}$). The second derivation assessment is performed to show that the resulted $\hat{\theta}$ truly maximize the likelihood function. For the Bayesian method, the parameter estimation is done through the following steps: + +1. Form the likelihood function of Bernoulli distribution as follows: + +$$ L(x_1, x_2, \dots, x_n | \theta) = \prod_{i=1}^{n} f((x_i)|\theta). $$ + +2. 
Calculate the joint probability distribution, which is obtained by multiplying the likelihood function and the prior distribution, + +$$ H(x_1, x_2, \dots, x_n; \theta) = L(x_1, x_2, \dots, x_n | \theta) \cdot \pi(\theta). $$ + +3. Calculate the marginal probability distribution function, + +$$ p(x_1, x_2, \dots, x_n) = \int H(x_1, x_2, \dots, x_n; \theta) d\theta. $$ + +4. Calculate the posterior distribution by dividing the joint probability distribution function by the marginal function, + +$$ \pi(\theta|x_1, x_2, \dots, x_n) = \frac{H(x_1, x_2, \dots, x_n; \theta)}{p(x_1, x_2, \dots, x_n)} $$ + +The Bayesian parameter estimate of $\theta$ is then produced as the mean of the posterior distribution. + +After the parameter estimate of $\theta$ is obtained by MLE and Bayesian methods, the evaluation of the estimators is performed by assessing their bias, variance, and mean square error. + +# 4. RESULT AND DISCUSSION + +## 4.1. The ML Estimator of the Bernoulli Distribution Parameter ($\theta$) + +Let $X_1, X_2, \dots, X_n$ be Bernoulli distributed random sample with $X_i \sim Bernoulli(\theta)$, where $\theta \in \Omega = (0,1)$. The probability function of $X_i$ is + +$$ f(x_i; \theta) = \theta^{x_i} (1 - \theta)^{1-x_i} \text{with } x_i \in \{0,1\}. $$ + +The likelihood function of Bernoulli distribution is given by + +$$ +\begin{align*} +L(\theta) &= f(x_1, x_2, \dots, x_n; \theta) \\ +&= \prod_{i=1}^{n} f(x_i; \theta) \\ +&= \prod_{i=1}^{n} \theta^{x_i} (1 - \theta)^{1-x_i} +\end{align*} +$$ +---PAGE_BREAK--- + +$$ +\begin{align*} +&= \theta^{x_1}(1-\theta)^{1-x_1} \cdot \theta^{x_2}(1-\theta)^{1-x_2} \cdots \theta^{x_n}(1-\theta)^{1-x_n} \\ +&= \theta^{\sum_{i=1}^{n} x_i} (1-\theta)^{n-\sum_{i=1}^{n} x_i}. 
\tag{4} +\end{align*} +$$ + +The natural logarithm of the likelihood function is then + +$$ +\begin{align*} +\ln L(\theta) &= \ln[\theta^{\sum_{i=1}^{n} x_i} (1-\theta)^{n - \sum_{i=1}^{n} x_i}] \\ +&= \ln \theta^{\sum_{i=1}^{n} x_i} + \ln(1-\theta)^{n - \sum_{i=1}^{n} x_i} \\ +&= \sum_{i=1}^{n} x_i \ln \theta + (n - \sum_{i=1}^{n} x_i) \ln(1-\theta). \tag{5} +\end{align*} +$$ + +The ML estimate value of θ is obtained by differentiating equation (5) with respect to θ and equating the differential result to zero, i.e. + +$$ +\begin{align*} +\frac{\partial}{\partial \theta} \ln L(\theta) &= \frac{\partial}{\partial \theta} \left[ \sum_{i=1}^{n} x_i \ln \theta + \left( n - \sum_{i=1}^{n} x_i \right) \ln(1-\theta) \right] = 0 \\ +&= \frac{\sum_{i=1}^{n} x_i}{\theta} - \frac{n - \sum_{i=1}^{n} x_i}{1-\theta} = 0 \\ +(1-\theta) \sum_{i=1}^{n} x_i - \theta \left( n - \sum_{i=1}^{n} x_i \right) &= 0 \\ +\sum_{i=1}^{n} x_i - \theta \sum_{i=1}^{n} x_i - n\theta + \theta \sum_{i=1}^{n} x_i &= 0 \\ +\sum_{i=1}^{n} x_i &= n\theta, +\end{align*} +$$ + +then we obtain + +$$ +\hat{\theta} = \frac{1}{n} \sum_{i=1}^{n} x_i. 
+$$ + +To show that $\hat{\theta}$ is the value that maximizes the likelihood function $L(\theta)$, it must be confirmed that the second derivative of the likelihood function for $\theta = \hat{\theta}$ is negative: + +$$ +\begin{align*} +\frac{\partial^2}{\partial \theta^2} \ln L(\theta) &= \frac{\partial^2}{\partial \theta^2} \left[ \sum_{i=1}^{n} x_i \ln \theta + \left( n - \sum_{i=1}^{n} x_i \right) \ln(1-\theta) \right] \\ +&= -\frac{\sum_{i=1}^{n} x_i}{\theta^2} - \frac{n - \sum_{i=1}^{n} x_i}{(1-\theta)^2} \\ +&= \frac{-(1-\theta)^2 \sum_{i=1}^{n} x_i - \theta^2 (n - \sum_{i=1}^{n} x_i)}{\theta^2 (1-\theta)^2} \\ +&= \frac{-\sum_{i=1}^{n} x_i + 2\theta \sum_{i=1}^{n} x_i - \theta^2 \sum_{i=1}^{n} x_i - n\theta^2 + \theta^2 \sum_{i=1}^{n} x_i}{\theta^2 (1-\theta)^2} +\end{align*} +$$ +---PAGE_BREAK--- + +$$ = \frac{-n\theta^2 + 2\theta \sum_{i=1}^{n} x_i - \sum_{i=1}^{n} x_i}{\theta^2(1-\theta)^2} < 0. $$ + +Since $\hat{\theta}$ maximizes the likelihood function, we conclude that the ML estimator of $\theta$ is given by + +$$ \hat{\theta}_{ML} = \frac{1}{n} \sum_{i=1}^{n} x_i. $$ + +## 4.2. The Bayesian Estimator of the Bernoulli Distribution Parameter ($\theta$) + +To estimate $\theta$ using Bayesian method, it is necessary to choose the initial information of a parameter called the prior distribution, denoted by $\pi(\theta)$, to be applied to the basis of the method namely the conditional probability. In this paper, the prior selection for Bernoulli distribution refers to the formation of its likelihood function. From equation (4) we have + +$$ \pi(\theta) \propto \theta^{\sum_{i=1}^{n} x_i} (1 - \theta)^{n - \sum_{i=1}^{n} x_i}. 
$$ + +A distribution having probability function in the same form as the above expression is the beta distribution with density function + +$$ f(\theta; a, b) = \frac{1}{B(a,b)} \theta^{a-1} (1-\theta)^{b-1}, 0 < \theta < 1 $$ + +where $a-1 = \sum_{i=1}^{n} x_i$, $b-1 = n - \sum_{i=1}^{n} x_i$, and $\frac{1}{B(a,b)}$ are factors required for the density function to be satisfied. + +The prior distribution is combined with the sample distribution to produce a new distribution called posterior distribution and denoted by $\pi(\theta|x_1, x_2, \dots, x_n)$. Posterior distribution is obtained by dividing the joint density distribution by the marginal distribution. + +Joint probability density function of $(x_1, x_2, \dots, x_n)$ is given by: + +$$ +\begin{align*} +H(x_1, x_2, \dots, x_n; \theta) &= L(x_1, x_2, \dots, x_n | \theta) \cdot \pi(\theta) \\ +&= \theta^{\sum_{i=1}^{n} x_i} (1-\theta)^{n-\sum_{i=1}^{n} x_i} \cdot \frac{1}{B(a,b)} \theta^{a-1} (1-\theta)^{b-1} \\ +&= \frac{1}{B(a,b)} \theta^{a+\sum_{i=1}^{n} x_i - 1} (1-\theta)^{b+n-\sum_{i=1}^{n} x_i - 1} \tag{6} +\end{align*} +$$ + +and the marginal function of $(x_1, x_2, \dots, x_n)$ is obtained as follows: + +$$ p(x_1, x_2, \dots, x_n) = \int_0^1 H(x_1, x_2, \dots, x_n; \theta) d\theta. $$ + +Using equation (6) we have + +$$ p(x_1, x_2, \dots, x_n) = \int_0^1 \frac{1}{B(a,b)} \theta^{a+\sum_{i=1}^{n} x_i - 1} (1-\theta)^{b+n-\sum_{i=1}^{n} x_i - 1} d\theta $$ +---PAGE_BREAK--- + +$$ +\begin{aligned} +&= \frac{1}{B(a, b)} \int_{0}^{1} \theta^{a + \sum_{i=1}^{n} x_i - 1} (1 - \theta)^{b + n - \sum_{i=1}^{n} x_i - 1} d\theta \\ +&= \frac{1}{B(a,b)} B(a + \sum_{i=1}^{n} x_i, b + n - \sum_{i=1}^{n} x_i). 
+\end{aligned} +\quad (7) $$ + +Then from equation (6) and (7) the posterior distribution can be written as follows: + +$$ +\begin{aligned} +\pi(\theta | x_1, x_2, \dots, x_n) &= \frac{H(x_1, x_2, \dots, x_n; \theta)}{p(x_1, x_2, \dots, x_n)} \\ +&= \frac{\frac{1}{B(a,b)} \theta^{a + \sum_{i=1}^{n} x_i - 1} (1-\theta)^{b+n-\sum_{i=1}^{n} x_i - 1}}{\frac{1}{B(a,b)} B(a + \sum_{i=1}^{n} x_i, b+n-\sum_{i=1}^{n} x_i)} \\ +&= \frac{\theta^{a+\sum_{i=1}^{n} x_i - 1} (1-\theta)^{b+n-\sum_{i=1}^{n} x_i - 1}}{B(a+\sum_{i=1}^{n} x_i, b+n-\sum_{i=1}^{n} x_i)}. +\end{aligned} +\quad (8) $$ + +The posterior distribution expressed in equation (8) obviously follows the beta distribution also, with parameters ($a + \Sigma_{i=1}^n x_i$) and ($b + n - \Sigma_{i=1}^n x_i$), or + +$$ \hat{\theta} \sim \text{Beta}(a + \sum_{i=1}^{n} x_i, b + n - \sum_{i=1}^{n} x_i). $$ + +Since the prior and posterior distribution of Bernoulli follows the same distribution, i.e. the Beta distribution, beta distribution is called the conjugate prior of the Bernoulli distribution. The posterior mean is used as the parameter estimate $\theta$ in Bayesian method. Using Proposition 3, the Bayesian estimator of parameter $\theta$ is obtained as follows: + +$$ +\begin{aligned} +\hat{\theta}_B &= \frac{a + \sum_{i=1}^{n} x_i}{a + \sum_{i=1}^{n} x_i + b + n - \sum_{i=1}^{n} x_i} \\ +&= \frac{a + \sum_{i=1}^{n} x_i}{a + b + n}. +\end{aligned} +$$ + +### 4.3. Evaluation of the Estimators Properties + +The parameter estimation of the Bernoulli distribution obtained by the MLE and Bayesian methods yields different estimates. The best estimator has to meet the following properties: + +#### 1. Unbiased + +An estimator is said to be unbiased if its expected value is equal to the estimated parameter, i.e. $\hat{\theta}$ is an unbiased estimator of $\theta$ if $E(\hat{\theta}) = \theta$. The bias of an estimator is then given by: + +$$ \operatorname{Bias}(\hat{\theta}) = E(\hat{\theta}) - \theta. 
\quad (9) $$ + +Let $X_1, X_2, ..., X_n$ be Bernoulli ($\theta$) random sample observations. Since $\hat{\theta}_{ML} = \frac{1}{n}\sum_{i=1}^n x_i$ is the ML estimator of $\theta$, its expected value is as follows: +---PAGE_BREAK--- + +$$ +\begin{align*} +E(\hat{\theta}_{ML}) &= E\left(\frac{1}{n}\sum_{i=1}^{n} x_i\right) \\ +&= \frac{1}{n}E\left(\sum_{i=1}^{n} x_i\right) \\ +&= \frac{1}{n}\sum_{i=1}^{n} E(x_i) \\ +&= \frac{1}{n} \cdot n\theta = \theta. \tag{10} +\end{align*} +$$ + +Since $E(\hat{\theta}_{ML}) = \theta, \hat{\theta}_{ML}$ is an unbiased estimator of $\theta$. + +Now consider the Bayesian estimator of θ i.e. $\hat{\theta}_B = \frac{a + \sum_{i=1}^n x_i}{a+b+n}$. The expected value of Bayesian estimator is given by + +$$ +\begin{align*} +E(\hat{\theta}_B) &= E \left( \frac{a + \sum_{i=1}^{n} x_i}{a + b + n} \right) \\ +&= \frac{1}{a + b + n} E \left( a + \sum_{i=1}^{n} x_i \right) \\ +&= \frac{1}{a + b + n} \left[ E(a) + E \left( \sum_{i=1}^{n} x_i \right) \right] \\ +&= \frac{1}{a + b + n} \left[ E(a) + \sum_{i=1}^{n} E(x_i) \right] \\ +&= \frac{1}{a + b + n} (a + n\theta). \tag{11} +\end{align*} +$$ + +Since $E(\hat{\theta}_B) \neq \theta$, $\hat{\theta}_B$ is a biased estimator of $\theta$. The bias value of $\hat{\theta}_B$ is: + +$$ +\begin{align} +\operatorname{Bias}(\hat{\theta}_B) &= E(\hat{\theta}_B) - \theta \nonumber \\ +&= \frac{a+n\theta}{a+b+n} - \theta. \tag{12} +\end{align} +$$ + +Although $\hat{\theta}_B$ is a biased estimator of $\theta$, it can be shown that $\hat{\theta}_B$ is asymptotically unbiased. The proof is given as follows: + +$$ +\begin{align*} +\lim_{n \to \infty} E(\hat{\theta}_B) &= \lim_{n \to \infty} \frac{a+n\theta}{a+b+n} \\ +&= \lim_{n \to \infty} \frac{\frac{a}{n} + \frac{n\theta}{n}}{\frac{a}{n} + \frac{b}{n} + \frac{n}{n}} \\ +&= \lim_{n \to \infty} \frac{\frac{a}{n} + \theta}{\frac{a}{n} + \frac{b}{n} + 1} +\end{align*} +$$
\qquad (13) $$ + +Since $\lim_{n \to \infty} E(\hat{\theta}_B) = \theta$, $\hat{\theta}_B$ is an asymptotically unbiased estimator of $\theta$. + +## 2. Efficiency + +The efficiency of an estimator is observed from its variance. The best parameter estimator is the one that has the smallest variance. This is because the variance of an estimator is a measure of the spread of the estimator around its mean. + +The variance of ML estimator $\hat{\theta}_{ML}$ is: + +$$ +\begin{aligned} +Var(\hat{\theta}_{ML}) &= Var\left(\frac{1}{n}\sum_{i=1}^{n} x_i\right) \\ +&= \frac{1}{n^2} Var\left(\sum_{i=1}^{n} x_i\right) \\ +&= \frac{1}{n^2} \sum_{i=1}^{n} Var(x_i) \\ +&= \frac{1}{n^2} n\theta(1-\theta) \\ +&= \frac{1}{n}\theta(1-\theta). +\end{aligned} +\qquad (14) $$ + +While the variance of the Bayesian estimator $\hat{\theta}_B$ is given by: + +$$ +\begin{aligned} +Var(\hat{\theta}_B) &= Var\left(\frac{a + \sum_{i=1}^{n} x_i}{a + b + n}\right) \\ +&= \frac{1}{(a+b+n)^2} Var\left(a + \sum_{i=1}^{n} x_i\right) \\ +&= \frac{1}{(a+b+n)^2} \left[ Var(a) + \sum_{i=1}^{n} Var(x_i) \right]. +\end{aligned} +$$ + +Since $Var(a) = 0$ and $Var(x_i) = \theta(1 - \theta)$, we obtain + +$$ Var(\hat{\theta}_B) = \frac{1}{(a+b+n)^2} n\theta(1-\theta). \qquad (15) $$ + +From equation (10), it is shown that the ML estimator is unbiased, whereas from equations (11) and (12) it is shown that Bayesian estimator is biased. As a result, the efficiency of the two methods cannot be compared because the efficiency of estimators applies to unbiased estimators. +---PAGE_BREAK--- + +### 3. Consistency + +The consistency of the estimators is evaluated from their mean square error (MSE). The MSE can be expressed as + +$$MSE(\hat{\theta}) = E(\hat{\theta} - \theta)^2 = Var(\hat{\theta}) + (\text{bias}\hat{\theta})^2. \quad (16)$$ + +If the sample size grows infinitely, a consistent estimator will give a perfect point estimate to $\theta$. 
Mathematically, $\hat{\theta}$ is a consistent estimator if and only if + +$$E(\hat{\theta} - \theta)^2 \to 0 \text{ when } n \to \infty,$$ + +which means that the bias and the variance approach 0 if $n \to \infty$. + +Substituting equation (10) and (14) to equation (16), the MSE of ML estimator $\hat{\theta}_{MLE}$ is then + +$$ +\begin{aligned} +E(\hat{\theta}_{MLE} - \theta)^2 &= Var(\hat{\theta}_{MLE}) + (\text{bias}\hat{\theta}_{MLE})^2 \\ +E(\hat{\theta}_{MLE} - \theta)^2 &= Var(\hat{\theta}_{MLE}) = \frac{1}{n}\theta(1-\theta). +\end{aligned} +$$ + +For $n \to \infty$, we have + +$$\lim_{n \to \infty} E(\hat{\theta}_{MLE} - \theta)^2 = \lim_{n \to \infty} \frac{1}{n} \theta(1-\theta) = 0. \quad (17)$$ + +In the same manner, by substituting equation (12) and (15) the MSE of Bayesian estimator $\hat{\theta}_B$ is: + +$$ +\begin{aligned} +E(\hat{\theta}_B - \theta)^2 &= Var(\hat{\theta}_B) + (\text{bias}\hat{\theta}_B)^2 \\ +E(\hat{\theta}_B - \theta)^2 &= \left[ \frac{1}{(a+b+n)^2} n\theta(1-\theta) \right] + \left( \frac{a+n\theta}{a+b+n} - \theta \right)^2. +\end{aligned} +$$ + +For $n \to \infty$, we have + +$$\lim_{n \to \infty} E(\hat{\theta}_B - \theta)^2 = \lim_{n \to \infty} \left[ \frac{1}{(a+b+n)^2} n\theta(1-\theta) + \left( \frac{a+n\theta}{a+b+n} - \theta \right)^2 \right] = 0. \quad (18)$$ + +From equation (17) and (18), we can conclude that ML and Bayesian estimators are consistent estimators of $\theta$. + +### 4.4. Empirical Comparison of the Properties of ML and Bayesian Estimators + +To compare the ML and Bayesian estimators of $\theta$, a Monte Carlo simulation using R program was conducted. The simulation was performed by generating Bernoulli distributed data with $\theta = 0.1, 0.3$, and $0.5$ and eight different sample sizes, i.e. $n = 20, 50, 100, 300, 500, 1000, 5000$, and $10000$. The simulation was repeated 1000 times for each combination of $\theta$ and $n$. 
The generated data were used to estimate parameter $\theta$ using the two methods. Furthermore, the bias and MSE of both estimators were calculated using the formulas in equations (9) and (16) and the results are presented in Table 1. +---PAGE_BREAK--- + +**Table 1. The bias and MSE of ML and Bayesian estimators of θ** + +
θNBiasMSE
MLBayesian
(α = 1, β = 1)
MLBayesian
(α = 1, β = 1)
0,1200,0012000,2230840,0314780,558149
500,0021800,0366020,0152640,041740
1000,0002700,0093280,0070580,008843
3000,0004130,0010750,0019010,002906
5000,0002100,0003640,0011830,000551
10000,0001950,0000910,0005030,000128
50000,0000030,0000030,0001430,000004
100000,0001140,0000010,0001840,000002
0,3200,0031000,5366090,0016520,493287
500,0013000,0900000,0031130,142692
1000,0006300,0213410,0015830,067615
3000,0000030,0022050,0003270,002307
5000,0003120,0008450,0001110,008999
10000,0005450,0002070,0004440,002177
50000,0003840,0000880,0044431.453419
1.5.1.1
1.5.2.1
1.5.3.1
1.5.4.1
1.5.5.1
1.5.6.1
1.5.7.1
1.5.8.1
1.5.9.1
2.5.1.1
2.5.2.1
2.5.3.1
2.5.4.1
2.5.5.1
2.5.6.1
2.5.7.1
2.5.8.1
2.5.9.1
3.5.1.1
3.5.2.1
3.5.3.1
3.5.4.1
3.5.5.1
3.5.6.1
3.5.7.1
3.5.8.1
3.5.9.1
4.5.1.1
4.5.2.1
4.5.3.1
4.5.4.1
4.5.5.1
4.5.6.1
4.5.7.1
4.5.8.1
4.5.9.1
5.5.1.1
5.5.2.1
5.5.3.1
5.5.4.1
5.5.5.1
5.5.6.1
5.5.7.1
5.5.8.1
5.5.9.1
6.5.1.1
6.5.2.1
6.5.3.1
6.5.4.1
6.5.5.1
6.5.6.1
6.5.7.1
6.5.8.1
6.5.9.
7.
















































































+ +Table 1 shows the bias and MSE values of ML and Bayesian estimates for a success probability of $\theta = 0.1$, 0.3 and 0.5. + +From the table it can be seen that the ML estimator produces smaller biases than the Bayesian estimates for finite samples (i.e., *n* < 1000). However, when the sample size is equal to or larger than one thousand (i.e., *n* = 1000, 5000, and 10000), the biases of the Bayesian estimator are smaller than those of the ML estimator. + +Even though the bias values of the ML estimates change inconsistently throughout the sample sizes, analytically it has been proved that the ML estimator is an unbiased estimator. + +This appears to be different from the bias values for the Bayesian estimator. + +It is because, for all the considered success probabilities, the bias values become smaller when the sample size increases, although analytically it is found that the Bayesian estimator is a biased estimator. + +As a result the efficiency of the two estimators cannot be compared. + +Therefore, to compare the best estimators we use the MSE of both estimators. + +This is because MSE considers both the bias and variance values. + +The MSE values of ML and Bayesian estimators that have been shown in Table 1 have similarities, i.e., the MSE value decreases as the sample size increases and it approaches 0. + +Thus, both estimators are consistent estimators. + +This also corresponds to the results obtained analytically. + +Based on the simulation results in this study, it can be seen that for the larger sample sizes the Bayesian estimator is better than the ML estimator. + +This is because the MSE value of the Bayesian estimator is smaller than that of the ML estimator. + +As shown in Table 1, when $\theta = 0.1$, the MSE value +---PAGE_BREAK--- + +of the Bayesian estimator is smaller than the ML estimator for *n* = 500, 1000, and 10000; and when $\theta$ = 0.3 and 0.5, the MSE values of the Bayesian estimator are smaller than the ML estimator for *n* = 1000, 5000, and 10000. + +# 5. 
CONCLUSION + +In this paper, we derived the ML and Bayesian estimator (using beta prior) of Bernoulli distribution parameter. Analytically we show that the ML estimator is an unbiased estimator and Bayesian estimator is a biased estimator for parameter $\theta$. However, Bayesian estimator is asymptotically unbiased. Based on the simulation result, both ML and Bayesian estimator are consistent estimators of $\theta$ because the two estimators satisfy the property of consistency, i.e. $E(\hat{\theta} - \theta)^2 \to 0$ when $\hat{n} \to \infty$. The simulation result also shows that the Bayesian estimator using beta prior is better than the MLE method for large sample sizes ($n \ge 1000$). + +# REFERENCES + +[1]. Bain, L.J. and Engelhardt, M. (1992). *Introduction to Probability and Mathematical Statistics*. Duxbury Press, California. + +[2]. Walpole, R.E dan Myers, R.H. (1995). *Ilmu Peluang dan Statistika untuk Insinyur dan Ilmuwan*. ITB, Bandung. + +[3]. Al-Kutubi H. S., Ibrahim N.A. (2009). Bayes Estimator for Exponential Distribution with Extension of Jeffery Prior Information. *Malaysian Journal of Mathematical Sciences*. 3(2):297-313. + +[4]. Nurlaila, D., Kusnandar D.,& Sulistianingsih, E. (2013). Perbandingan Metode Maximum Likelihood Estimation (MLE) dan Metode Bayes dalam Pendugaan Parameter Distribusi Eksponensial. *Buletin Ilmiah Mat. Stat. dan Terapannya*. 2(1):51-56. + +[5]. Fikhri, M., Yanuar, F., & Yudiantri A. (2014). Pendugaan Parameter dari Distribusi Poisson dengan Menggunakan Metode Maximum Likelihood Estimation (MLE) dan Metode Bayes. *Jurnal Matematika UNAND*. 3(4):152-159. + +[6]. Singh S. K., Singh, U., & Kumar, M. (2014). Estimation for the Parameter of Poisson-Exponential Distribution under Bayesian Paradigm. *Journal of Data Science*.12:157-173. + +[7]. Gupta, I. (2017). Bayesian and E-Bayesian Method of Estimation of Parameter of Rayleigh Distribution-A Bayesian Approach under Linex Loss Function. 
*International Journal of Statistics and Systems*.12(4):791-796. + +[8]. Box, G.E.P& Tiao, G.C. (1973). *Bayesian Inference in Statistical Analysis*. Addision-Wesley Publishing Company, Philippines. \ No newline at end of file diff --git a/samples_new/texts_merged/500594.md b/samples_new/texts_merged/500594.md new file mode 100644 index 0000000000000000000000000000000000000000..84fdf1f1541a3d9e140f849d6e0bf928a542500d --- /dev/null +++ b/samples_new/texts_merged/500594.md @@ -0,0 +1,443 @@ + +---PAGE_BREAK--- + +New Time Dependent Gravity +Displays Dark Matter and +Dark Energy Effects + +Pharis E. Williams +Williams Research +15247 W. Domingo Ln. +Sun City West AZ, 85375 + +It is shown that a time dependent gravitational field that is getting weaker with time will produce the effects measured for both the tangential velocity in the arms of spiral galaxies and for the high z supernovas. These results show that the effects that have led to the hypothesis of Dark Matter and Dark Energy may come from the same basic physical phenomena, namely that gravity is getting weaker as a function of time, and not from the existence of exotic matter. + +*Keywords:* distances and red shifts, dark matter, dark energy, theory + +**Introduction** + +Much has been written, hypothesized, and calculated on the subject of Dark Matter and Dark Energy. However, none consider a time dependent gravitational field. A gravitational field that gets weaker with time will display galaxy dynamics responding to a much stronger field before sending light from space towards the Earth +---PAGE_BREAK--- + +that can only be received many light years later. The theoretical +basis for such a time dependent gravitational field has already been +presented [1][2][3]. Three elements of this theory apply to the +potential explanation of Dark Matter and Dark Energy. These +elements include: + +1. The theory is a five dimensional gauge theory with Weyl geometry [4]. 
This means that the fields within the theory are gauge fields. However, the theory is not another Kaluza-Klein type of theory in that the fifth dimension describes a real physical property, mass density, and, therefore, is not hidden or obscured by some mathematical technique. The five dimensionality of the gauge theory requires that the gravitational field be time dependent. + +2. Quantum Mechanics is required by restricting the Weyl scale factor within the gauge theory to have only a value of unity. This was noted by Schrödinger [5] before he published his wave equations and later it was shown by London [6] that this restriction required Schrödinger's wave equations. This quantization requires that the gauge potentials be non-singular [1]. + +3. The fundamental Weyl geometry requires that the Poisson brackets and the unit of action be dependent upon the gauge function [1]. This variable unit of action leads to a relation determining the red shift of light coming to Earth from distant stars [12]. + +These three aspects of the new theory suffice to offer a different +view of the data from which the hypothesis of dark matter and dark +energy have evolved. +---PAGE_BREAK--- + +# Dark Matter + +Data wherein the tangential velocities of stars in the arms of a spiral galaxy differed from Newtonian predictions were first reported nearly seventy years ago [7]. A fundamental theory supporting these data has not heretofore been given, though empirical theories have been presented. The best of these theories is the Modified Newtonian Dynamics (MOND) [8][9][10]. The theory presented here had its beginning in 1974 and only recently has it been applied to the dynamics of spiral galaxies. + +Newtonian uniform circular motion equates the gravitational acceleration to the centripetal acceleration so that + +$$ \frac{GMm}{r^2} = \frac{mv^2}{r}. 
\qquad (1) $$ + +A time-dependent, non-singular gravitational field, such as the Dynamic Theory predicts, alters Equation (1) to + +$$ \frac{GMm(1 - H_0 \tau)}{r^2} \left(1 - \frac{\lambda}{r}\right) e^{-\frac{\lambda}{r}} = \frac{mv^2}{r}, \qquad (2) $$ + +where $H_0$ is Hubble's constant and + +$$ \lambda \equiv \frac{GM}{c^2} \qquad (3) $$ + +as determined by planetary orbits. For this time dependent gravitational field the gravitational acceleration that an arm of a galaxy feels is due to the gravitational field of the mass M at a previous time. This previous time is given by the time that it takes for the field to travel from the site of the gravitational field to the point on the arm under consideration. This means that when all the mass is considered to be at the center of the galaxy the time that enters into
+ +The time Lagrange equation may then be written as + +$$\frac{d}{dt}\left[\frac{\partial L}{\partial \dot{\tau}}\right] - \frac{\partial L}{\partial \tau} = 0 = \frac{d}{dt}[m\dot{\tau}] + H_o \lambda m \frac{e^{-\frac{\lambda}{r}}}{r}. \quad (6)$$ + +For a spherically symmetric field the radial equation is + +$$\frac{d}{dt}\left[\frac{\partial L}{\partial \dot{r}}\right] - \frac{\partial L}{\partial r} = 0 = \frac{d}{dt}[m\dot{r}] - mr\dot{\theta}^2 + GMm(1-H_o\tau)\left(1-\frac{\lambda}{r}\right)\frac{e^{-\frac{\lambda}{r}}}{r^2}. \quad (7)$$ + +The third Lagrange equation becomes +---PAGE_BREAK--- + +$$ \frac{d}{dt} \left[ \frac{\partial L}{\partial \dot{\theta}} \right] - \frac{\partial L}{\partial \theta} = 0 = \frac{d}{dt} \left[ m r^2 \dot{\theta} \right]. \quad (8) $$ + +For the problem of spiral galaxy behaviour we may assume that $\lambda \ll r$ and write the equations of motion as + +$$ \ddot{\tau} = -\frac{H_o \lambda}{r}, \quad (9) $$ + +$$ \ddot{r} - r\dot{\theta}^2 = -(1-H_o\tau)\frac{GM}{r^2}\left(1-\frac{\lambda}{r}\right) \quad (10) $$ + +and + +$$ \ddot{\theta} + \frac{2}{r}\dot{r}\dot{\theta} = 0. \quad (11) $$ + +If we now look at uniform circular motion we find that Equation (9) becomes + +$$ \ddot{\tau} = -\frac{H_o \lambda}{r} = \text{constant} \Rightarrow \frac{d\tau}{dt} = -\frac{H_o \lambda}{r} \quad (12) $$ + +so that this may be integrated to get + +$$ \dot{\tau} = \dot{\tau}_o - \frac{H_o \lambda}{r} (t-t_o) \quad (13) $$ + +which may be integrated again to get + +$$ \tau = \tau_o - \frac{H_o \lambda}{2r} t^2 + \left( \dot{\tau}_o + \frac{H_o \lambda}{r} t_o \right) t. \quad (14) $$ + +Also for the assumed uniform circular motion Equation (10) may be written as +---PAGE_BREAK--- + +$$v^2 = (1 - H_o \tau) \frac{GM}{r} \left(1 - \frac{\lambda}{r}\right) \quad (15)$$ + +where $v$ is the tangential velocity of the uniform circular motion. 
Putting Equation (14) into Equation (15) obtains + +$$v^2 = \frac{GM}{r} \left\{ 1 - H_o \tau_o + \frac{H_o^2 \lambda}{2r} t^2 - \left( H_o \dot{\tau}_o + \frac{H_o^2 \lambda}{r} t_o \right) t \right\} \left( 1 - \frac{\lambda}{r} \right). (16)$$ + +We must keep in mind there are two times to be considered. First there is the time it takes for the gravitational change to travel from the center of the galaxy to the point of measurement in the galaxy arm. The second time is for the light signal to travel from the galaxy to the Earth. + +Let us set $\tau_o = 0$ and $t_o = 0$ at the point in time when the light left the star on its way toward Earth. Now our Equation (16) becomes + +$$v^2 \approx \frac{GM}{r} \left\{ 1 + \frac{H_o^2 \lambda}{2r} t^2 - H_o \dot{\tau}_o t \right\} \left(1 - \frac{\lambda}{r}\right). \quad (17)$$ + +Time runs from the time the gravitational signal left the center of the galaxy at + +$$t = \frac{-r}{c}, \qquad (18)$$ + +where $r$ is the distance from the center of the galaxy. Using Equation (18) in Equation (17) we find + +$$v^2 \approx \frac{GM}{r} \left\{ 1 + \frac{H_o \dot{\tau}_o r}{c} \left( 1 - \frac{H_o^2 \lambda}{2c^2} \right) \right\} \approx \frac{GM}{r} \left\{ 1 + \frac{H_o \dot{\tau}_o r}{c} \right\}. \quad (19)$$ + +Now we need to establish a value for $\dot{\tau}_o$. Look at the energy at time $t=0$ and $\tau=0$ with $r \gg \lambda$, or
In the absence of a means of evaluating the initial conditions we may turn to experimental results. First, suppose we write the acceleration in the arms of the galaxy as + +$$a = a_N \left\{ 1 - \frac{H_o^2 \lambda}{2r} t^2 - H_o \dot{\tau}_o t \right\} . \quad (23) \\ \approx a_N \left\{ 1 + H_o \dot{\tau}_o \frac{r}{c} \right\}$$ + +We now use the data that shows the acceleration begins to deviate from Newtonian when the acceleration drops to a value of $1.2 \times 10^{-10}$ m/sec² so that + +$$a_N = \frac{GM}{r_c^2} \approx 1.2 \times 10^{-10} \Rightarrow r_c \approx \sqrt{\frac{GM}{1.2 \times 10^{-10}}} \quad (24)$$ + +Then requiring +---PAGE_BREAK--- + +$$ \dot{\tau}_o = \frac{c}{r_c H_o} \qquad (25) $$ + +sets a value of $\dot{\tau}_o$ in keeping with the data. Equation (23) becomes + +$$ a \approx a_N \left\{ 1 + \frac{r}{r_c} \right\} \qquad (26) $$ + +where we see the short range Newtonian acceleration and the long range acceleration predicted by MOND. + +It should be noted that the approximate linearity of the tangential velocity with respect to time of Equations (17) and (19) displays an independence of the time it takes for light to travel from the galaxy to Earth. This apparent independence of time masks the fact that the gravitational strength of the galaxy, relative to the current epoch, depends upon the time of light travel to Earth. + +## Dark Energy + +Data displaying evidence that provided the beginning of the hypothesized dark energy was first presented in 1998 [11]. To date no fundamental theory has had success in explaining these data. + +The universe expansion factor is taken from general relativity and is + +$$ \frac{\ddot{a}}{a} = -\frac{4}{3}\pi G \left( \rho + 3\frac{p}{c^2} \right). \qquad (27) $$ + +The mean density and pressure are currently taken to include dark energy and are taken to obey the local conservation of energy relation + +$$ \dot{\rho} = -3\frac{\dot{a}}{a}\left(\rho + \frac{p}{c^2}\right). 
\qquad (28) $$ +---PAGE_BREAK--- + +The first integral of Equations (27) and (28) is the Friedman equation + +$$ \dot{a}^2 = \frac{8}{3}\pi G\rho a^2 + \text{constant.} \quad (29) $$ + +But consider what happens if one wishes to compare this with the cosmology produced by the non-singular, time dependent, gravitational gauge potential. Then Equation (27) becomes + +$$ m_g \frac{d^2 x}{dt^2} = \frac{4\pi}{3} \frac{x^3 \rho(t) G m_g}{x^2} \left(1 - \frac{\lambda}{x}\right) (1 - H_o \tau) e^{-\frac{\lambda}{x}} , \quad (30) \\ = \frac{4\pi}{3} G \rho(t) x \left(1 - \frac{\lambda}{x}\right) (1 - H_o \tau) e^{-\frac{\lambda}{x}} $$ + +where $\tau$ is the universe time. + +Now let us replace $x$ with the co-moving coordinate $x=R(t)r$ where $R(t)$ is the scale factor of the universe and $r$ is the co-moving distance coordinate as is done in the standard model. When we also normalize the density to its value at the present epoch, $\rho_o$, by $\rho(t)=\rho_o R^{-3}(t)$ we obtain + +$$ \frac{d^2 R}{dt^2} = \frac{4\pi G \rho R}{3} \left( 1 - \frac{\frac{\lambda}{r}}{R} \right) (1 - H_o \tau) e^{-\frac{\lambda}{R}} . \quad (31) $$ + +If we multiply Equation (31) by $dR/dt$ and integrate with respect to time we find + +$$ \int \dot{R} \frac{d^2 R}{dt^2} dt = \frac{4\pi G \rho}{3} \int (1 - H_o \tau) R \dot{R} dt \quad (32) \\ \frac{\dot{R}^2}{2} - \frac{R_o^2}{2} = \frac{4\pi G \rho}{3} \int (1 - H_o \tau) R dR $$ +---PAGE_BREAK--- + +We now need to know how to integrate the right hand side of Equation (32). Suppose we consider the time it takes for light to travel from the distant star to Earth, or $t = -\frac{a}{c}$, where a is the distance from the star to Earth and the minus sign comes from looking backwards in time. The radius of the universe now has two parts. The first part is the radius of the universe when the light left the star on its journey to the Earth. Let this time be $R_0$. 
Thus we see that + +$$R = R_0 + a \quad (33)$$ + +and + +$$\dot{R} = \dot{a} \quad (34)$$ + +Further, from considerations of the dark matter it was determined that the world time was given by + +$$\tau = \tau_o - \frac{H_o GM}{2c^2 R} t^2 + \left( \dot{\tau}_o + \frac{H_o GM}{c^2 R} t_o \right) t. \quad (35)$$ + +When we set both initial times to zero and use the value of + +$$\lambda_U = \frac{GM}{c^2}, \quad (36)$$ + +Equation (35) becomes + +$$\tau = -\frac{H_o \lambda_U}{2R} t^2 + \dot{\tau}_o t. \quad (37)$$ + +Now we find that Equation (32) may be written as + +$$\dot{a}^2 = \frac{8\pi G\rho}{6c^2} \left\{ \begin{aligned} & 2R_0 c^2 a + (c^2 + H_0 \dot{\tau}_o c R_0) a^2 \\ & + \frac{1}{3} (H_0^2 \lambda_U + H_0 \dot{\tau}_o 2c) a^3 \end{aligned} \right\} + K. \quad (38)$$ +---PAGE_BREAK--- + +If we set the constant of integration, $K$, to zero, then Equation (38) becomes + +$$ \dot{a}^2 = H_o^2 \Omega'_M \left\{ R_o a + \frac{1}{2} \left( 1 + \frac{H_o \dot{\tau}_o R_o}{c} \right) a^2 + \frac{1}{6} \left( \frac{H_o^2 \lambda_U}{c^2} + \frac{2H_o \dot{\tau}_o}{c} \right) a^3 \right\} . \quad (39) $$ + +where we have used the definitions + +$$ \rho_c = \frac{3H_o^2}{8\pi G}, \quad \text{and} \quad \Omega'_M = \frac{\rho_o}{\rho_c}. \qquad (40) $$ + +In Equation (39) we find that the mass density term splits into three terms for a time-dependent gravitational field. For a time-independent gravitational field there was only one term. + +An interesting aspect of Equation (39) is that the two new mass terms both involve the same time dependence factor as the one that causes the tangential velocity of the arms of spiral galaxies to differ from Newtonian behaviour. That is to say that should the two new terms provide a basis for the current experimental evidence for dark energy it comes from the same source as the basis for dark matter. The time dependence of the gravitational field explains both phenomena. 
+ +Consider Equation (39) again and add the usual term for radiation so that we find + +$$ \left(\frac{\dot{a}}{a}\right)^2 = H_o^2 \left\{ \begin{aligned} & \Omega'_M \left[ \frac{R_o}{a} + \frac{1}{2} \left( 1 + \frac{H_o \dot{\tau}_o R_o}{c} \right) + \frac{1}{6} \left( \frac{H_o^2 \lambda_U}{c^2} + \frac{2H_o \dot{\tau}_o}{c} \right) a \right] \\ & + \Omega_{RO} \end{aligned} \right\} \quad (41) $$ + +where we did not add a term for the cosmological constant. If this is to compare with the usual expression we could write +---PAGE_BREAK--- + +$$ \left(\frac{\dot{a}}{a}\right)^2 = H_o^2 \left\{ \begin{aligned} & \Omega_M' (1+z)^3 + \Omega_{DM}' (1+z)^3 + \Omega_{DE}' (1+z)^3 \\ & + \Omega_{RO} (1+z)^4 \end{aligned} \right\} \quad (42) $$ + +wherein the sum of the terms are taken to be unity at $z=0$ and the integration constant has been taken to be zero. Equation (41) and (42) would require + +$$ \Omega_M' \left[ \frac{R_o}{a} + \frac{1}{2} \left( 1 + \frac{H_o \dot{\tau}_o R_o}{c} \right) + \frac{1}{6} \left( \frac{H_o^2 \lambda_U}{c^2} + \frac{2H_o \dot{\tau}_o}{c} \right) a \right] + \Omega_{RO} = 1. \quad (43) $$ + +The relation between the red shift and a is + +$$ 1+z=\left[\frac{a(t_{\text{obs}})}{a(t_{\text{em}})}\right]=\frac{R_o+a}{R_o} \quad (44) $$ + +By putting Equation (44) into (41) we find + +$$ \left(\frac{\dot{a}}{a}\right)^2 = H_o^2 \left\{ \begin{aligned} & \Omega_M' \left[ z + \frac{1}{2} \left( 1 + \frac{H_o \dot{\tau}_o c^3 a}{z c^4} \right) + \frac{1}{6} \left( \frac{H_o^2 G M}{c^4} + \frac{2 H_o \dot{\tau}_o}{c} \right) a \right] \\ & + \Omega_{RO} \end{aligned} \right\} \quad (45) $$ + +The fact that these terms have expressions relating them argues that their relative values may be determined. 
+ +For example, if $\Omega_{RO}$ is taken to be small compared with the mass terms and $\Omega_M$ is set at the typical value of 0.25, then we would require + +$$ \dot{\tau}_o = \left( \frac{z c \left( 21 - 6z - \frac{H_o^2 \lambda_U a}{c^2} \right)}{H_o a (3+2z)} \right). \quad (46) $$ +---PAGE_BREAK--- + +Since the source of the light being measured left its origin some time +after the universe completed the exponential inflationary expansion +early in universe time, we would have + +$$ +\dot{\tau}_o = \left( \frac{z c 3 (7 - 2z)}{H_o a (3 + 2z)} \right). \qquad (47) +$$ + +Putting this back into Equation (45) we find + +$$ +\left(\frac{\dot{a}}{a}\right)^2 = H_o^2 \left\{ \Omega'_M \left[ z + \frac{12-2z}{3+2z} + \frac{z(7-2z)}{3+2z} \right] + \Omega_{RO} \right\}. \quad (48) +$$ + +There are three terms for the mass with different functions of z. +Now we would have + +$$ +\Omega'_{M} = \frac{(3+2z)}{4(3+2z)} = 0.25 \tag{49} +$$ + +as set above and we can then evaluate each term when z=0. Let us +associate the middle term with ΩM, the first term with ΩDMO and the +remaining term with ΩDEO. We would then have the values + +$$ +\begin{align*} +(\Omega_M]_{z=0} &= \left( \Omega'_M \frac{12-2z}{3+2z} \right]_{z=0} = 1 \\ +(\Omega_{DM}]_{z=0} &= (\Omega'_M z]_{z=0} = 0 \tag{50} \\ +(\Omega_{DE}]_{z=0} &= \left( \Omega'_M \frac{z(7-2z)}{3+2z} \right]_{z=0} = 0 +\end{align*} +$$ + +for z=0. + +Our overall equation would then be +---PAGE_BREAK--- + +$$ \left(\frac{\dot{a}}{a}\right)^2 = H_o^2 \left\{ \left[ \Omega_M z + \Omega_M \frac{12-2z}{3+2z} + \Omega_M \frac{z(7-2z)}{3+2z} \right] + \Omega_{RO} \right\}, (51) $$ + +where $Ω_M$ varies as $(1+z)^3$. + +## Comparing with Experiment + +The expansion of the universe means the distance between two distant galaxies varies with time as + +$$ L(t) \propto a(t). 
\qquad (52) $$ + +The rate of change of the distance is the speed + +$$ v = \frac{dl}{dt} = Hl, \quad H = \frac{\dot{a}}{a} \qquad (53) $$ + +where H is the time dependent Hubble parameter. + +A method of measuring the expansion of the universe comes from measuring the shift of frequencies of light, the red shift, coming from distant stars. The observed wave length, $λ_r$, of a feature in the spectrum that had wavelength $λ_e$ at emission is given by the relation + +$$ 1+z = \frac{\lambda_r}{\lambda_e} = \frac{a(t_r)}{a(t_e)}. \qquad (54) $$ + +When the velocity is given by $cz$ then Hubble's law is written as + +$$ cz = Hl \qquad (55) $$ + +from which we see that + +$$ z = \frac{HL}{c}, \text{ or } H = \frac{cz}{L}. \qquad (56) $$ +---PAGE_BREAK--- + +An additional feature of the new theory presented here is the expression for the red shift of light from distant stars. This has been shown to be [12] + +$$ z_{\text{exp}} = \frac{\Delta\lambda}{\lambda_e} = \exp\left\{ \left( \frac{-G}{c^2} \right) \left[ \frac{M_r e^{-\frac{\lambda_r}{R_r}}}{R_r} - \frac{M_e e^{-\frac{\lambda_e}{R_e}}}{R_e} \right] + \left( \frac{HL}{c} \right) \frac{\left( \frac{M_r}{R_r} \right)}{\left( \frac{M_E}{R_E} \right)} \right\} - 1, (57) $$ + +where the subscript r designates values at the time and point of reception, the subscript e represents values at the time and point of emission and the quantities, $M_E$ and $R_E$, represent the mass and mean radius of the Earth. We have also used the subscript 'exp' on the red shift to indicate that it is the experimental value of red shift measured at the receiving location. + +There are two parts to the red shift. One part is due to the gravitational fields at the points and time of emission and reception and the other part is due to the travel time between emission and reception. This is the part that involves the expansion of the universe. 
Therefore let us rewrite Equation (57) as + +$$ z_{\text{exp}} = \exp\left\{ \left( \frac{-G}{c^2} \right) \left[ \frac{M_r e^{-\frac{\lambda_r}{R_r}}}{R_r} - \frac{M_e e^{-\frac{\lambda_e}{R_e}}}{R_e} \right] \right\} \exp\left\{ \left( \frac{HL}{c} \right) \frac{\left( \frac{M_r}{R_r} \right)}{\left( \frac{M_E}{R_E} \right)} \right\} - 1, (58) $$ + +and then rearrange it to get +---PAGE_BREAK--- + +$$ z \equiv \frac{HL}{c} = \left\{ \left( \frac{G}{c^2} \right) \left[ \frac{M_r e^{-\frac{\lambda_r}{R_r}}}{R_r} - \frac{M_e e^{-\frac{\lambda_e}{R_e}}}{R_e} \right] + \log (1 + z_{\text{exp}}) \right\} \left\{ \frac{\frac{M_E}{R_E}}{\frac{M_r}{R_r}} \right\} \quad (59) $$ + +which is the red shift of the universe expansion. + +Two simplifications may now be made. First, in many cases the gravitational component of the experimental red shift may be ignored. Secondly, if we are only using the red shift data measured at the Earth's surface then Equation (59) reduces to + +$$ z \equiv \frac{HL}{c} = \log(1 + z_{\text{exp}}). \qquad (60) $$ + +This is the red shift value to be used in the expansion velocity of the universe, Equation (51), so that we may write + +$$ \left(\frac{\dot{a}}{a}\right)^2 = H_o^2 \left\{ \begin{aligned} & 0.25(1+\log(1+z_{\text{exp}}))^3 \log(1+z_{\text{exp}}) \\ & +0.25(1+\log(1+z_{\text{exp}}))^3 \frac{12-2\log(1+z_{\text{exp}})}{(3+2\log(1+z_{\text{exp}}))} \\ & +0.25(1+\log(1+z_{\text{exp}}))^3 \frac{z(7-2\log(1+z_{\text{exp}}))}{(3+2\log(1+z_{\text{exp}}))} \end{aligned} \right\} + \Omega_{RO} \quad (61) $$ + +If one can simultaneously measure the red shift and the distance to the object then Equation (56) gives a value of Hubble's parameter +---PAGE_BREAK--- + +that may be used in Equation (53) to get the universe expansion velocity. 
+ +Standard Candles + +One reason for choosing the Type Ia supernova in the universe expansion research is the assumption that the mass of this type supernova are all the same; roughly the Chandrasekhar Limit mass of 1.39 solar masses. However, a time dependent gravitational field causes this limit to change with time. This may be seen by considering the Newtonian equation of hydrostatic equilibrium known as the Tolman-Oppenheimer-Volkov [TOV] equation, or + +$$ \frac{dp}{dr} = \frac{-GM(r)(1 - H_o\tau)\rho}{r^2}. \qquad (62) $$ + +The gravitational field that is holding the star together against the internal pressure is diminishing in time. This means that the limiting mass increases in time. Supernova found closer to Earth will have more mass and therefore greater luminosity, than more distant supernova. A reduction in luminosity from the assumed constancy would show up in an analysis by making the more distant supernova appear further away than it really is. The natural conclusion, based on the time-independent gravitational field that produces the constant Chandrasekhar limiting mass, would be that the expansion of the universe is accelerating. + +Using the Virial Theorem development by Collins [13] who arrives at the Chandrasekhar limiting mass with the equation + +$$ \frac{R_o}{\left(\frac{2GM}{c^2}\right)} > 228 \left(\frac{M_{\text{Sun}}}{M}\right)^{\frac{4}{9}} \approx 200 \qquad (63) $$ +---PAGE_BREAK--- + +the time dependent gravitational field requires that this relation +become + +$$ +\frac{R_o}{\left(\frac{2GM}{c^2}\right)(1-H_o\tau)} > 228 \left(\frac{M_{sun}}{M}\right)^{\frac{4}{9}} \approx 200. \quad (64) +$$ + +This gives the limiting mass as + +$$ +M_L = M_{Ch} (1 - H_o \tau)^{-\frac{9}{4}}, \qquad (65) +$$ + +where $M_{Ch}$ is the Chandrasekhar limiting mass. 
By differentiating +Equation (65) with respect to universe time we find the limiting mass +for the type Ia supernovae to change according to + +$$ +\frac{dM_L}{d\tau} = \left( \frac{9H_o}{4} \right) M_{Ch} (1 - H_o \tau)^{-\frac{13}{4}} . \quad (66) +$$ + +Conclusions + +A time-dependent gravitational field, that gets weaker in time, shows +the physical effects of this past, stronger field in the dynamics of +spiral galaxies. This weakening gravitational field also shows up in +the analysis of the distances to, and red shift of light from, +supernovas. Here it adds terms to the universe expansion velocity +relations that are not present in the analysis of time-independent +fields. It also changes the luminosity of the supernovas that were +assumed to have constant luminosity. These effects of the time +dependent gravitational field remove the need for hypothesizing new +matter or energy to explain these effects. + +There have been many attempts in the past to find different +solutions to Einstein's field equations and to show how an expanding +universe may be viewed in different ways. Portions of the above may +---PAGE_BREAK--- + +be reminders of prior approaches. Therefore, it may prove useful to point out what is new in this article. + +Fundamentally there are three things that are new in this article. First, the fifth dimension is considered to be a real physical entity. All five dimensional theories that I know of in the past, whether by Kaluza-Klein, Einstein with his many collaborators, and others, did not consider the fifth dimension to be real and, therefore, required several terms in the resulting gauge field equations to be zero. Here these terms are non-zero and require that the gravitational potential and field be time-dependent. Second, this article uses the Weyl Gauge Principle as its basis for quantum theory and this requires that the gravitational potential be a non-singular potential. 
These two things require the gravitational field to be a time-dependent, non-singular, gauge field not seen previously. The third aspect of the article is that the Weyl Gauge Principle requires that the unit of action be dependent upon the gauge function. This requires the red-shift from distant objects to have an exponential dependence upon both the time and distance between emission and reception. The new red-shift relation becomes important in both dark matter and dark energy predictions because both phenomena are witnessed by red-shifted light. The time dependence, or weakening, of the gravitational field is the major factor in predicting effects interpreted as dark matter. The time dependence of the gravitational field also provides the major factor in predictions with respect to dark energy as it is responsible for the diminishing of the luminosity of the distant supernovas used as standard candles and the expression for the expansion of the universe. + +**References:** + +[1] Williams, P.E., Mechanical Entropy and its Implications, Entropy, 3, 76-115. +http://www.mdpi.org/entropy/list01.htm#new +---PAGE_BREAK--- + +[2] Williams, P. E., 2002, Energy and Entropy as the Fundaments of Theoretical Physics, Entropy, 4, 128-141. +http://www.mdpi.org/entropy/htm/e4040128.htm + +[3] Williams, P. E., 2007, Alternate Communications for Space Travel, Space Technology and Applications International Forum (STAIM-2007), Albuquerque, NM. + +[4] Weyl, H., 1918, Space Time Matter. + +[5] Schrödinger, E., 1922, On a Remarkable Property of the Quantum-Orbits of a Single Electron, Zeit. F. Phys. 12. + +[6] London, F., 1927, Quantum-Mechanical Interpretation of Weyl's Theory, Zeit. F. Phys. 42. + +[7] Zwicky, F., 1937. On the Masses of Nebulae and of Clusters of Nebulae, Astrophysical Journal, Vol. 86, No. 3. + +[8] Milgrom, M., 1983a, ApJ 270, 365. + +[9] Milgrom, M., 1983b, ApJ 270, 371. + +[10] Milgrom, M., 1983c, ApJ 270, 384. + +[11] Riess, et. 
al., 1998, Observational Evidence from Supernovae for an Accelerating Universe and a Cosmological Constant, Astron.J. 116, 1009-1038. + +[12] Williams, P.E., 2001b, Using the Hubble Telescope to Determine the Split of a Cosmological Object's Redshift in its Gravitational and Distance Parts, Apeiron, Vol. 8, No. 2, +http://redshift.vif.com/JournalFiles/V08NO2PDF/V08N2WIL.pdf + +[13] Collins II, 2003, Virial Theorem in Stellar Astrophysics, +http://ads.harvard.edu/books/1978vtsa.book/ \ No newline at end of file diff --git a/samples_new/texts_merged/5718759.md b/samples_new/texts_merged/5718759.md new file mode 100644 index 0000000000000000000000000000000000000000..e512cd03e178f4db5123eb0163704b4e4acb50bb --- /dev/null +++ b/samples_new/texts_merged/5718759.md @@ -0,0 +1,262 @@ + +---PAGE_BREAK--- + +Probing local density of states near the diffraction limit using nanowaveguide coupled cathode luminescence + +Yoshinori Uemura,¹ Masaru Irita,¹ Yoshikazu Homma,¹ and Mark Sadgrove*¹ + +¹Department of Physics, Faculty of Science, Tokyo University of Science, +1-3 Kagurazaka, Shinjuku-ku, Tokyo 162-8601, Japan* + +The photonic local density of states (PLDOS) determines the light matter interaction strength in nanophotonic devices. For standard dielectric devices, the PLDOS is fundamentally limited by diffraction, but its precise dependence on the size parameter *s* of a device can be non-trivial. Here, we measure the PLDOS dependence on the size parameter in a waveguide using a new technique - nanowaveguide coupled cathode luminescence (CL). We observe that depending on the position within the waveguide cross-section, the effective diffraction limit of the PLDOS varies, and the PLDOS peak shape changes. Our results are of fundamental importance for optimizing coupling to nanophotonic devices, and also open new avenues for spectroscopy based on evanescently coupled CL. + +# I. 
INTRODUCTION + +The rate of decay of an emitter into a given optical mode is governed by Fermi's golden rule, and is proportional to the photonic local density of states (PLDOS) $\rho$ associated with that mode. A fundamental limit on $\rho$ for nanophotonic devices is the diffraction limit which places a lower bound on the mode size of ~$\lambda/2$ in a given dimension [1]. Dielectric devices with a characteristic size less than this have sub-optimal PLDOS due to redistribution of mode amplitude into the evanescent region - i.e. a loss of mode confinement. An operational definition of the diffraction limit for nanodevices is, therefore, the size at which the PLDOS is maximized. + +An important class of diffraction limited nano devices is that of nanowaveguides, which are used in fields ranging from quantum optics [2] and optomechanics [3] through to particle manipulation [4]. For certain nanowaveguide types, systematic measurement of the photonic local density of states via cathode luminescence (CL) spectroscopy [5-7] has been achieved via leaky modes. In this remarkable technique, depicted in Fig. 1(a), electrons incident on a device induce luminescence, offering essentially tomographic PLDOS reconstruction due to the point-dipole-like excitation provided by the electron beam [8-10]. However, because luminescence is collected in the far-field, the PLDOS of true waveguide modes (which by definition do not couple to radiation modes) cannot be measured in general. Furthermore, although it is well known that an optimal diameter exists for coupling to nanowaveguides [11], no systematic measurement of the diffraction limited behavior of waveguide PLDOS has ever been performed to the best of our knowledge. + +Here, we detect CL emitted into a the fundamental mode of a nanowaveguide (optical fiber taper) as depicted in Fig. 1(b). We use this new technique to characterize hitherto unmeasured aspects of the waveguide mode + +PLDOS. 
In particular, we measure the PLDOS dependence on the waveguide size parameter $s$ (defined below) around the diffraction limit. Using different electron energies, we probe the PLDOS i) close to the waveguide surface, where the near-field character of the mode is strong, and ii) nearer to the waveguide center where the mode has a standard transverse wave character. These two regimes are shown to exhibit different dependence on the size parameter, and in particular a different effective diffraction limit. These results shed light on a fundamental characteristic of nanowaveguides, and illuminate the subtle nature of the widely used diffraction limit concept for nanophotonic devices. Furthermore, the new method of waveguide-coupled CL promises a novel way to create fiber coupled electrically driven photon sources and probe previously inaccessible characteristics of optical near-fields using the CL technique. + +# II. PRINCIPLE AND METHODS + +The principle of our experiment is shown in Figs. 1(b) and (c). Electrons from a scanning electron microscope (SEM) penetrate a vacuum clad silica fiber (core refractive index $n_{co} = 1.46$) of radius $a$ ($200 \text{ nm} \le a \le 1 \text{ µm}$) to a depth $\delta$ which depends on the electron energy. The electrons induce luminescence in the silica, a portion of which couples directly to the fiber fundamental modes with an intensity that depends on the photonic local density of states of the modes. As shown in Fig. 1(c), for a given value of $\delta$ and a position $y$ along the fiber cross section, the radial position $r$ and angle $\theta$ of the electron stopping position can be defined, with $\phi = \sin^{-1}(y/a)$, $r = \sqrt{y^2 + (a \cos\phi - \delta)^2}$ and $\theta = \pi/2 - \cos^{-1}(y/r)$. In Fig. 
1(d), the so-parameterized stopping point of the electrons as a function of $y$ is overlaid on the profile of a fundamental fiber mode for the case where $a = 200 \text{ nm}$, and the CL wavelength is 659 nm for three different values of $\delta$. + +As shown in Fig. 1(e), we assume that the measured light is from incoherent CL [5] which is produced in an ef- + +* mark.sadgrove@rs.tus.ac.jp +---PAGE_BREAK--- + +FIG. 1. Principle of the experiment. (a) Example of a standard cathode luminescence spectroscopy experiment. A resonant mode leaks photons which reach a detector in the far field. (b) Concept of the present work. Electrons are incident on a vacuum clad optical fiber of radius $a$ and CL is detected through the guided mode itself. (c) Electrons incident at a point $(a, \phi)$ penetrate a distance $\delta$ into the fiber to point $(r, \theta)$ and induce cathode luminescence which couples directly to the fiber fundamental mode. (d) Intensity $|e|^2$ of a circularly-polarized fundamental (HE$_{11}$) mode of the fiber with curves showing electron stopping position for $\delta = 10$ nm (solid line), $\delta = 50$ nm (dotted line) and $\delta = 100$ nm (dashed line) (e) Emission model. The energetic electron is assumed to excite an emitter within the fiber silica matrix to a high energy level which then decays by non-radiative processes before emitting a randomly polarized photon into the fiber fundamental mode with propagation constant $\beta$ at a center wavelength near 659 nm. (f) The thick red (magenta) line shows the normalized photonic local density of states $\bar{\gamma}_g$ at the fiber surface (center) nm as a function of the size parameter $s$. Also shown are $v_g/c$ (dotted blue line), and the effective refractive index of the mode $n_{\text{eff}}$ (dotted black line). 
+ +fective off-resonant excitation process in which unpaired oxygen defect centers in the silica [12] are excited to a high energy level which decays non-radiatively before a final radiative transition produces randomly polarized luminescence with a phonon-broadened spectrum. The emission is assumed to occur at the point in the material where the electron comes to a stop, i.e., a distance $\delta$ from the fiber surface. (In fact, the process is more complicated: a cascade of secondary electrons is also created after the primary electron enters the material, and CL can originate from these electrons too. For the 0.5 keV energy used predominantly in this work, this cascade region is approximately 10 nm in diameter. We treat this behavior phenomenologically by treating the electron beam as having a Gaussian distribution of a similar width and convolving this distribution with the PLDOS.) + +Assuming a single mode fiber, the coupled intensity of the CL is proportional to the decay rate $\gamma_g$ into the fundamental fiber modes at the position $\mathbf{r}_0$ in the fiber where CL is generated. In general we may write this relation as $[1, 13] \quad \gamma_g = \frac{2\mu_0\omega_0^2}{\hbar} \text{Im}[\mathbf{p} \cdot \mathbf{G}_T(\mathbf{r}_0, \mathbf{r}_0, \omega_0) \cdot \mathbf{p}]$, where $\omega_0$ is the transition resonant frequency, $\mathbf{p}$ is the dipole moment, and $\mathbf{G}_T$ is the guided mode transverse Green tensor. The imaginary part of the Green tensor may be evaluated $[13, 14]$ yielding $\text{Im}[\mathbf{G}^T(\mathbf{r}_0, \mathbf{r}_0, \omega_0)] = \frac{c^2 \mathbf{e}(\mathbf{r}_0) \mathbf{e}^*(\mathbf{r}_0)}{4v_g \omega_0}$. + +Here, $v_g$ is the mode group velocity and $\mathbf{e}(\mathbf{r}_0)$ is taken to be the normalized mode function of the positive propagating, left hand circular polarized HE$_{11}$ fundamental mode of the fiber. 
The mode function is normalized according to the condition $1 = \int d^2r n(r)^2 |\mathbf{e}(\mathbf{r}_0)|^2$, where the integral is taken over a plane perpendicular to the fiber axis. The product of mode functions is interpreted as a dyad. Details of the mode functions are given in the Appendix. In our present study, the wavelength of the modes is fixed at $\lambda = 659$ nm, and the value that the mode function takes depends on the fiber radius $a$, at the radial position $\mathbf{r}_0(y, \delta)$. Note that the quantity $|\mathbf{e}(\mathbf{r}_0)|^2$ has units m$^{-2}$ and may be considered to be a dimensionless- energy flux. This should be compared to the usual energy density associated with three dimensionally confined resonant modes. + +By circular symmetry, a randomly polarized dipole couples with the same strength to either of the two orthogonally polarized fundamental modes. We may average over dipole polarization to produce the photonic local density of states associated with the fundamental modes [1] + +$$ \rho_g(s, \mathbf{r}) = \frac{2}{3} \frac{6\omega_0}{\pi c^2} \text{Im}[\text{Tr}[\mathbf{G}(\mathbf{r}_0, \mathbf{r}_0, \omega_0)]] = \frac{|\mathbf{e}(s, \mathbf{r}_0)|^2}{v_g}, \quad (1) $$ + +where the factor of 1/3 arises from the average over dipole +---PAGE_BREAK--- + +orientations, and the factor of 2 arises due to the two possible orthogonal polarizations of the fundamental mode. + +Finally, we see that + +$$ \bar{\gamma}_g = \frac{\pi\omega_0}{3\hbar\epsilon_0} p^2 \rho_g(s, \mathbf{r}), \quad (2) $$ + +where $\bar{\gamma}_g$ is the decay rate into the fundamental modes averaged over polarization, and the dipole moment strength is assumed to be $p = |\mathbf{p}|$ in any direction. Note that for a given $s$, $\rho_g$ contains all the dependence of $\bar{\gamma}_g$ on the fiber mode behavior. Our experimental measurements are of photon count rates through the fiber over some time $\Delta t$. 
It may be seen that such measurements are proportional to $\bar{\gamma}_g \Delta t \propto \rho_g$. In practice, we normalize both our measurements and the theoretical predictions for $\rho_g$ so that their maxima are equal to unity before comparing them. We denote the so-normalized value of the PLDOS by $\bar{\rho}_g$. + +Because Maxwell's equations are scale free, the functional dependence of the local density of states on the waveguide transverse dimension *a* or the wavelength *λ* is most generally expressed using the dimensionless size parameter $s = ka = (\omega_0/c)a$, where $k = 2\pi/\lambda$. By using a tapered fiber, we allow the measurement of the PLDOS as a function of *s* for fixed *λ* and variable *a*. + +The thick red line in Fig. 1(f) shows the normalized local density of states as a function of *s* just inside the fiber surface. The thick magenta line shows the same calculation made at the fiber center. Also shown are the scaled group velocity of the fundamental mode $v_g/c$ (dotted blue line) and the effective refractive index $n_{\text{eff}}$ for the fundamental mode (dotted black line). It may be seen that the peak region of the PLDOS is associated with the transition of $v_g$ from the bulk silica value of $v_g \approx c/1.45$, to $v_g \approx c$ as the fiber mode is dominated by its evanescent component. Note that the maximum value of the unscaled PLDOS at the fiber center is almost three times larger than that just inside the fiber surface. Because the present experiment does not allow us to cleanly measure the relative amplitude of the PLDOS at these two different radial positions, we use the normalized PLDOS and focus on the differences seen in the peak position and peak width. + +The most notable aspect of the PLDOS curves for different radial positions is that the peak value occurs at a different value of *s*. In this sense, the effective diffraction limit of *s* is different depending on where in the fiber cross-section it is measured. 
This is a generic feature of waveguides (i.e. not just fibers) and occurs due to the behavior of the mode function $|e(\mathbf{r})| = A(s)F(s,r)$, where $A(s)$ is a normalization factor depending only on the size parameter, and $F(s,r)$ is in general a decreasing function of the radial distance $r$ from the fiber center. Broadly speaking, $A(s)$ sets the intensity scale at a given value of $s$ for a fixed optical power, and thus has a peaked structure which gives rise to the diffraction limit. $F(s,r)$ can generally be written in the form $F(ur/a)$, where $u = a\sqrt{n_{\text{co}}^2 k^2 - \beta^2}$ is a dimensionless wavenumber + +which increases monotonically with the waveguide size parameter *s*. As $r/a$ increases, the fall-off in *F* as a function of *u* becomes steeper, leading to the peak of the PLDOS occurring at lower *s*. This is also the reason for the narrower width of the PLDOS peak when *r* = *a* compared with *r* = 0. More details are given in the supplementary material. In this sense, despite being polarization averaged, the PLDOS near the diffraction limit contains information about the near-field nature of the mode, which is transverse near the fiber center but vectorial in nature at the fiber surface. + +Experimentally, we detect the intensity in the fiber modes by passing a single mode fiber which is adiabatically connected to the fiber taper out of the SEM vacuum via a feedthrough. The fiber can be connected to a spectrum analyzer or a modified Hanbury-Brown-Twiss setup which allows measurement of both polarization and the intensity correlation function $g^{(2)}$. In experiments, we used electron energies of 0.5 keV in a spot excitation configuration, and 2 keV in a sweep excitation configuration. CL emitted into the fiber taper passed through a 630 nm cutoff single mode fiber to ensure that only light in the fundamental modes was collected. Further details of the experiment are given in the Appendix. + +### III. 
RESULTS + +We now turn to our experimental results. First, we look at general properties of the fiber coupled cathode luminescence. The CL spectrum measured through the guided modes is shown in Fig. 2(a). A Lorentzian curve was fitted to the data and, as indicated, the center wavelength was found to be 659 nm and the full width at half maximum (FWHM) was found to be 28 nm. This spectrum is similar to that seen in silica fibers due to radiation induced defects, or the fiber drawing process itself [12]. The luminescence has been attributed to unpaired oxygen atoms in the silica matrix. + +We also checked the polarization at the fiber output by rotating both a half waveplate and a quarter waveplate before the light entered a polarizing beam splitter, and measuring the output at both ports. For both waveplates, we saw variations in intensity of about ±5% of the mean value, suggesting nearly perfect random polarization. + +Because little is known about the density of defects in silica which produce the observed cathode luminescence, we also measured the count coincidence rate of the CL through the guided modes. The normalized coincidence signal corresponds to the second order correlation function $g^{(2)}(\tau) = \langle n(t)n(t+\tau) \rangle / \langle \langle n(t) \rangle \rangle \langle n(t+\tau) \rangle \rangle$, where *n* denotes photon counts, the coincidence delay is given by $\tau$, and $\langle \cdot \rangle$ denotes a time average. For a single or few emitters, an anti-bunching dip in the coincidence rate is expected at $\tau = 0$. As seen in Fig. 2(b), the measured correlation function shows no sign of antibunching and is consistent with a relatively large number of independent +---PAGE_BREAK--- + +FIG. 2. (a) Measured spectrum of the fiber coupled CL. (b) Measured second-order correlation function $g^{(2)}(\tau)$ for a time difference $\tau$ between detection events. + +photon emitters within the excitation volume. 
+ +Next, we consider scans made of the fiber over its cross section for fiber diameters between 200 and 1000 nm. Fig. 3(a) shows raw count rates (discrete points) joined by lines to guide the eye. It is notable that a large peak is observed at $2a = 400$ nm relative to the other diameters. This is due to the increased mode confinement at this diameter. Fig. 3(b) shows the same experimental results normalized to allow easier comparison. In each case, curves showing values of $\bar{\rho}_g(a, \delta, y)$ for $\delta = 10$ nm convolved with a Gaussian profile with a standard deviation of 10 nm to account for the broad electron cascade process inside the silica. For these curves, we fitted the value of the amplitude and center position to the data. The fiber diameter was set to its experimentally measured value in the theory. Note that the colors of the points and curves correspond to the data shown in the same color in Fig. 3(a). Error bars show $\pm 1$ standard deviation over ten intensity measurements. + +The data show that the CL intensity varies only slowly across the fiber cross section. This is expected considering the circular symmetry of the coupling, i.e., a randomly polarized emitter should couple with the same strength to the fundamental modes at any position within the fiber that is a constant radial distance from its center. However, due to the stopping position on the x axis being dependent on y, the distance from the fiber center at which CL occurs changes with the change becoming larger as the penetration depth increases. + +Finally, we measured the waveguide coupled CL at different diameters using beam spot illumination at 0.5 keV ($\delta \approx 10$ nm [15]) and 2 keV ($\delta \approx 175$ nm [15]). Results of these measurements are shown in Fig. 4. The PLDOS curve is calculated at $y=0$ for the respective values of $\delta$ given above. 
The experimental results show generally good qualitative and quantitative agreement with the calculated PLDOS curve. In particular, the difference in the PLDOS peak position and the difference in the peak widths is clearly reproduced by the data. For the 0.5 keV data, we observe a peak at $s = 1.4$ whereas for 2.0 keV the peak occurs at $s = 1.9$. This corresponds to a difference in radius of 100 nm. + +#### IV. DISCUSSION + +In this work, we defined the PLDOS for the fundamental mode of an optical fiber and experimentally evaluated the PLDOS by measuring CL coupled directly to the fiber fundamental modes. Using this technique, we made the first complete measurements of the PLDOS dependence on the size parameter around the diffraction limit. We clearly demonstrated the different PLDOS behavior for points near the fiber surface and nearer to the fiber center. Although previous CL measurements of photonic crystal waveguide modes do exist, they have relied on intrinsic losses or leaky modes which coupled to the far field [7]. Likewise, although the coupling efficiency from point emitters to the modes of a fiber has been measured, these measurements suffered from large systematic errors and did not reveal the full behavior of the PLDOS itself [16]. In contrast we are able to clearly measure the difference in PLDOS behavior near the fiber surface and nearer to the fiber center even though the respective PLDOS peak positions differ by a fiber radius of just 100 nm. + +This work successfully enlarges the domain in which CL spectroscopy may be applied, from its original application to modes with a radiative component to the case of completely bound photonic states of which the modes of a waveguide are one example. It should also be possible to use our technique to couple electron beam induced luminescence from more general non-radiative modes which do not couple to the far field. 
Such modes can couple via the evanescent field of the optical fiber taper to its guided modes and thus be detected as in the present experiment, opening up CL spectroscopy to regimes which could traditionally only be measured using electron energy loss (EEL) methods. Due to the much less rigorous requirements for sample preparation and electron beam energy required for CL spectroscopy as compared with EEL spectroscopy, this is a significant addition to the electron spectroscopy toolbox. + +In terms of applications typical fiber coupled photon sources up to now have used optically excited emitters [16–18]. Our method should provide a new route to achieving waveguide-coupled, electrically driven photon sources [19–21]. In particular, the ability to simultaneously image the nanostructure surface and excite fiber coupled cathode luminescence will allow a more deterministic approach even for non-deterministically assembled composite nanodevices created by combining nanowaveguides with colloidal nanocrystals. + +For the above reasons, we believe that the technique detailed here can open new opportunities to study fundamental aspects of nano-optics by measuring PLDOS through waveguide modes, while also providing a new platform for applications. + +This work was supported by the Nano-Quantum Information Research Division of Tokyo University of Science. Part of this work was supported by JST CREST (Grant Number JPMJCR18I5). +---PAGE_BREAK--- + +FIG. 3. Spot scans perpendicular to the optical fiber axis for electron energies of 0.5 keV. (a) Shows unnormalized data (discrete points) for five different fiber diameters with lines connecting points to guide the eye. (b) Shows the same data normalized and fitted by $\bar{p}_g(a, \delta, y)$ convolved with a Gaussian beam profile. From top to bottom, the data shown is for $2a = 200, 400, 600, 800,$ and $1000$ nm. Theoretical curves for $\delta = 10$ nm are shown for each case. + +FIG. 4. 
Measurement of relative PLDOS as a function of diameter. Circles show measurements made using a stationary electron beam of energy 0.5 keV at the fiber center. The measurements shown are the averaged raw data, with error bars showing the standard deviation over ten separate measurements. The red curve shows $\bar{p}_g(a, \delta = 10 \text{ nm}, y = 0)$. Triangles show similar measurements, but for a beam energy of 2.0 keV, which corresponds to $\delta = 175$ nm. The theoretical value of $\bar{p}$ in this case is shown by the magenta curve. + +## Appendix A: Fiber guided modes + +Treatments of the guided modes of step-index optical fibers may be found in a number of places [22, 23]. For convenience, we present a treatment of the mode functions that follows references [11, 24]. + +The wave equation in cylindrical coordinates for the z component of an electromagnetic mode $E(r, \phi)$ propagating along the z-axis with radial coordinate r and azimuthal coordinate $\phi$ is + +$$ \frac{\partial^2 E_z}{\partial r^2} + \frac{1}{r} \frac{\partial E_z}{\partial r} + \frac{1}{r^2} \frac{\partial^2 E_z}{\partial \phi^2} + [k^2 n^2 - \beta^2] E_z = 0, \quad (\text{A1}) $$ + +where $k = 2\pi/\lambda$ is the free space wave number, $n = n(r)$ is the refractive index, and $\beta$ is the mode propagation constant. Setting $E(r, \phi) = e(r)e_{\phi}(\phi)$, and taking $e_{\phi}(\phi) = \exp(im\phi)$ (requiring integer $m$), the radial wave equation is found to be + +$$ \frac{\partial^2 e_z}{\partial r^2} + \frac{1}{r} \frac{\partial e_z}{\partial r} + \left[ \chi^2 - \frac{m^2}{r^2} \right] e_z = 0, \quad (\text{A2}) $$ + +where $\chi^2 = k^2 n^2 - \beta^2$. Specializing to a step index fiber of radius $a$ where the core index is $n_{\text{co}}$ and the cladding index is $n_{\text{cl}}$, we split $\chi^2$ into two cases: $h^2 = k^2 n_{\text{co}}^2 - \beta^2$ in the core, and $q^2 = \beta^2 - k^2 n_{\text{cl}}^2$ in the cladding. 
Full consideration of boundary conditions restricts the solutions to + +$$ e_z = A \frac{2q K_m(qa)}{\beta J_m(ha)} J_m(hr), \quad r \le a, \quad (\text{A3}) $$ + +and + +$$ e_z = A \frac{2q}{\beta} K_m(qr), \quad r > a, \quad (\text{A4}) $$ + +for an arbitrary amplitude $A$. It can be shown that the radial and azimuthal components can be derived from $e_z$. $J_m$ and $K_m$ are Bessel functions of the first kind and modified Bessel functions of the second kind respectively, with order $m$. + +Restricting ourselves to the fundamental mode with $m=1$, and taking a clockwise circular polarization, the mode function components are + +$$ e_r = iA \frac{q K_1(qa)}{h J_1(ha)} [(1-s)J_0(hr) - (1+s)J_2(hr)] $$ + +$$ e_{\phi} = -A \frac{q K_1(qa)}{h J_1(ha)} [(1-s)J_0(hr) + (1+s)J_2(hr)] $$ + +$$ e_z = A \frac{2q K_1(qa)}{\beta J_1(ha)} J_1(hr) $$ +---PAGE_BREAK--- + +in the core and + +$$ +\begin{align*} +e_r &= iA[(1-s)K_0(qr) + (1+s)K_2(qr)] \\ +e_\phi &= -A[(1-s)K_0(qr) - (1+s)K_2(qr)] \\ +e_z &= A \frac{2q}{\beta} K_1(qr) +\end{align*} +$$ + +in the cladding. Here, we have $s = (1/q^2a^2 + 1/h^2a^2)/(J_1'(ha)/haJ_1(ha) + K_1'(qa)/qaK_1(qa))$. + +To produce the mode functions, we choose $A$ so that $\int d^2rn(r)^2|e|^2=1$, where the integral is taken over the entire $r-\phi$ plane. For brevity, we omit the expression for the integral, along with the eigenvalue equation required to find $\beta$. The appropriate expressions may be found elsewhere [11, 24]. We note that the left hand side of the normalization condition is related to but not identical to the mode power. + +Inside the fiber core (as is the case in the current work) we find + +$$ +\begin{align*} +|\mathbf{e}|^2 &= |e_r|^2 + |e_\phi|^2 + |e_z|^2 \\ +&= 2A^2 \frac{q^2 K_1^2(qa)}{h^2 J_1^2(ha)} \left[ (1-s)^2 J_0^2(hr) + \frac{2h^2}{\beta^2} J_1^2(hr) + (1+s)^2 J_2^2(hr) \right]. 
+\end{align*} +$$ + +In order to make clearer the contributions to the PL-DOS, we divide the mode function intensity into r inde- +pendent and dependent parts as follows: + +$$ +|\mathbf{e}|^2 = A^2(k, a)F^2(k, a, r), \quad (\text{A5}) +$$ + +where + +$$ +A^2(k,a) = A^2 \frac{q^2 K_1^2(qa)}{h^2 J_1^2(ha)} +$$ + +and + +$$ +F(k, a, r) = (1-s)^2 J_0^2(\mathrm{ur}/a) + \frac{h^2}{\beta^2} J_1^2(\mathrm{ur}/a) + (1+s)^2 J_2^2(\mathrm{ur}/a), +$$ + +where $u = ah$. + +From Fig. 5, it may be seen that $A(k, a)$ (black curve) has a peaked form and is responsible for the overall shape of the PLDOS, as discussed in the main text. $F(k, a, r)$ for a set value of $r/a$ is a decaying function of the size parameter $s$, with the decay rate being smaller at the fiber center ($r=0$, magenta line in Fig. 5) than at the fiber surface ($r=a$, red line in Fig. 5). When multiplied by $A(k, a)$, this behavior of the $F$ function explains both the shift in the PLDOS peak depending on $r$ and the width of the PLDOS peak. + +**Appendix B: Details of the experiment** + +The experimental setup is depicted schematically in +Fig. 6. We used the electron beam of a scanning electron +microscope (LEO 1530VP, Carl Zeiss) to excite CL in +our sample. The sample chamber was evacuated with + +FIG. 5. $A(k,a)$ (black curve), $F(k,a,r=0)$ (magenta curve) and $F(k,a,r=a)$ (red curve). + +a turbo-molecular pump down to $1 \times 10^{-3}$ Pa. The primary-electron column is a Gemini type which achieves high resolution for low energy electrons compared to a conventional SEM [25]. A schottky field emission electron source (SFE) is installed in the SEM gun chamber. The SFE has a very low beam noise and notable long term beam current stability. Primary SEM observations were made in an electron energy range of 0.5 – 2.0 keV. The beam current was measured using a Faraday cup yielding approximately 40 pA. 
The electron beam profile was evaluated using Au-Pd coated polystyrene latex spheres, of 90 nm in diameter [26, 27]. The spatial resolution (20/80% edge profile) was about 5 nm in the electron energy range used in the experiment. The electron beam was used to excite luminescence in an optical fiber taper (see below) using either a stationary spot excitation mode, or a sweep excitation mode, where the electron beam was scanned over the fiber, allowing imaging by detection of secondary electrons. + +Regarding the optical setup, the tapered fiber was manufactured from a commercial single mode fiber (780 HP) using a heat and pull technique [28]. Tapered fibers used in the experiment had a transmission of at least 90% and a typical transmission of 95%. The fiber was mounted in the SEM and its output was spliced to a standard optical fiber which passed out of the SEM through a homemade feedthrough system [29]. Regarding the mounting of the fiber taper: we used a UV cured adhesive to fix the fiber to an aluminium mount at two points maximally far from the taper center. To suppress vibrations of the fiber, we also added adhesive to one side of the taper closer to the taper center, meaning that fluorescence could only be measured through one of the fiber outputs, due to strong absorption and scattering caused by the adhesive. We note that CL can still be induced in the event of fiber vibrations, but precise measurement of the fiber diameter, as required for the current experiment, is difficult. + +For CL spectrum observation, the output fiber was +connected to a spectrometer (ACTON Spectra Pro 2300, + + +---PAGE_BREAK--- + +FIG. 6. Experimental setup. Electrons produced by SEM gun are focussed and incident on an tapered, vacuum clad optical fiber which is mounted in the SEM vacuum chamber. The optical fiber tapers adiabatically into a standard optical fiber which passes through a feedthrough and can be connected to one of two measurement systems. 
Measurement system 1 allows the measurement of the CL spectrum. Measurement system 2 allows the measurement of CL intensity, polarization and the correlation of CL photons. Acronyms used are explained in the Key. + +Princeton Instruments) equipped with a CCD detector (Pixis 100BR, Princeton Instruments) to measure the wavelength as depicted by Fig. 6, Measurement System 1. In order to measure the intensity of CL, photon polarization, and photon correlations, we used Measurement System 2 as shown in Fig. 6. We used a fiber u-bench setup with a polarizing beam splitter installed whose outputs were coupled to multimode fibers which were in turn connected to single photon counting mod- + +ules (SPCM-AQRH-14-FC, Excelitas). Count rates and photon correlation measurements were made using a two channel counter / correlator (TimeTagger20, Swabian Instruments). + +Note that in all optical detection experiments, we spliced the output of the main fiber (780HP), single mode above 780 nm in wavelength) to a fiber which was single-mode at our operating wavelength (630HP) in order to guarantee that we only measured light coupled to the fundamental mode of the fiber. + +[1] L. Novotny and B. Hecht, *Principles of nano-optics* (Cambridge university press, 2012). + +[2] I. Aharonovich, D. Englund, and M. Toth, Nature Photonics **10**, 631 (2016). + +[3] B. Khanaliloo, H. Jayakumar, A. C. Hryciw, D. P. Lake, H. Kaviani, and P. E. Barclay, Physical Review X **5**, 041051 (2015). + +[4] A. H. Yang, S. D. Moore, B. S. Schmidt, M. Klug, M. Lipson, and D. Erickson, Nature **457**, 71 (2009). + +[5] F. G. De Abajo, Reviews of modern physics **82**, 209 (2010). + +[6] A. Polman, M. Kociak, and F. J. G. de Abajo, Nature materials **18**, 1158 (2019). + +[7] B. J. Brenny, D. M. Beggs, R. E. van der Wel, L. Kuipers, and A. Polman, ACS Photonics **3**, 2112 (2016). + +[8] A. C. Atre, B. J. Brenny, T. Coenen, A. García-Etxarri, A. Polman, and J. A. 
Dionne, Nature nanotechnology **10**, 429 (2015). + +[9] R. Sapienza, T. Coenen, J. Renger, M. Kuttge, N. Van Hulst, and A. Polman, Nature materials **11**, 781 (2012). + +[10] A. Hörl, G. Haberfehlner, A. Trügler, F.-P. Schmidt, U. Hohenester, and G. Kothleitner, Nature communications **8**, 1 (2017). + +[11] F. Le Kien, S. D. Gupta, V. Balykin, and K. Hakuta, Physical Review A **72**, 032509 (2005). + +[12] G. Sigel Jr and M. Marrone, Journal of Non-Crystalline Solids **45**, 235 (1981). + +[13] T. Søndergaard and B. Tromborg, Physical Review A **64**, 033812 (2001). +---PAGE_BREAK--- + +[14] F. Le Kien, D. Kornovan, S. S. S. Hejazi, V. G. Truong, M. Petrov, S. N. Chormaic, and T. Busch, New Journal of Physics **20**, 093031 (2018). + +[15] B. Raftari, N. Budko, and K. Vuik, AIP Advances **8**, 015307 (2018). + +[16] R. Yalla, F. Le Kien, M. Morinaga, and K. Hakuta, Phys. Rev. Lett. **109**, 063602 (2012). + +[17] M. Fujiwara, K. Toubaru, T. Noda, H.-Q. Zhao, and S. Takeuchi, Nano letters **11**, 4362 (2011). + +[18] R. Yalla, M. Sadgrove, K. P. Nayak, and K. Hakuta, Physical review letters **113**, 143601 (2014). + +[19] E. Le Moal, S. Marguet, B. Rogez, S. Mukherjee, P. Dos Santos, E. Boer-Duchemin, G. Comtet, and G. Dujardin, Nano letters **13**, 4198 (2013). + +[20] L. Tizei and M. Kociak, Physical Review Letters **110**, 153604 (2013). + +[21] S. Meuret, L. Tizei, T. Cazimajou, R. Bourrellier, H. Chang, F. Treussart, and M. Kociak, Physical review letters **114**, 197401 (2015). + +[22] K. Okamoto, Fundamentals of optical waveguides (Academic press, 2006). + +[23] F. Le Kien, J. Liang, K. Hakuta, and V. Balykin, Optics Communications **242**, 445 (2004). + +[24] F. Le Kien, T. Busch, V. G. Truong, and S. N. Chormaic, Physical Review A **96**, 023835 (2017). + +[25] H. Jaksch and J. Martin, Fresenius' journal of analytical chemistry **353**, 378 (1995). + +[26] M. Irita, S. Yamazaki, H. Nakahara, and Y. 
Saito, in *IOP Conference Series: Materials Science and Engineering*, Vol. 304 (IOP Publishing, 2018) p. 012006. + +[27] M. Irita, H. Nakahara, and Y. Saito, e-Journal of Surface Science and Nanotechnology **16**, 84 (2018). + +[28] J. M. Ward, D. G. O'Shea, B. J. Shortt, M. J. Morrissey, K. Deasy, and S. G. Nic Chormaic, Review of Scientific Instruments **77**, 083105 (2006), https://doi.org/10.1063/1.2239033. + +[29] E. R. Abraham and E. A. Cornell, Appl. Opt. **37**, 1762 (1998). \ No newline at end of file diff --git a/samples_new/texts_merged/598288.md b/samples_new/texts_merged/598288.md new file mode 100644 index 0000000000000000000000000000000000000000..b39e101b4e7c2976898f38f4cb787a3f6abeef04 --- /dev/null +++ b/samples_new/texts_merged/598288.md @@ -0,0 +1,14996 @@ + +---PAGE_BREAK--- + +Harmonic +Oscillators and +Two-by-two Matrices +in Symmetry +Problems in Physics + +Edited by +Young Suh Kim + +Printed Edition of the Special Issue Published in Symmetry +---PAGE_BREAK--- + +# Harmonic Oscillators and Two-By-Two Matrices in Symmetry Problems in Physics + +Special Issue Editor +Young Suh Kim +---PAGE_BREAK--- + +Young Suh Kim +University of Maryland +USA + +*Editorial Office* +MDPI AG +St. Alban-Anlage 66 +Basel, Switzerland + +This edition is a reprint of the Special Issue published online in the open access journal *Symmetry* (ISSN 2073-8994) from 2014–2017 (available at: http://www.mdpi.com/journal/symmetry/special_issues/physics-matrices). + +For citation purposes, cite each article independently as indicated on the article page online and as indicated below: + +Author 1; Author 2. Article title. *Journal Name Year, Article number*, page range. 
+ +First Edition 2017 + +ISBN 978-3-03842-500-7 (Pbk) +ISBN 978-3-03842-501-4 (PDF) + +Articles in this volume are Open Access and distributed under the Creative Commons Attribution license (CC BY), which allows users to download, copy and build upon published articles even for commercial purposes, as long as the author and publisher are properly credited, which ensures maximum dissemination and a wider impact of our publications. The book taken as a whole is © 2017 MDPI, Basel, Switzerland, distributed under the terms and conditions of the Creative Commons license CC BY-NC-ND (http://creativecommons.org/licenses/by-nc-nd/4.0/). +---PAGE_BREAK--- + +# Table of Contents + +
About the Special Issue EditorV
Preface to "Harmonic Oscillators and Two-by-two Matrices in Symmetry Problems in Physics"VII
+ +## Chapter 1 + +**Orlando Panella and Pinaki Roy** +Pseudo Hermitian Interactions in the Dirac Equation +Reprinted from: *Symmetry* **2014**, *6*(1), 103–110, doi: 10.3390/sym6010103 +3 + +**Ettore Minguzzi** +Spacetime Metrics from Gauge Potentials +Reprinted from: *Symmetry* **2014**, *6*(2), 164–170, doi: 10.3390/sym6020164 +9 + +**Andrea Quadri** +Quantum Local Symmetry of the D-Dimensional Non-Linear Sigma Model: A Functional Approach +Reprinted from: *Symmetry* **2014**, *6*(2), 234–255; doi: 10.3390/sym6020234 +15 + +**Lock Yue Chew and Ning Ning Chung** +Dynamical Relation between Quantum Squeezing and Entanglement in Coupled Harmonic Oscillator System +Reprinted from: *Symmetry* **2014**, *6*(2), 295–307; doi: 10.3390/sym6020295 +34 + +**F. De Zela** +Closed-Form Expressions for the Matrix Exponential +Reprinted from: *Symmetry* **2014**, *6*(2), 329–344; doi: 10.3390/sym6020329 +45 + +**Luis L. Sánchez-Soto and Juan J. Monzón** +Invisibility and PT Symmetry: A Simple Geometrical Viewpoint +Reprinted from: *Symmetry* **2014**, *6*(2), 396–408; doi: 10.3390/sym6020396 +59 + +**Sibel Başkal, Young S. Kim and Marilyn E. Noz** +Wigner's Space-Time Symmetries Based on the Two-by-Two Matrices of the Damped Harmonic Oscillators and the Poincaré Sphere +Reprinted from: *Symmetry* **2014**, *6*(3), 473–515; doi: 10.3390/sym6030473 +70 + +## Chapter 2 + +**Heung-Ryoul Noh** +Analytical Solutions of Temporal Evolution of Populations in Optically-Pumped Atoms with Circularly Polarized Light +Reprinted from: *Symmetry* **2016**, *8*(3), 17; doi: 10.3390/sym8030017 +111 + +**M. 
Howard Lee** +Local Dynamics in an Infinite Harmonic Chain +Reprinted from: *Symmetry* **2016**, *8*(4), 22; doi: 10.3390/sym8040022 +123 +---PAGE_BREAK--- + +**Christian Baumgarten** +Old Game, New Rules: Rethinking the Form of Physics +Reprinted from: *Symmetry* **2016**, *8*(5), 30; doi: 10.3390/sym8050030..................................................135 + +**Anaelle Hertz, Sanjib Dey, Véronique Hussin and Hichem Eleuch** +Higher Order Nonclassicality from Nonlinear Coherent States for Models with +Quadratic Spectrum +Reprinted from: *Symmetry* **2016**, *8*(5), 36; doi: 10.3390/sym8050036..................................................170 + +**Gabriel Amador, Kiara Colon, Nathalie Luna, Gerardo Mercado, Enrique Pereira and Erwin Suazo** +On Solutions for Linear and Nonlinear Schrödinger Equations with Variable Coefficients: A +Computational Approach +Reprinted from: *Symmetry* **2016**, *8*(6), 38; doi: 10.3390/sym8060038..................................................179 + +**Alexander Rauh** +Coherent States of Harmonic and Reversed Harmonic Oscillator +Reprinted from: *Symmetry* 2016, *8*(6), 46; doi:10.3390/sym8060046..................................................195 + +**Sibel Başkal, Young S. Kim and Marilyn E. Noz** +Entangled Harmonic Oscillators and Space-Time Entanglement +Reprinted from: *Symmetry* **2016**, *8*(7), 55; doi: 10.3390/sym8070055..................................................207 + +**Halina Grushevskaya and George Krylov** +Massless Majorana-Like Charged Carriers in Two-Dimensional Semimetals +Reprinted from: *Symmetry* **2016**, *8*(7), 60; doi: 10.3390/sym8070060..................................................233 + +Chapter 3 + +**Young S. Kim and Marilyn E. Noz** +Lorentz Harmonics, Squeeze Harmonics and Their Physical Applications +Reprinted from: *Symmetry* **2011**, *3*, 16–36; doi: 10.3390/sym3010016 ..................................................247 + +**Young S. Kim and Marilyn E. 
Noz** +Dirac Matrices and Feynman's Rest of the Universe +Reprinted from: *Symmetry* **2012**, *4*, 626–643; doi: 10.3390/sym4040626..................................................266 + +**Young S. Kim and Marilyn E. Noz** +Symmetries Shared by the Poincaré Group and the Poincaré Sphere +Reprinted from: *Symmetry* **2013**, *5*, 233–252; doi: 10.3390/sym5030233..................................................282 + +**Sibel Baskal, Young S. Kim and Marilyn E. Noz** +Wigner's Space-Time Symmetries Based on the Two-by-Two Matrices of the Damped Harmonic +Oscillators and the Poincaré Sphere +Reprinted from: *Symmetry* **2014**, *6*, 473–515; doi: 10.3390/sym6030473..................................................299 + +**Sibel Baskal, Young S. Kim and Marilyn E. Noz** +Loop Representation of Wigner's Little Groups +Reprinted from: *Symmetry* **2017**, *9*(7), 97; doi: 10.3390/sym9070097..................................................338 +---PAGE_BREAK--- + +About the Special Issue Editor + +**Young Suh Kim** Dr. Kim came to the United States from South Korea in 1954 after high school graduation, to become a freshman at the Carnegie Institute of Technology (now called Carnegie Mellon University) in Pittsburgh. In 1958, he went to Princeton University to pursue graduate studies in Physics and received his PhD degree in 1961. In 1962, he became an assistant professor of Physics at the University of Maryland at College Park near Washington, DC. In 2007, Dr. Kim became a professor emeritus at the same university and thus became a full-time physicist. Dr. Kim's thesis advisor at Princeton was Sam Treiman, but he had to go to Eugene Wigner when faced with fundamental problems in physics. During this process, he became interested in Wigner's 1939 paper on internal space-time symmetries of physics. Since 1978, his publications have been based primarily on constructing mathematical formulas for understanding this paper. In 1988, Dr. 
Kim noted that the same set of mathematical devices is applicable to squeezed states in quantum optics. Since then, he has also been publishing papers on optical and information sciences. +---PAGE_BREAK--- + + +---PAGE_BREAK--- + +Preface to "Harmonic Oscillators and Two-by-two Matrices in Symmetry Problems in Physics" + +This book consists of articles published in the two Special Issues entitled "Physics Based on Two-By-Two Matrices" and "Harmonic Oscillators in Modern Physics", in addition to the articles published by the issue editor that are not in those Special Issues. + +With a degree of exaggeration, modern physics is the physics of harmonic oscillators and two-by-two matrices. Indeed, they constitute the basic language for the symmetry problems in physics, and thus the main theme of this journal. There is nothing special about the articles published in these Special Issues. In one way or another, most of the articles published in this *Symmetry* journal are based on these two mathematical instruments. + +What is special is that the authors of these two Special Issues were able to recognize this aspect of the symmetry problems in physics. They are not the first to do this. In 1963, Eugene Wigner was awarded the Nobel prize for introducing group theoretical methods to physical problems. Wigner's basic scientific language consisted of two-by-two matrices. + +Paul A. M. Dirac's four-by-four matrices are two-by-two matrices of two-by-two matrices. In addition, Dirac had another scientific language. He was quite fond of harmonic oscillators. He used the oscillator formalism for the Fock space which is essential to second quantification and quantum field theory. The role of Gaussian functions in coherent and squeezed states in quantum optics is well known. In addition, the oscillator wave functions are used as approximations for many complicated wave functions in physics. 
+ +Needless to say, special relativity and quantum mechanics are two of the greatest achievements in physics of the past century. Dirac devoted lifelong efforts to making quantum mechanics compatible with Einstein's special relativity. He was interested in oscillator wave functions that can be Lorentz-boosted. + +This journal will be publishing many interesting papers based on two-by-two matrices and harmonic oscillators. The authors will be very happy to acknowledge that they are following the examples of Dirac and Wigner. We all respect them. + +Young Suh Kim +*Special Issue Editor* +---PAGE_BREAK--- + + +---PAGE_BREAK--- + +# Chapter 1: +Two-by-Two Matrices +---PAGE_BREAK--- + + +---PAGE_BREAK--- + +Article + +Pseudo Hermitian Interactions in the Dirac Equation + +Orlando Panella ¹,* and Pinaki Roy ² + +¹ INFN—Istituto Nazionale di Fisica Nucleare, Sezione di Perugia, Via A. Pascoli, Perugia 06123, Italy + +² Physics and Applied Mathematics Unit, Indian Statistical Institute, 203 Barrackpur Trunk Road Kolkata 700108, India; E-Mail: pinaki@isical.ac.in + +* E-Mail: orlando.panella@pg.infn.it; Tel.: +39-075-585-2762; Fax: +39-075-584-7296. + +Received: 31 July 2013; in revised form: 18 December 2013 / Accepted: 23 December 2013 / +Published: 17 March 2014 + +**Abstract:** We consider a (2 + 1)-dimensional massless Dirac equation in the presence of complex vector potentials. It is shown that such vector potentials (leading to complex magnetic fields) can produce bound states, and the Dirac Hamiltonians are η-pseudo Hermitian. Some examples have been explicitly worked out. + +**Keywords:** pseudo Hermitian Hamiltonians; two-dimensional Dirac Equation; complex magnetic fields + +# 1. Introduction + +In recent years, the massless Dirac equation in (2 + 1) dimensions has drawn a lot of attention, primarily because of its similarity to the equation governing the motion of charge carriers in graphene [1,2]. 
In view of the fact that electrostatic fields alone cannot provide confinement of the electrons, there have been quite a number of works on exact solutions of the relevant Dirac equation with different magnetic field configurations, for example, square well magnetic barriers [3–5], non-zero magnetic fields in dots [6], decaying magnetic fields [7], solvable magnetic field configurations [8], etc. On the other hand, at the same time, there have been some investigations into the possible role of non-Hermiticity and *PT* symmetry [9] in graphene [10–12], optical analogues of relativistic quantum mechanics [13] and relativistic non-Hermitian quantum mechanics [14], photonic honeycomb lattice [15], etc. Furthermore, the (2 + 1)-dimensional Dirac equation with non-Hermitian Rashba and scalar interaction was studied [16]. Here, our objective is to widen the scope of incorporating non-Hermitian interactions in the (2 + 1)-dimensional Dirac equation. We shall introduce η pseudo Hermitian interactions by using imaginary vector potentials. It may be noted that imaginary vector potentials have been studied previously in connection with the localization/delocalization problem [17,18], as well as *PT* phase transition in higher dimensions [19]. Furthermore, in the case of the Dirac equation, there are the possibilities of transforming real electric fields to complex magnetic fields and vice versa by the application of a complex Lorentz boost [20]. To be more specific, we shall consider η-pseudo Hermitian interactions [21] within the framework of the (2 + 1)-dimensional massless Dirac equation. In particular, we shall examine the exact bound state solutions in the presence of imaginary magnetic fields arising out of imaginary vector potentials. We shall also obtain the η operator, and it will be shown that the Dirac Hamiltonians are η-pseudo Hermitian. + +# 2. 
The Model + +The (2 + 1)-dimensional massless Dirac equation is given by: + +$$ H\psi = E\psi, \quad H = c\sigma \cdot P = c \begin{pmatrix} 0 & P_- \\ P_+ & 0 \end{pmatrix}, \quad \psi = \begin{pmatrix} \psi_1 \\ \psi_2 \end{pmatrix} \tag{1} $$ +---PAGE_BREAK--- + +where $c$ is the velocity of light and: + +$$P_{\pm} = (P_x \pm iP_y) = (p_x + A_x) \pm i(p_y + A_y) \quad (2)$$ + +In order to solve Equation (1), it is necessary to decouple the spinor components. Applying the operator, $\mathcal{H}$, from the left in Equation (1), we find: + +$$c^2 \begin{pmatrix} P_- P_+ & 0 \\ 0 & P_+ P_- \end{pmatrix} \psi = E^2 \psi \quad (3)$$ + +Let us now consider the vector potential to be: + +$$A_x = 0, \quad A_y = f(x) \quad (4)$$ + +so that the magnetic field is given by: + +$$B_z(x) = f'(x) \quad (5)$$ + +For the above choice of vector potentials, the component wave functions can be taken of the form: + +$$\psi_{1,2}(x,y) = e^{ik_y y} \phi_{1,2}(x) \quad (6)$$ + +Then, from (3), the equations for the components are found to be (in units of $\hbar = 1$): + +$$ \begin{aligned} \left[-\frac{d^2}{dx^2} + W^2(x) + W'(x)\right] \phi_1(x) &= \epsilon^2 \phi_1(x) \\ \left[-\frac{d^2}{dx^2} + W^2(x) - W'(x)\right] \phi_2(x) &= \epsilon^2 \phi_2(x) \end{aligned} \quad (7) $$ + +where $\epsilon = (E/c)$, and the function, $W(x)$, is given by: + +$$W(x) = k_y + f(x) \quad (8)$$ + +## 2.1. Complex Decaying Magnetic Field + +It is now necessary to choose the function, $f(x)$. Our first choice for this function is: + +$$f(x) = -(A + iB)e^{-x}, \quad -\infty < x < \infty \quad (9)$$ + +where $A > 0$ and $B$ are constants. This leads to a complex exponentially decaying magnetic field: + +$$B_z(x) = (A + iB)e^{-x} \quad (10)$$ + +For $B = 0$ or a purely imaginary number (such that $(A + iB) > 0$), the magnetic field is an exponentially decreasing one, and we recover the case considered in [7,8]. 
+ +Now, from the second of Equation (7), we obtain: + +$$\left[-\frac{d^2}{dx^2} + V_2(x)\right] \phi_2 = (\epsilon^2 - k_y^2) \phi_2 \quad (11)$$ + +where: + +$$V_2(x) = k_y^2 + (A + iB)^2 e^{-2x} - (2k_y + 1)(A + iB) e^{-x} \quad (12)$$ +---PAGE_BREAK--- + +It is not difficult to recognize $V_2(x)$ in Equation (12) as the complex analogue of the Morse potential whose solutions are well known [22,23]. Using these results, we find: + +$$ +\begin{align} +E_{2,n} &= \pm c \sqrt{k_y^2 - (k_y - n)^2} \\ +\phi_{2,n} &= t^{k_y-n} e^{-t/2} L_n^{(2k_y-2n)}(t), \quad n = 0, 1, 2, \dots < [k_y] +\end{align} +\tag{13} +$$ + +where $t = 2(A + iB)e^{-x}$ and $L_n^{(a)}(t)$ denote generalized Laguerre polynomials. The first point to note here is that for the energy levels to be real, it follows from Equation (13) that the corresponding eigenfunctions are normalizable when the condition $k_y \ge 0$ holds. For $k_y < 0$, the wave functions are not normalizable, i.e., no bound states are possible. + +Let us now examine the upper component, $\phi_1$. Since $\phi_2$ is known, one can always use the +intertwining relation: + +$$ +cP_{-}\psi_{2} = E\psi_{1} \qquad (14) +$$ + +to obtain $\phi_1$. Nevertheless, for the sake of completeness, we present the explicit results for $\phi_1$. In this +case, the potential analogous to Equation (12) reads: + +$$ +V_1(x) = k_y^2 + (A + iB)^2 e^{-2x} - (2k_y - 1)(A + iB) e^{-x} \quad (15) +$$ + +Clearly, $V_1(x)$ can be obtained from $V_2(x)$ by the replacement $k_y \rightarrow k_y - 1$, and so, the solutions can be +obtained from Equation (13) as: + +$$ +\begin{gather*} +E_{1,n} = \pm c \sqrt{k_y^2 - (k_y - n - 1)^2} \\ +\phi_{1,n} = t^{k_y-n-1} e^{-t/2} L_n^{(2k_y-2n-2)}(t), \quad n=1,2,\dots, [k_y-1] +\end{gather*} +\tag{16} +$$ + +Note that the *n* = 0 state is missing from the spectrum Equation (16), so that it is a singlet state. 
+Furthermore, *E*2,*n*+1 = *E*1,*n*, so that the ground state is a singlet, while the excited ones are doubly +degenerate. Similarly, the negative energy states are also paired. In this connection, we would like to +note that {*H*, σ3} = 0, and consequently, except for the ground state, there is particle hole symmetry. +The wave functions for the holes are given by σ3ψ. The precise structure of the wave functions of the +original Dirac equation are as follows (we present only the positive energy solutions): + +$$ +\begin{equation} +\begin{aligned} +E_0 &= 0, & \psi_0 &= \begin{pmatrix} 0 \\ \phi_{2,0} \end{pmatrix} \\ +E_{n+1} &= c \sqrt{k_y^2 - (k_y - n - 1)^2}, & \psi_{n+1} &= \begin{pmatrix} \phi_{1,n} \\ \phi_{2,n+1} \end{pmatrix}, +\end{aligned} +\tag{17} +\end{equation} +$$ + +It is interesting to note that the spectrum does not depend on the magnetic field. Furthermore, the dispersion relation is no longer linear, as it should be in the presence of interactions. It is also easily checked that when the magnetic field is reversed, i.e., $A \to -A$ and $B \to -B$ with the simultaneous change of $k_y \to -k_y$, the two potentials $V_{1,2}(x) = W(x) \pm W'(x)$ go one into each other, $V_1(x) \leftrightarrow V_2(x)$. Therefore, the solutions are correspondingly interchanged, $\phi_{1,n} \leftrightarrow \phi_{2,n}$ and $E_{1,n} \leftrightarrow E_{2,n}$, but retain the same functional form as in Equations (13) and (16). + +Therefore, we find that it is indeed possible to create bound states with an imaginary vector potential. We shall now demonstrate the above results for a second example. +---PAGE_BREAK--- + +## 2.2. 
Complex Hyperbolic Magnetic Field + +Here, we choose $f(x)$, which leads to an effective potential of the complex hyperbolic Rosen-Morse type: + +$$f(x) = A \tanh(x - i\alpha), \quad -\infty < x < \infty, \quad A \text{ and } \alpha \text{ are real constants} \tag{18}$$ + +In this case, the complex magnetic field is given by: + +$$B_z(x) = A \sech^2(x - i\alpha) \tag{19}$$ + +Note that for $\alpha = 0$, we get back the results of [8,24]. Using Equation (18) in the second half of Equation (7), we find: + +$$[-\frac{d^2}{dx^2} + U_2(x)] \phi_2 = (\epsilon^2 - k_y^2 - A^2)\phi_2 \tag{20}$$ + +where + +$$U_2(x) = k_y^2 - A(A+1) \operatorname{sech}^2(x - i\alpha) + 2Ak_y \tanh(x - i\alpha) \tag{21}$$ + +This is the Hyperbolic Rosen-Morse potential with known energy values and eigenfunctions. In the present case, the eigenvalues and the corresponding eigenfunctions are given by [23,25]: + +$$E_{2,n} = \pm c \sqrt{A^2 + k_y^2 - (A-n)^2 - \frac{A^2 k_y^2}{(A-n)^2}}, \quad n = 0, 1, 2, \dots < [A - \sqrt{Ak_y}] \tag{22}$$ + +$$\phi_{2,n} = (1-t)^{s_1/2} (1+t)^{s_2/2} P_n^{(s_1,s_2)}(t)$$ + +where $P_n^{(a,b)}(z)$ denotes Jacobi polynomials and: + +$$t = \tanh x, \quad s_{1,2} = A - n \pm \frac{Ak_y}{A-n} \tag{23}$$ + +The energy values corresponding to the upper component of the spinor can be found out by replacing $A$ by $(A-1)$, and $\phi_1$ can be found out using relation Equation (14). + +# 3. η-Pseudo Hermiticity + +Let us recall that a Hamiltonian is η-pseudo Hermitian if [21]: + +$$\eta H \eta^{-1} = H^{\dagger} \tag{24}$$ + +where $\eta$ is a Hermitian operator. It is known that eigenvalues of a $\eta$-pseudo Hermitian Hamiltonian are either all real or are complex conjugate pairs [21]. In view of the fact that in the present examples, the eigenvalues are all real, one is tempted to conclude that the interactions are $\eta$ pseudo Hermitian. 
To this end, we first consider case 1, and following [26], let us consider the Hermitian operator: + +$$\eta = e^{-\theta p_x}, \quad \theta = \arctan \frac{B}{A} \tag{25}$$ + +Then, it follows that: + +$$\eta c \eta^{-1} = c, \quad \eta p_x \eta^{-1} = p_x, \quad \eta V(x) \eta^{-1} = V(x + i\theta) \tag{26}$$ +---PAGE_BREAK--- + +We recall that in both the cases considered here, the Hamiltonian is of the form: + +$$H = c\sigma \cdot P = c \begin{pmatrix} 0 & P_{-} \\ P_{+} & 0 \end{pmatrix} \qquad (27)$$ + +where, for the first example: + +$$P_{\pm} = p_x \pm ip_y \pm i(A + iB)e^{-x} \qquad (28)$$ + +Then: + +$$H^{\dagger} = c \begin{pmatrix} 0 & P_{+}^{\dagger} \\ P_{-}^{\dagger} & 0 \end{pmatrix} \qquad (29)$$ + +Now, from Equation (28), it follows that: + +$$P_{+}^{\dagger} = p_{x} - ip_{y} - i(A - iB)e^{-x}, \quad P_{-}^{\dagger} = p_{x} + ip_{y} + i(A - iB)e^{-x} \qquad (30)$$ + +and using Equation (26), it can be shown that: + +$$\eta P_{+}\eta^{-1} = p_{x} + ip_{y} + i(A - iB)e^{-x} = P_{-}^{\dagger}, \quad \eta P_{-}\eta^{-1} = p_{x} - ip_{y} - i(A - iB)e^{-x} = P_{+}^{\dagger} \qquad (31)$$ + +Next, to demonstrate the pseudo Hermiticity of the Dirac Hamiltonian Equation (27), let us consider +the operator $\eta' = \eta \cdot I_2$, where $I_2$ is the $(2 \times 2)$ unit matrix. Then, it can be shown that: + +$$\eta' H \eta'^{-1} = H^{\dagger} \qquad (32)$$ + +Thus, the Dirac Hamiltonian with a complex decaying magnetic field Equation (10) is $\eta$-pseudo Hermitian. + +For the magnetic field given by Equation (19), the operator, $\eta$, can be found by using relations Equation (26). After a straightforward calculation, it can be shown that the $\eta$ operator is given by: + +$$\eta = e^{-2\alpha p_x} \qquad (33)$$ + +so that, in this second example, also, the Dirac Hamiltonian is $\eta$-pseudo Hermitian. + +**4. 
Conclusions** + +Here, we have studied the (2 + 1)-dimensional massless Dirac equation (we note that if a massive particle of mass *m* is considered, the energy spectrum in the first example would become *E**n* = *c*√(*k**y*2 + *m*2*c*2 − (*k**y* − *n*)2). Similar changes will occur in the second example, too), in the presence of complex magnetic fields, and it has been shown that such magnetic fields can create bound states. It has also been shown that Dirac Hamiltonians in the presence of such magnetic fields are η-pseudo Hermitian. We feel it would be of interest to study the generation of bound states using other types of magnetic fields, e.g., periodic magnetic fields. + +**Acknowledgments:** One of us (P. R.) wishes to thank INFN Sezione di Perugia for supporting a visit during which part of this work was carried out. He would also like to thank the Physics Department of the University of Perugia for its hospitality. + +**Conflicts of Interest:** The authors declare no conflict of interest. + +**References** + +1. Novoselov, K.S.; Geim, A.K.; Morozov, S.V.; Jiang, D.; Zhang, Y.; Dubonos, S.V.; Grigorieva, I.V.; Firsov, A.A. Electric field effect in atomically thin carbon films. *Science* **2004**, *306*, 666–669. +2. Novoselov, K.S.; Geim, A.K.; Morozov, S.V.; Jiang, D.; Katsnelson, M.I.; Grigorieva, I.V.; Dubonos, S.V.; Firsov, A.A. Two-dimensional gas of massless Dirac fermions in graphene. *Nature* **2005**, *438*, 197–200. +---PAGE_BREAK--- + +3. De Martino, A.; Dell'Anna, L.; Egger, R. Magnetic confinement of massless dirac fermions in graphene. *Phys. Rev. Lett.* **2007**, 98, 066802:1–066802:4. + +4. De Martino, A.; Dell'Anna L.; Eggert, R. Magnetic barriers and confinement of Dirac-Weyl quasiparticles in graphene. *Solid State Commun.* **2007**, 144, 547–550. + +5. Dell'Anna, L.; de Martino, A. Multiple magnetic barriers in graphene. *Phys. Rev. B* **2009**, 79, 045420:1–045420:9. + +6. Wang, D.; Jin, G. 
Bound states of Dirac electrons in a graphene-based magnetic quantum dot. *Phys. Lett. A* **2009**, 373, 4082–4085. + +7. Ghosh, T.K. Exact solutions for a Dirac electron in an exponentially decaying magnetic field. *J. Phys. Condens. Matter* **2009**, 21, doi:10.1088/0953-8984/21/4/045505. + +8. Kuru, S.; Negro, J.M.; Nieto, L.M. Exact analytic solutions for a Dirac electron moving in graphene under magnetic fields. *J. Phys. Condens. Matter* **2009**, 21, doi:10.1088/0953-8984/21/45/455305. + +9. Bender, C.M.; Boettcher, S. Real spectra in non-hermitian hamiltonians having PT symmetry. *Phys. Rev. Lett.* **1998**, 80, 5243–5246. + +10. Fagotti, M.; Bonati, C.; Logoteta, D.; Marconcini, P.; Macucci, M. Armchair graphene nanoribbons: PT-symmetry breaking and exceptional points without dissipation. *Phys. Rev. B* **2011**, 83, 241406:1–241406:4. + +11. Szameit, A.; Rechtsman, M.C.; Bahat-Treidel, O.; Segev, M. PT-Symmetry in honeycomb photonic lattices. *Phys. Rev. A* **2011**, 84, 021806(R):1–021806(R):5. + +12. Esaki, K.; Sato, M.; Hasebe, K.; Kohmoto, M. Edge states and topological phases in non-Hermitian systems. *Phys. Rev. B* **2011**, 84, 205128:1–205128:19. + +13. Longhi, S. Classical simulation of relativistic quantum mechanics in periodic optical structures. *Appl. Phys. B* **2011**, 104, 453–468. + +14. Longhi, S. Optical realization of relativistic non-hermitian quantum mechanics. *Phys. Rev. Lett.* **2010**, 105, 013903:1–013903:4. + +15. Ramezani, H.; Kottos, T.; Kovanis, V.; Christodoulides, D.N. Exceptional-point dynamics in photonic honeycomb lattices with PT-symmetry. *Phys. Rev. A* **2012**, 85, 013818:1–013818:6. + +16. Mandal, B.P.; Gupta, S. Pseudo-hermitian interactions in Dirac theory: Examples. *Mod. Phys. Lett. A* **2010**, 25, 1723–1732. + +17. Hatano, N.; Nelson, D. Localization transitions in non-hermitian quantum mechanics. *Phys. Rev. Lett.* **1996**, 77, 570–573. + +18. Feinberg, J.; Zee, A. 
Non-Hermitian localization and delocalization. *Phys. Rev. E* **1999**, 59, 6433–6443. + +19. Mandal, B.P.; Mourya, B.K.; Yadav, R.K. PT phase transition in higher-dimensional quantum systems. *Phys. Lett. A* **2013**, 377, 1043–1046. + +20. Tan, L.Z.; Park, C.-H.; Louie, S.G. Graphene Dirac fermions in one dimensional field profiles; Transforming magnetic to electric field. *Phys. Rev. B* **2010**, 81, 195426:1–195426:8. + +21. Mostafazadeh, A. Pseudo-hermiticity versus PT-symmetry III: Equivalence of pseudo-Hermiticity and the presence of antilinear symmetries. *J. Math. Phys.* **2002**, 43, 3944–3951. + +22. Flügge, S. *Practical Quantum Mechanics*; Springer-Verlag: Berlin, Germany, 1974. + +23. Cooper, F.; Khare, A; Sukhatme, U. *Supersymmetry in Quantum Mechanics*; World Scientific Publishing Co. +Pte. Ltd.: Singapore, 2001. + +24. Milpas, E.; Torres, M.; Murguía, G. Magnetic field barriers in graphene: An analytically solvable model. +*J. Phys. Condens. Matter* **2011**, 23, 245304:1–245304:7. + +25. Rosen, N.; Morse, P.M. On the vibrations of polyatomic molecules. *Phys. Rev.* **1932**, 42, 210–217. + +26. Ahmed, Z. Pseudo-hermiticity of hamiltonians under imaginary shift of the coordinate: Real spectrum of complex potentials. *Phys. Lett. A* **2001**, 290, 19–22. + +© 2014 by the authors. Licensee MDPI, Basel, Switzerland. This article is an open access +article distributed under the terms and conditions of the Creative Commons Attribution +(CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +Article + +# Spacetime Metrics from Gauge Potentials + +Ettore Minguzzi + +Dipartimento di Matematica e Informatica "U. Dini", Università degli Studi di Firenze, Via S. 
Marta 3, I-50139 +Firenze, Italy; E-Mail: ettore.minguzzi@unifi.it; Tel./Fax: +39-055-4796-253 + +Received: 27 January 2014; in revised form: 21 March 2014 / Accepted: 24 March 2014 / +Published: 27 March 2014 + +**Abstract:** I present an approach to gravity in which the spacetime metric is constructed from a non-Abelian gauge potential with values in the Lie algebra of the group $U(2)$ (or the Lie algebra of quaternions). If the curvature of this potential vanishes, the metric reduces to a canonical curved background form reminiscent of the Friedmann $S^3$ cosmological metric. + +**Keywords:** gauge theory; G-structure; teleparallel theory + +## 1. Introduction + +The observational evidence in favor of Einstein's general theory of relativity has clarified that the spacetime manifold is not flat, and hence that it can be approximated by the flat Minkowski spacetime only over limited regions. Quantum Field Theory, and in particular the perturbative approach through the Feynman's integral, has shown the importance of expanding near a "classical" background configuration. Although we do not have at our disposal a quantum theory of gravity, it would be natural to take a background configuration which approximates as much as possible the homogeneous curved background that is expected to take place over cosmological scales accordingly to the cosmological principle. Therefore, it is somewhat surprising that most classical approaches to quantum gravity start from a perturbation of Minkowski's metric in the form $g_{\mu\nu} = \eta_{\mu\nu} + h_{\mu\nu}$. This approach is ill defined in general unless the manifold is asymptotically flat. Indeed, the expansion depends on the chosen coordinate system, a fact which is at odds with the principle of general covariance. + +Expanding over the flat metric is like Taylor expanding a function by taking the first linear approximation near a point. 
It is clear that the approximation cannot be good far from the point and that no firm global conclusion can be drawn from similar approaches. A good global expansion should be performed in a different way, taking into account the domain of definition of the function. So, a function defined over an interval would be better approximated with a Fourier series than with a Taylor expansion. Despite of these simple analogies, much research has been devoted to quantum gravity by means of expansions of the form $g = \eta + h$, possibly because of the lack of alternatives. + +Actually, some years ago [1] I proposed a gauge approach to gravity that solves this problem in a quite simple way and which, I believe, deserves to be better known. + +To start with let us observe that general relativity seems to privilege in its very formalism the flat background. Indeed, the Riemann curvature $\mathcal{R}$ measures the extent by which the spacetime is far from flat, namely far from the background + +$$ \mathcal{R} = 0 \Leftrightarrow (M,g) \text{ is flat.} $$ + +If the true background is not the flat Minkowski space then as a first step one would have to construct a different curvature $F$ with the property that + +$$ F = 0 \Leftrightarrow (M,g) \text{ takes the canonical background shape.} $$ + +It is indeed possible to accomplish this result. Let us first introduce some notations. +---PAGE_BREAK--- + +## 2. Some Notations from Gauge Theory + +Gauge theories were axiomatized in the fifties by Ehresmann [2] as connections over principal bundles. Since I need to fix the notation, here I shortly review that setting. A principal bundle is given by a differentiable manifold (the bundle) $P$, a differentiable manifold (the base) $M$, a projection + +$$ \pi: P \to M \qquad (1) $$ + +a Lie group $G$, and a right action of $G$ on $P$ + +$$ p \to pg \quad p \in P, \ g \in G \qquad (2) $$ + +such that $M = P/G$, i.e., $M$ is the orbit space. 
Moreover, the fiber bundle $P$ is locally the product $P = M \times G$. To be more precise, given a point $m \in M$ there is an open set $U$ of $m$, such that $\pi^{-1}(U)$ is diffeomorphic to $U \times G$ and the diffeomorphism preserves the right action. If this property holds also globally the principal bundle is called trivial. The set $\pi^{-1}(m)$ is the fiber of $m$ and it is diffeomorphic to $G$. Let $\mathcal{G}$ be the Lie algebra of $G$, and let $\tau_a$ be a base of generators + +$$ [\tau_a, \tau_b] = f_{ab}^c \tau_c \qquad (3) $$ + +Let $p \in P$ be a point of the principal bundle; it can be considered as an application $p: G \to P$ which acts as $g \to pg$. The fundamental fields (We follow mostly the conventions of Kobayashi-Nomizu. The upper star * indicates the pull-back when applied to a function, the fundamental field when applied to a generator, and the horizontal lift when applied to a curve or a tangent vector on the base.) $\tau_a^*$ over $P$ are defined in $p$ as the push-forward of the group generators: $\tau_a^* = p_*\tau_a$. They are vertical fields in the sense that they are in the ker of $\pi: \pi_*\tau_a^* = 0$. They form a base of the vertical tangent space at $p$. + +A connection over $P$ is a 1-form $\omega: P \to \mathcal{G}$ with the following properties: + +(a) $\omega(X^*) = X \quad X \in \mathcal{G}$ + +(b) $R_g^*\omega = g^{-1}\omega g$ + +The tangent space at $p$ is split into the sum of two subspaces: the vertical space, that is the ker of $\pi$, and the horizontal space, that is the ker of $\omega$ + +$$ T_p P = H_p \oplus V_p \qquad (4) $$ + +Let $U$ be an open set of $M$. A section $\sigma$ is a function $\sigma: U \to \pi^{-1}(U)$ such that $\pi \circ \sigma = I_U$. The gauge potential depends on the section and is defined by + +$$ A = \tau_a A_\mu^a dx^\mu = \sigma^* \omega \qquad (5) $$ + +where $\{x^\mu\}$ are coordinates on the base. A change of section is sometimes called gauge transformation. 
The curvature is defined by (The exterior product is defined through $\alpha \wedge \beta = \alpha \otimes \beta - \beta \otimes \alpha$ where $\alpha$ and $\beta$ are 1-forms. As a consequence $\omega \wedge \omega = [\omega, \omega]$) + +$$ \Omega = d\omega h = d\omega + \omega \wedge \omega \qquad (6) $$ + +where $h$ projects the vector arguments to the horizontal space [2]. The field strength is defined by $F = \tau_a F_{\mu\nu}^a dx^\mu dx^\nu = \sigma^*\Omega$. In other words + +$$ F_{\mu\nu}^a = \partial_\mu A_\nu^a - \partial_\nu A_\mu^a + f_{bc}^a A_\mu^b A_\nu^c \qquad (7) $$ +---PAGE_BREAK--- + +Given a section one can construct a system of coordinates over $P$ in a canonical way. Simply let $(x, g)$ be the coordinates of the point $p = \sigma(x)g$. In these coordinates the connection can be rewritten + +$$\omega = g^{-1} dg + g^{-1} A g \qquad (8)$$ + +and the curvature can be rewritten + +$$\Omega = g^{-1} F g \qquad (9)$$ + +indeed the form of the connection given here satisfies both the requirements above and $A = \sigma^*\omega$. From these last equations one easily recovers the gauge transformation rules after a change of section $\sigma' = \sigma u(x)$ ($g' = u^{-1}(x)g$), that is + +$$A'_{\mu} = u^{-1} A_{\mu} u + u^{-1} \partial_{\mu} u \qquad (10)$$ + +$$F'_{\mu\nu} = u^{-1} F_{\mu\nu} u \qquad (11)$$ + +### 3. The Background Metric + +We are used to defining a manifold through charts $\phi: U \to \mathbb{R}^4$, $U \subset M$, taking values on $\mathbb{R}^4$. Let us instead take them with value in a four-dimensional canonical manifold with enough structure to admit some natural metric. We shall use a matrix Lie group $G$, but we do not really want to give any special role to the identity of $G$. We shall see later how to solve this problem. The metric $g$ has to be constructed as a small departure from that naturally present in $G$ and which plays the role of background metric. 
+ +We take as background metric the expression + +$$g_B = I_g(\theta, \theta) \qquad (12)$$ + +where $\theta$ is the Maurer-Cartan form of the group [2], that is $\theta = g^{-1}dg$, and $I_g$ is an adjoint invariant quadratic form on the Lie algebra $\mathcal{G}$, which might depend on $g \in G$. The Maurer-Cartan form has the effect of mapping an element $v \in T_g G$ to the Lie algebra element whose fundamental vector field at $g$ is $v$. + +Of course, we demand that $g_B$ be a Lorentzian metric in a four-dimensional Lie group, and furthermore we want it to represent an isotropic cosmological background, thus $G$ has to contain the $SO(3)$ subgroup. We are led to the Abelian group of translations $T_4$ or to the group $U(2)$ (or equivalently the group of quaternions since it shares with $U(2)$ the Lie algebra). In what follows we shall only consider the latter group, the case of the Abelian translation group being simpler. + +Thus let us consider the group $U(2)$. Every matrix of this group reads $u = e^{i\lambda} r$ with $0 \le \lambda \le \pi$ where $r \in SU(2)$ (while a quaternion reads $e^{\lambda} r, \lambda \in \mathbb{R}$) + +$$r = \begin{pmatrix} r_0 + i r_3 & r_2 + i r_1 \\ -r_2 + i r_1 & r_0 - i r_3 \end{pmatrix}, \qquad \sum_{\mu=0}^{3} r_{\mu}^{2} = 1 \qquad (13)$$ + +The Lie algebra of $U(2)$ is that of anti-hermitian matrices $A$ which read + +$$A = i \begin{pmatrix} a^0 + a^3 & a^1 - ia^2 \\ a^1 + ia^2 & a^0 - a^3 \end{pmatrix} \qquad (14)$$ + +By adjoint invariance of $I_g$ we mean $I_{u'gu^\dagger}(uAu^\dagger, uAu^\dagger) = I_g(A, A)$, for any $u, u' \in U(2)$. Clearly, the adjoint invariance for the Abelian subgroup $U(1)$ is guaranteed because for $u \in U(1)$, $uAu^\dagger = A$, $u'gu^\dagger = g$. 
The expressions that satisfy this invariance property are + +$$I_g(A, A) = \frac{\alpha(\lambda)}{2} (\operatorname{tr} A)^2 - \frac{\beta(\lambda)}{2} \operatorname{tr}(A^2) \qquad (15)$$ +---PAGE_BREAK--- + +$$I_g(A, A) = -2\alpha(\lambda)(a^0)^2 + \beta(\lambda)[(a^0)^2 + (a^1)^2 + (a^2)^2 + (a^3)^2] \quad (16)$$ + +where $\alpha$ and $\beta$ are functions of the phase of $g = e^{i\lambda}r$, $r \in SU(2)$ (which is left invariant under adjoint transformations). We get a Lorentzian metric for $2\alpha > \beta$ and $\beta > 0$. With the simple choice $\alpha = \beta = 1$ we get + +$$I_g(A, A) = \det A = -(a^0)^2 + (a^1)^2 + (a^2)^2 + (a^3)^2 \quad (17)$$ + +Notice that $\text{tr}(r^\dagger dr) = 0$ and + +$$\operatorname{tr}(r^\dagger dr r^\dagger dr) = -\operatorname{tr}(dr^\dagger dr) = -2 \det(r^\dagger dr) = -2 \sum_{\mu=0}^{3} dr_\mu^2 \quad (18)$$ + +Let us recall that $\theta = \phi^\dagger d\phi$ where the group element $\phi$ reads $\phi = re^{i\lambda}$. Thus using $\text{tr}(r^\dagger dr) = 0$ we find for the background metric + +$$ +\begin{align*} +g_B = I_g(\theta, \theta) &= I \left( r^\dagger dr + i d\lambda, r^\dagger dr + i d\lambda \right) = \\ +&= -I (d\lambda, d\lambda) + I(r^\dagger dr, r^\dagger dr) = -(2\alpha - \beta)d\lambda^2 - \frac{\beta}{2}\operatorname{tr}(r^\dagger dr r^\dagger dr) = \\ +&= -(2\alpha - \beta)d\lambda^2 + \beta(dr_0^2 + dr_1^2 + dr_2^2 + dr_3^2) +\end{align*} +$$ + +Recalling the constraint $\sum_{\mu=0}^{3} r_{\mu}^{2} = 1$ we find a background metric which coincides with Friedmann's with a $S^3$ section. + +More specifically, let $\sigma_0 = I$, and let $\sigma_i$, $i = 1, 2, 3$, be the Pauli matrices. Let $\tau_\mu = i\sigma_\mu$ be a base for the Lie algebra of $U(2)$. 
Let us parametrize $\phi \in U(2)$ through + +$$\phi = e^{i\lambda\sigma_0} r = e^{i\lambda\sigma_0} e^{i\chi(\tau_1 \sin\theta \cos\varphi + \tau_2 \sin\theta \sin\varphi + \tau_3 \cos\theta)} \quad (19)$$ + +then the background metric reads + +$$g_B = -dt^2 + a^2(t) (d\chi^2 + \sin^2\chi(d\theta^2 + \sin^2\theta d\varphi^2)) \quad (20)$$ + +where + +$$t = \int_0^\lambda d\lambda' \sqrt{2\alpha(\lambda') - \beta(\lambda')} \quad (21)$$ + +and + +$$a^2(t) = \beta(\lambda(t)) \quad (22)$$ + +These calculations, first presented in [1], show that the Friedmann metric appears rather naturally from the study of the $U(2)$ group. Of course, since this argument depends only on the Lie algebra rather than the group structure, it can be repeated for the group of quaternions [3]. + +**4. Perturbing the Background** + +In this section we shall suppose that $I_g$ does not depend on $g$, namely that $\alpha$ and $\beta$ are constants, this means that we ignore the time dependence of the cosmological background. + +We mentioned that we wish to use charts $\phi: U \to G, U \subset M$, with value in a group $G$ but that we do not want to assign to the identity of $G$ any special role. To that end, let us assume for simplicity that $M$ is simply connected, and let us introduce a trivial bundle $P$ endowed with a flat connection $\tilde{\omega}$. The connection being flat is integrable, thus given an horizontal section $\tilde{\sigma}: M \to P$, and parametrizing every point of $P$ through $p(x, g) = \tilde{\sigma}(x)g$, we obtain a splitting $P \sim M \times G$. In this way the identity of $G$ does not play any special role since it refers to different points of $P$ depending on the choice of section $\tilde{\sigma}$. +---PAGE_BREAK--- + +A second section $\sigma: M \to P$ is now related to the former by $\sigma(x)\phi^{-1}(x) = \tilde{\sigma}(x)$, where $\phi: M \to G$ +is the chart we were looking for. In order to be interpreted as a chart, $\phi$ has to be injective. 
The idea is +to define the metric + +$$g = I(\tilde{A} - A, \tilde{A} - A)$$ + +where $\tilde{A} = \sigma^*\tilde{\omega}$ is the potential of the flat connection and $A = \sigma^*\omega$ is the potential of a possibly non-trivial connection. From the transformation rule for the potential (10) we obtain + +$$\tilde{A} = \phi^{-1}(x) d\phi(x)$$ + +Let us show that the metric so defined satisfies the property $F = 0 \Rightarrow$ background metric. Suppose that $F = 0$ then $\sigma$ can be chosen in such a way that $A = 0$, thus the metric becomes + +$$F=0 \quad \Rightarrow \quad g = I(\phi^{-1}(x)d\phi(x), \phi^{-1}(x)d\phi(x)) = I(\phi^*\theta, \phi^*\theta) = \phi^*g_B \qquad (23)$$ + +that is, up to a coordinate change the metric coincides with the background metric. + +We observe that $A = \tau_a A_\mu^a dx^\mu$ has 16 components, namely the same number of components as the metric. However, we have an additional degree of freedom given by $\phi(x)$. This function can be completely removed using the invertibility of this map, namely using the coordinates $\phi^\mu$ on the Lie group to parametrize $M$. In this way the metric reads + +$$g = I(\phi^{-1}d\phi - \tau_a A_\mu^a(\phi)d\phi^\mu, \phi^{-1}d\phi - \tau_a A_\mu^a(\phi)d\phi^\mu)$$ + +these coordinates are referred to as *internal coordinates*. In internal coordinates any gauge transformation +induces a coordinate transformation. For instance, the gauge potential transforms as + +$$\tau_a A'^a_c = \{u^{-1}\tau_a A^a_b u + u^{-1}\partial_b u\} \frac{\partial \phi^b}{\partial \phi'^c} \quad (24)$$ + +and the transformation law for the curvature becomes + +$$F'_{ab} = u^{-1} F_{cd} u \frac{\partial \phi^c}{\partial \phi'^a} \frac{\partial \phi^d}{\partial \phi'^b} \qquad (25)$$ + +where $\sigma' = \sigma u$ and the matrix $u(\phi)$ is related to the transformation $\phi'^a(\phi^b)$ by the product $\phi' = \phi u(\phi)$. 
In the same way it can be shown, for example, that the spacetime metric transforms as a tensor +under (24). + +One can further ask whether the Einstein equations can be rephrased as dynamical equations +for the potential $A$. The answer is affirmative and passes through the vierbein reformulation of the +Einstein-Hilbert Lagrangian. + +We recall that a tetrad field (vierbein) $e_a = e_a^\mu \partial_\mu$, is a set of four vector fields $e_a$ such that +$g^{\mu\nu} = \eta^{ab} e_a^\mu e_b^\nu$. The inverse $e^a_\mu$ is defined through $e_a^\mu e_\nu^a = \delta_\nu^\mu$. The Einstein Lagrangian can be rewritten + +$$-\frac{\sqrt{-g}}{16\pi} R = \frac{1}{8\pi} v_{,\nu} v^{\nu} + \frac{\sqrt{-g}}{16\pi} \left\{ \frac{1}{4} C^{abc} C_{abc} - C_{ac}^{a} C_{b}^{b c} + \frac{1}{2} C^{abc} C_{bac} \right\} \quad (26)$$ + +where the first term on the right-hand side is a total divergence and + +$$C_{ab}^{c} = e_{\mu}^{c} \left( e_{a}^{\nu} \partial_{\nu} e_{b}^{\mu} - e_{b}^{\nu} \partial_{\nu} e_{a}^{\mu} \right) = e_{a}^{\mu} e_{b}^{\nu} (\partial_{\nu} e_{\mu}^{c} - \partial_{\mu} e_{\nu}^{c}) \qquad (27)$$ + +In order to obtain a dynamics for $A$ we select a base $\tau_a$ for the Lie algebra such that + +$$I(\tau_a, \tau_b) = \eta_{ab}$$
At the moment I do not know how to modify the theory so as to accomplish this result (but observe that we never changed the dynamics which is always that given by the Einstein's equations). However, our framework might not need any modification. It can be shown [1] that the scale factor $a$ in front of the Einstein static Universe metric is actually the coupling constant for this theory so the expansion of the Universe could be an effect related to the renormalization of the theory. + +In the Abelian case $T_4$ (not in the $U(2)$ case) the Lagrangian can also be expressed in terms of the curvature (7). Indeed, since $f_{ab}^c = 0$ the curvature becomes coincident with the tensors $C_{bc}^a$ entering the above expression of the Lagrangian (however, observe that the potential still enters the metric and the vierbeins which are used to raise the indices of the curvature). The final expression is quadratic in the curvature $F$ and is related to the teleparallel approach to general relativity [4–7]. Issues related to the renormalizability of the dynamics determined by (26) have yet to be fully studied. + +The *OT gauge* approach has been used to infer the dynamics and is complementary to the *internal coordinates* approach mentioned above. Indeed, while the latter allows us to interpret the map $\phi: U \to G, U \subset M$, as a chart with values in $G$, the *OT frame* approach sends $\phi$ to the identity, so in the new gauge the non-injective map $\phi$ cannot be interpreted as a chart. Thus, after having developed the dynamics in the *OT gauge* we would have to make a last gauge transformation to reformulate it in internal coordinates. + +**Acknowledgments:** This work has been partially supported by GNFM of INDAM. + +**Conflicts of Interest:** The author declares no conflicts of interest. + +## References + +1. Minguzzi, E. Gauge invariance in teleparallel gravity theories: A solution to the background structure problem. Phys. Rev. D **2002**, 65, 084048. 
doi:10.1103/PhysRevD.65.084048. + +2. Kobayashi, S.; Nomizu, K. Foundations of Differential Geometry. In *Interscience Tracts in Pure and Applied Mathematics*; Interscience Publishers: New York, NY, USA, 1963; Volume I. + +3. Trifonov, V. Natural Geometry of Nonzero Quaternions. Int. J. Theor. Phys. **2007**, *46*, 251–257. + +4. Cho, Y.M. Einstein Lagrangian as the translational Yang-Mills Lagrangian. Phys. Rev. D **1976**, *14*, 2521–2525. + +5. Hayashi, K.; Shirafuji, T. New general relativity. Phys. Rev. D **1979**, *19*, 3524–3553. + +6. Rodrigues, W.A., Jr.; de Souza, Q.A.G.; da Rocha, R. Conservation Laws on Riemann-Cartan, Lorentzian and Teleparallel Spacetimes. Bull. Soc. Sci. Lett. Lodz. Ser. Rech. Deform. **2007**, *52*, 37–65, 66–77. + +7. Aldrovandi, R.; Pereira, J.G. Teleparallel Gravity. In *Fundamental Theories of Physics*; Springer: Berlin, Germany, 2013; Volume 173. + +© 2014 by the author. Licensee MDPI, Basel, Switzerland. This article is an open access article distributed under the terms and conditions of the Creative Commons Attribution (CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +Review + +# Quantum Local Symmetry of the *D*-Dimensional +Non-Linear Sigma Model: A Functional Approach + +Andrea Quadri ¹,² + +¹ Istituto Nazionale di Fisica Nucleare (INFN), Sezione di Milano, via Celoria 16, I-20133 Milano, Italy; E-Mail: andrea.quadri@mi.infn.it; Tel.: +39-2-5031-7287; Fax: +39-2-5031-7480 + +² Dipartimento di Fisica, Università di Milano, via Celoria 16, I-20133 Milano, Italy + +Received: 27 February 2014; in revised form: 31 March 2014 / Accepted: 11 April 2014 / +Published: 17 April 2014 + +**Abstract:** We summarize recent progress on the symmetric subtraction of the Non-Linear Sigma Model in *D* dimensions, based on the validity of a certain Local Functional Equation (LFE) encoding the invariance of the SU(2) Haar measure under local left transformations. 
The deformation of the classical non-linearly realized symmetry at the quantum level is analyzed by cohomological tools. It is shown that all the divergences of the one-particle irreducible (1-PI) amplitudes (both on-shell and off-shell) can be classified according to the solutions of the LFE. Applications to the non-linearly realized Yang-Mills theory and to the electroweak theory, which is directly relevant to the model-independent analysis of LHC data, are briefly addressed. + +**Keywords:** Non-Linear Sigma Model; quantum symmetries; renormalization; Becchi–Rouet–Stora–Tyutin (BRST) + +## 1. Introduction + +The purpose of this paper is to provide an introduction to the recent advances in the study of the renormalization properties of the SU(2) Non-Linear Sigma Model (NLSM) and of the quantum deformation of the underlying non-linearly realized classical SU(2) local symmetry. The results reviewed here are based mainly on References [1–19]. + +The linear sigma model was originally proposed a long time ago in [20] in the context of elementary particle physics. In this model the pseudoscalar pion fields $\vec{\phi}$ form a chiral multiplet together with a scalar field $\sigma$, with $(\sigma, \vec{\phi})$ transforming linearly as a vector under $O(4) \sim \text{SU}(2) \times \text{SU}(2)/Z_2$. If one considers instead the model on the manifold defined by + +$$ \sigma^2 + \vec{\phi}^2 = f_{\pi}^2, \quad \sigma > 0 \qquad (1) $$ + +one obtains a theory where the chiral group $SO(4) \sim \text{SU}(2) \times \text{SU}(2)$ (with $SO(4)$ selected by the positivity condition on $\sigma$) is spontaneously broken down to the isotopic spin group $\text{SU}(2)$. The composite field $\sigma$ has a non-vanishing expectation value $f_\pi$ (to be identified with the pion decay constant), while the pions are massless. 
Despite the fact that this is only an approximate description (since in reality the pions are massive and chiral $\text{SU}(2) \times \text{SU}(2)$ is not exact, even before being spontaneously broken), the approach turned out to be phenomenologically quite successful and paved the way to the systematic use of effective field theories as a low energy expansion. + +The first step in this direction was to obtain a phenomenological lagrangian directly, by making use of a pion field with non-linear transformation properties dictated by chiral symmetry from the beginning. After the seminal work of Reference [21] for the chiral $\text{SU}(2) \times \text{SU}(2)$ group, non-linearly realized symmetries were soon generalized to arbitrary groups in [22,23] and have since then become a very popular tool [24]. +---PAGE_BREAK--- + +Modern applications involve, e.g., Chiral Perturbation Theory [25–28], low energy electroweak theories [29] as well as gravity [30]. + +Effective field theories usually exhibit an infinite number of interaction terms, that can be organized according to the increasing number of derivatives. By dimensional arguments, the interaction terms must then be suppressed by some large mass scale M (so that one expects that the theory is reliable at energies well below M) (For a modern introduction to the problem, see e.g., [31]). In the spirit of the phenomenological lagrangians, the tree-level effective action is used to compute physical quantities up to a given order in the momentum expansion. Only a finite number of derivative interaction vertices contribute to that order, thus allowing to express the physical observables one is interested in through a finite number of parameters (to be eventually fixed by comparison with experimental data). Then the theory can be used to make predictions at the given order of accuracy in the low-energy expansion. 
+ +The problem of the mathematically consistent evaluation of quantum corrections in this class of models has a very long history. On general grounds, the derivative couplings tend to worsen the ultraviolet (UV) behavior of the theory, since UV divergent contributions arise in the Feynman amplitudes that cannot be compensated by a multiplicative renormalization of the fields and a redefinition of the mass parameters and the coupling constants in the classical action (truncated at some given order in the momentum expansion). Under these circumstances, one says that the theory is non-renormalizable (A compact introduction to renormalization theory is given in [32]). + +It should be stressed that the key point here is the instability of the classical action: no matter how many terms are kept in the derivative expansion of the tree-level action, there exists a sufficiently high loop order where UV divergences appear that cannot be reabsorbed into the classical action. On the other hand, if in a non-anomalous and non-renormalizable gauge theory one allows for *infinitely many* terms in the classical action (all those compatible with the symmetries of the theory), then UV divergences can indeed be reabsorbed by preserving the Batalin-Vilkovisky master equation [33] and the model is said to be renormalizable in the modern sense [34]. + +Sometimes symmetries are so powerful in constraining the UV divergences that the non-linear theory proves to be indeed renormalizable (although not by power-counting), like for instance the NLSM in two dimensions [35,36] (For a more recent introduction to the subject, see e.g., [37]). + +In four dimensions the situation is much less favorable. It has been found many years ago that already at one loop level in the four-dimensional NLSM there exists an infinite number of one-particle irreducible (1-PI) divergent pion amplitudes. Many attempts were then made in the literature in order to classify such divergent terms. 
Global SU(2) chiral symmetry is not preserved already at one loop level [38–40]. Moreover it turns out that some of the non-symmetric terms can be reabsorbed by a redefinition of the fields [40–43], however in the off-shell four-point $\phi_a$ amplitudes some divergent parts arise that cannot be reabsorbed by field redefinitions unless derivatives are allowed [40]. These technical difficulties prevented such attempts from evolving into a mathematically consistent subtraction procedure. + +More recently it has been pointed out [1] that one can gain full control over the ultraviolet divergences of the $\phi$'s-amplitudes by exploiting the constraints stemming from the presence of a certain local symmetry, associated with the introduction of a SU(2) background field connection into the theory. This symmetry is encoded in functional form in the so-called Local Functional Equation (LFE) [1]. It turns out that the fundamental divergent amplitudes are not those associated with the quantum fields of the theory, namely the pions, but those corresponding to the background connection and to the composite operator implementing the non-linear constraint [1,2]. These amplitudes are named ancestor amplitudes. + +At every order in the loop expansion there is only a finite number of divergent ancestor amplitudes. They uniquely fix the divergent amplitudes involving the pions. Moreover, non-renormalizability of this theory in four dimensions can be traced back to the instability of the classical non-linear +---PAGE_BREAK--- + +local symmetry, that gets deformed by quantum corrections. These results hold for the full off-shell amplitudes [3]. + +A comment is in order here. In Reference [4] it has been argued that Minimal Subtraction is a symmetric scheme, fulfilling all the symmetries of the NLSM in the LFE approach. This in particular entails that all finite parts of the needed higher order counterterms are consistently set to zero. 
It should be stressed that this is not the most general solution compatible with the symmetries and the WPC, that is commonly used in the spirit of the most popular effective field theory point of view. Indeed, these finite parts are constrained neither by the LFE nor by the WPC and thus, mathematically, they can be freely chosen, as far as they are introduced at the order prescribed by the WPC and without violating the LFE. + +The four dimensional SU(2) NLSM provides a relatively simple playground in which to test the approach based on the LFE, that can be further generalized to the SU(N) case (and possibly even to a more general Lie group). + +Moreover, when the background vector field becomes dynamical, the SU(2) NLSM action allows one to generate a mass term for the gauge field à la Stückelberg [44,45]. The resulting non-linear implementation of the spontaneous symmetry breaking mechanism (as opposed to the linear Higgs mechanism) is widely used in the context of electroweak low energy effective field theories, that are a very important tool in the model-independent analysis of LHC data [46-49]. + +## 2. The Classical Non-Linear Sigma Model + +The classical SU(2) NLSM in $D$ dimensions is defined by the action + +$$S_0 = \int d^D x \frac{m_D^2}{4} \mathrm{Tr} (\partial_\mu \Omega^\dagger \partial^\mu \Omega) \quad (2)$$ + +where the matrix $\Omega$ is a SU(2) group element given by + +$$\Omega = \frac{1}{m_D} (\phi_0 + i\phi_a \tau_a), \quad \Omega^\dagger \Omega = 1, \det \Omega = 1, \quad \phi_0^2 + \phi_a^2 = m_D^2 \quad (3)$$ + +In the above equation $\tau_a, a = 1,2,3$ are the Pauli matrices and $m_D = m^{D/2-1}$ is the mass scale of the theory. $m$ has mass dimension 1. 
$\phi_a$ are the three independent fields parameterizing the matrix $\Omega$, while we choose the positive solution of the non-linear constraint, yielding + +$$\phi_0 = \sqrt{m_D^2 - \phi_a^2} \quad (4)$$ + +In components one finds + +$$S_0 = \int d^D x \left( \frac{1}{2} \partial_{\mu} \phi_a \partial^{\mu} \phi_a + \frac{1}{2} \frac{\phi_a \partial_{\mu} \phi_a \phi_b \partial^{\mu} \phi_b}{\phi_0^2} \right) \quad (5)$$ + +The model therefore contains non-polynomial, derivative interactions for the massless scalars $\phi_a$. Equation (2) is invariant under a global SU(2)$_L \times$ SU(2)$_R$ chiral transformation + +$$\Omega' = U\Omega V^{\dagger}, \quad U \in \mathrm{SU}(2)_L, \quad V \in \mathrm{SU}(2)_R \quad (6)$$ + +We notice that such a global transformation is non-linearly realized, as can be easily seen by looking at its infinitesimal version. E.g., for the left transformation one finds: + +$$\delta\phi_a = \frac{1}{2}\alpha\phi_0(x) + \frac{1}{2}\epsilon_{abc}\phi_b(x)\alpha_c, \qquad \delta\phi_0(x) = -\frac{1}{2}\alpha\phi_a(x) \quad (7)$$ + +Since $\phi_0$ is given by Equation (4), the first term in the r.h.s. of $\delta\phi_a$ is non-linear (and even non-polynomial) in the quantum fields. +---PAGE_BREAK--- + +Perturbative quantization of the NLSM requires to carry out the path-integral + +$$Z[J] = \int \mathcal{D}\phi_a \exp (iS_0[\phi_a] + i \int d^D x J_a \phi_a) \quad (8)$$ + +by expanding around the free theory and by treating the second term in the r.h.s. of Equation (5) as an interaction. Notice that in Equation (8) the sources $J_a$ are coupled to the fields $\phi_a$ over which the path-integral is performed. In momentum space the propagator for the $\phi_a$ fields is + +$$\Delta_{\phi_a \phi_b} = i \frac{\delta_{ab}}{p^2} \qquad (9)$$ + +The mass dimension of the $\phi_a$ is therefore $D/2 - 1$, in agreement with Equation (3). 
+ +The presence of two derivatives in the interaction term is the cause (in dimensions greater than 2) of severe UV divergences, leading to the non-renormalizability of the theory. + +### 3. The Approach based on the Local Functional Equation + +Some years ago it was recognized that the most effective classification of the UV divergences (both for on-shell and off-shell amplitudes) of the NLSM cannot be achieved in terms of the quantized fields $\phi_a$, as it usually happens in power-counting renormalizable theories, but rather through the so-called ancestor amplitudes, i.e., the Green's functions of certain composite operators, whose knowledge completely determines the amplitudes involving at least one $\phi_a$-leg. This property follows as a consequence of the existence of an additional local functional identity, the so-called Local Functional Equation (LFE) [1]. + +The LFE stems from the *local* SU(2)$_L$-symmetry that can be established from the gauge transformation of the flat connection $F_\mu$ associated with the matrix $\Omega$: + +$$F_{\mu} = i\Omega\partial_{\mu}\Omega^{\dagger} = \frac{1}{2}F_{a\mu}\tau^{a} \qquad (10)$$ + +i.e., the local SU(2)-transformation of $\Omega$ + +$$\Omega' = U\Omega \qquad (11)$$ + +induces a gauge transformation of the flat connection, namely + +$$F'_{\mu} = U F_{\mu} U^{\dagger} + i U \partial_{\mu} U^{\dagger} \qquad (12)$$ + +$S_0$ in Equation (2) is not invariant under local SU(2)$_L$ transformations; however it is easy to make it invariant, once one realizes that it can be written as + +$$S_0 = \int d^D x \frac{m_D^2}{4} \mathrm{Tr}(F_\mu^2) \qquad (13)$$ + +Since $F_\mu$ transforms as a gauge connection, one can introduce an additional external classical vector source $\tilde{J}_\mu = \frac{1}{2}\tilde{J}_{a\mu} \tau^a$ and replace $S_0$ with + +$$S = \int d^D x \frac{m_D^2}{4} \mathrm{Tr} (F_\mu - \tilde{J}_\mu)^2 \qquad (14)$$ +---PAGE_BREAK--- + +If one requires that $\tilde{f}_{a\mu}$ transforms as a gauge 
connection under the local SU(2)$_L$ group, $S$ in Equation (14) is invariant under a local SU(2)$_L$ symmetry given by + +$$ \begin{aligned} \delta\phi_a &= \frac{1}{2}\alpha_a\phi_0 + \frac{1}{2}\epsilon_{abc}\phi_b\alpha_c, & \delta\phi_0 &= -\frac{1}{2}\alpha_a\phi_a \\ \delta\tilde{f}_{a\mu} &= \partial_\mu\alpha_a + \epsilon_{abc}\tilde{f}_{b\mu}\alpha_c \end{aligned} \qquad (15) $$ + +Notice that in the above equation $\alpha_a$ is a local parameter. + +In order to implement the classical local SU(2)$_L$ invariance at the quantum level, one needs to define the composite operator $\phi_0$ in Equation (4) by coupling it in the classical action to an external source $K_0$ through the term + +$$ S_{\text{ext}} = \int d^D x K_0 \phi_0 \qquad (16) $$ + +$K_0$ is invariant under $\delta$. + +The important observation now is that the variation of full one-particle irreducible (1-PI) vertex functional $\Gamma^{(0)} = S + S_{\text{ext}}$ is linear in the quantized fields $\phi_a$, i.e., + +$$ \delta\Gamma^{(0)} = -\frac{1}{2} \int d^Dx \alpha_a(x) K_0(x) \phi_a(x) \qquad (17) $$ + +By taking a derivative of both sides of the above equation w.r.t. $\alpha_a(x)$ one obtains the LFE for the tree-level vertex functional $\Gamma^{(0)}$: + +$$ W_a(\Gamma^{(0)}) = -\partial_\mu \frac{\delta\Gamma^{(0)}}{\delta\tilde{f}_{a\mu}} + \epsilon_{acb} J_{c\mu} \frac{\delta\Gamma^{(0)}}{\delta\tilde{f}_{b\mu}} + \frac{1}{2} \frac{\delta\Gamma^{(0)}}{\delta K_0(x)} \frac{\delta\Gamma^{(0)}}{\delta\phi_a(x)} + \frac{1}{2} \epsilon_{abc} \phi_c(x) \frac{\delta\Gamma^{(0)}}{\delta\phi_b(x)} = -\frac{1}{2} K_0(x) \phi_a(x) \quad (18) $$ + +Notice that the $\phi_0$-term, entering in the variation of the $\phi_a$ field, is generated by $\frac{\delta\Gamma^{(0)}}{\delta K_0(x)}$. The advantage of this formulation resides in the fact that it is suitable to be promoted at the quantum level. Indeed by defining the composite operator $\phi_0$ by taking functional derivatives w.r.t. 
its source $K_0$, one is able to control its renormalization, once radiative corrections are included [50]. + +In the following Section we are going to give a compact and self-contained presentation of the algebraic techniques used to deal with bilinear functional equations like the LFE in Equation (18). + +# 4. Ancestor Amplitudes and the Weak Power-Counting + +We are going to discuss in this Section the consequences of the LFE for the full vertex functional. The imposition of a quantum symmetry in a non-power-counting renormalizable theory is a subtle problem, since in general there is no control on the dimensions of the possible breaking terms as strong as the one guaranteed by the Quantum Action Principle (QAP) in the renormalizable case. Let us discuss the latter case first. + +## 4.1. Renormalizable Theories and the Quantum Action Principle + +If the tree-level functional $\Gamma^{(0)}$ is power-counting renormalizable, the renormalization procedure [51] provides a way to compute all higher-order terms in the loop expansion of the full vertex functional $\Gamma[\Phi, \chi] = \sum_{n=0}^{\infty} \hbar^n \Gamma^{(n)}[\Phi, \chi]$, depending on the set of quantized fields $\Phi$ and external sources collectively denoted by $\chi$, by fixing order by order only a finite set of action-like normalization conditions. One says that the classical action is therefore stable under radiative corrections, namely the number of free parameters does not increase with the loop order. + +This procedure is a recursive one, since it allows to construct $\Gamma^{(n)}$ once $\Gamma^{(j)}$, $j < n$ are known. From a combinatorial point of view, it turns out that $\Gamma$ is the generating functional of the 1-PI renormalized Feynman amplitudes. 
+---PAGE_BREAK--- + +A desirable feature of power-counting renormalizable theories is that the dependence of 1-PI Green's functions under an infinitesimal variations of the quantized fields and of the parameters of the model is controlled by the so-called Quantum Action Principle (QAP) [52–55] and can be expressed as the insertion of certain *local* operators with UV dimensions determined by their tree-level approximation (i.e., a polynomial in the fields, the external sources and derivatives thereof). + +Let us now consider a certain symmetry $\delta$ of the tree-level $\Gamma^{(0)}$ classical action. Under the condition that the symmetry $\delta$ is non-anomalous [56], it can be extended to the full vertex functional $\Gamma$. In many cases of physical interest the proof that the symmetry is non-anomalous can be performed by making use of cohomological tools. Namely one writes the functional equation associated with the $\delta$-invariance of the tree-level vertex functional as follows: + +$$ S(\Gamma^{(0)}) = \int d^D x \sum_{\Phi} \frac{\delta\Gamma^{(0)}}{\delta\Phi(x)} \frac{\delta\Gamma^{(0)}}{\delta\Phi^{*}(x)} = 0 \quad (19) $$ + +where $\Phi^*$ is an external source coupled in the tree-level vertex functional to the $\delta$-transformation of $\Phi$ and the sum is over the quantized fields. $\Phi^*$ are known as antifields [33]. If $\delta$ is nilpotent (as it happens, e.g., for the Becchi-Rouet-Stora-Tyutin (BRST) operator [57–59] in gauge theories), the recursive proof of the absence of obstructions to the fulfillment of Equation (19) works as follows. Suppose that Equation (19) is satisfied up to order $n-1$ in the loop expansion. 
Then by the QAP the $n$-th order breaking + +$$ \Delta^{(n)} = \int d^D x \sum_{\Phi} \left( \frac{\delta\Gamma^{(0)}}{\delta\Phi(x)} \frac{\delta\Gamma^{(n)}}{\delta\Phi^{*}(x)} + \frac{\delta\Gamma^{(n)}}{\delta\Phi(x)} \frac{\delta\Gamma^{(0)}}{\delta\Phi^{*}(x)} + \sum_{j=1}^{n-1} \frac{\delta\Gamma^{(j)}}{\delta\Phi(x)} \frac{\delta\Gamma^{(n-j)}}{\delta\Phi^{*}(x)} \right) \quad (20) $$ + +is a polynomial in the fields, the external sources and their derivatives. The term involving $\Gamma^{(n)}$ in Equation (20) allows one to define the linearized operator $S_0$ according to + +$$ S_0(\Gamma^{(n)}) = \int d^D x \sum_{\Phi} \left( \frac{\delta\Gamma^{(0)}}{\delta\Phi(x)} \frac{\delta\Gamma^{(n)}}{\delta\Phi^{*}(x)} + \frac{\delta\Gamma^{(n)}}{\delta\Phi(x)} \frac{\delta\Gamma^{(0)}}{\delta\Phi^{*}(x)} \right) \quad (21) $$ + +$S_0$ is also nilpotent, as a consequence of the nilpotency of $\delta$ and of the tree-level invariance in Equation (19). By exploiting this fact and by applying $S_0$ on both sides of Equation (20) one finds + +$$ S_0(\Delta^{(n)}) = 0 \quad (22) $$ + +provided that the Wess-Zumino consistency condition [60] + +$$ S_0 \left( \sum_{j=1}^{n-1} \frac{\delta \Gamma^{(j)}}{\delta \Phi(x)} \frac{\delta \Gamma^{(n-j)}}{\delta \Phi^*(x)} \right) = 0 \quad (23) $$ + +holds. This is the case, e.g., for the BRST symmetry and the associated master Equation (19), since Equation (23) turns out to be a consequence of a generalized Jacobi identity for the Batalin-Vilkovisky bracket for the conjugated variables $(\Phi, \Phi^*)$ [33]. + +The problem of establishing whether the functional identity + +$$ S(\Gamma) = 0 \quad (24) $$ + +holds at order $n$ then boils down to proving that the most general solution to Equation (22) is of the form + +$$ \Delta^{(n)} = -S_0(\Xi^{(n)}) \quad (25) $$ +---PAGE_BREAK--- + +since then the redefined vertex functional $\Gamma^{(n)} \to \Gamma^{(n)} + \Xi^{(n)}$ will fulfill Equation (24) at order $n$ in the loop expansion. 
I.e., the problem reduces to the computation of the cohomology $H(S_0)$ of the operator $S_0$ in the space of integrated local polynomials in the fields, the external sources and their derivatives. Two $S_0$-invariant integrated local polynomials $\mathcal{J}_1$ and $\mathcal{J}_2$ belong to the same cohomology class in $H(S_0)$ if and only if + +$$ \mathcal{J}_1 = \mathcal{J}_2 + S_0(\mathcal{K}) \qquad (26) $$ + +for some integrated local polynomial $\mathcal{K}$. In particular, $H(S_0)$ is empty if the only cohomology class is the one of the zero element, so that the condition that $\mathcal{J}_1$ is $S_0$-invariant implies that + +$$ \mathcal{J}_1 = S_0(\mathcal{K}) \qquad (27) $$ + +for some $\mathcal{K}$. Hence if one can prove that the cohomology of the operator $S_0$ is empty in the space of breaking terms, then Equation (25) must be fulfilled by some choice of the functional $\Xi^{(n)}$. Moreover it must be checked that the UV dimensions of the possible counterterms $\Xi^{(n)}$ are compatible with the action-like condition, so that renormalizability of the theory is not violated. An extensive review of BRST cohomologies for gauge theories is given in [61]. + +## 4.2. Non-Renormalizable Theories + +The QAP does not in general hold for non-renormalizable theories. This does not come as a surprise, since the appearance of UV divergences with higher and higher degree, as one goes up with the loop order, prevents to characterize the induced breaking of a functional identity in terms of a polynomial of a given finite degree (independent of the loop order). + +Moreover for the NLSM another important difference must be stressed: the basic Green's functions of the theory are not those of the quantized fields $\phi_a$, but those of the flat connection coupled to the external vector source $\tilde{j}_{a\mu}$ and of the non-linear constraint $\phi_0$ (coupled to $K_0$). 
This result follows from the invertibility of + +$$ \frac{\delta \Gamma}{\delta K_0} = \phi_0 + O(\hbar) $$ + +as a formal power series in $\hbar$ (since $\phi_0|_{\phi_a=0} = m_D$). Then the LFE for the vertex functional $\Gamma$ + +$$ W_a(\Gamma) = -\frac{1}{2} K_0(x) \phi_a(x) \qquad (28) $$ + +can be seen as a first-order functional differential equation controlling the dependence of $\Gamma$ on the fields $\phi_a$. Provided that a solution exists (as will be proven in Section 5), Equation (28) determines all the amplitudes involving at least one external $\phi_a$-leg in terms of the boundary condition provided by the functional $\Gamma[\tilde{j}, K_0] = \Gamma[\phi, \tilde{j}, K_0]|_{\phi_a=0}$. + +$\Gamma[\tilde{j}, K_0]$ is the generating functional of the so called ancestor amplitudes, i.e., the 1-PI amplitudes involving only external $\tilde{j}$ and $K_0$ legs. + +It is therefore reasonable to assume the LFE in Equation (28) as the starting point for the quantization of the theory. + +From a path-integral point of view, Equation (28) implies that one is performing an integration over the SU(2)-invariant Haar measure of the group, namely one is computing + +$$ Z[J, \tilde{j}_\mu, K_0] = \int \mathcal{D}\Omega(\phi) \exp \left( i\Gamma^{(0)}[\phi, \tilde{j}_\mu, K_0] + i \int d^D x j_\alpha \phi_\alpha \right) \qquad (29) $$ + +where we denote by $\mathcal{D}\Omega(\phi)$ the SU(2) Haar measure (in the coordinate representation spanned by the fields $\phi_\alpha$). This clarifies the geometrical meaning of the LFE. +---PAGE_BREAK--- + +### 4.3. Weak Power-Counting + +As we have already noticed, in four dimensions the NLSM is non power-counting renormalizable, since already at one loop level an infinite number of divergent $\phi$-amplitudes exists. One may wonder whether the UV behavior of the ancestor amplitudes (the boundary conditions to the LFE) is better. 
It turns out that this is indeed the case and one finds that in $D$ dimensions a $n$-th loop Feynman amplitude $G$ with $N_{K_0}$ external $K_0$-legs and $N_J$ external $\tilde{J}$-legs has superficial degree of divergence given by [2] + +$$d(G) \leq (D-2)n + 2 - N_J - 2N_{K_0} \quad (30)$$ + +The proof is straightforward although somewhat lengthy and will not be reported here. It can be found in [2]. Equation (30) establishes the Weak Power-Counting (WPC) condition: at every loop order only a finite number of superficially divergent ancestor amplitudes exist. + +For instance, in $D = 4$ and at one loop order, Equation (30) reduces to + +$$d(G) \leq 4 - N_J - 2N_{K_0} \quad (31)$$ + +i.e., UV divergent amplitudes involve only up to four external $\tilde{J}_\mu$ legs or two $K_0$-legs. + +By taking into account Lorentz-invariance and global SU(2)$_R$ symmetry, the list of UV divergent amplitudes reduces to + +$$ \begin{gathered} \int d^4 x \partial_\mu \tilde{J}_{a\nu} \partial^\mu \tilde{J}_a^\nu, \quad \int d^4 x (\partial \tilde{J}_a)^2, \quad \int d^4 x \epsilon_{abc} \partial_\mu \tilde{J}_{a\nu} \tilde{J}_b^\mu \tilde{J}_c^\nu, \quad \int d^4 x (\tilde{J}_a)^2 (\tilde{J}_b)^2 \\ \int d^4 x \tilde{J}_{a\mu} \tilde{J}_b^\mu \tilde{J}_{a\nu} \tilde{J}_b^\nu, \quad \int d^4 x \tilde{J}_{a\mu}^2, \quad \int d^4 x K_0^2, \quad \int d^4 x K_0 \tilde{J}_a^2 \end{gathered} \quad (32) $$ + +Notice that the counterterms are local. + +It should be emphasized that the model is not power-counting renormalizable, even when ancestor amplitudes are considered, since according to Equation (30) the number of UV divergent amplitudes increases as the loop order $n$ grows. + +A special case is the 2-dimensional NLSM. 
For $D = 2$ Equation (30) yields + +$$d(G) \leq 2 - N_J - 2N_{K_0} \quad (33)$$ + +i.e., at every loop order there can be only two UV divergent ancestor amplitudes, namely + +$$\int d^2 x \tilde{J}^2 \quad \text{and} \quad \int d^2 x K_0$$ + +These are precisely of the same functional form as the ancestor amplitudes entering in the tree-level vertex functional and, in this sense, the model shares the stability property of the classical action typical of power-counting renormalizable models. Renormalizability of the 2-dimensional NLSM can also be established by relying on the Ward identity of global SU(2) symmetry (see e.g., [37]). + +A comment is in order here. In References [24,25] the external fields are the sources of connected Green's functions of certain quark-antiquark currents. The ancestor amplitudes in the NLSM, in the approach based on the LFE, do not have a direct physical interpretation of this type, however they have a very clear geometrical meaning. First of all, $\tilde{J}_\mu$ is the source coupled to the flat connection naturally associated with the group element $\Omega$. On the other hand, $K_0$ is the unique scalar source required, in the special case of the SU(2) group, in order to control the renormalization of the non-linear classical SU(2) transformation of the $\phi_a$'s and thus plays the role of the so-called antifields [33,50]. The extension to a general Lie group G is addressed at the end of Section 5. +---PAGE_BREAK--- + +**5. Cohomological Analysis of the LFE** + +In order to study the properties of the LFE, it is very convenient to introduce a fictitious BRST operator $s$ by promoting the gauge parameters $\alpha_a(x)$ to classical anticommuting ghosts $\omega_a(x)$. 
I.e., one sets + +$$ +\begin{align} +s \tilde{J}_{a\mu} &= \partial_{\mu} \omega_a + \epsilon_{abc} \tilde{J}_{b\mu} \omega_c, & s \phi_a &= \frac{1}{2} \omega_a \phi_0 + \frac{1}{2} \epsilon_{abc} \phi_b \omega_c, & s \phi_0 &= -\frac{1}{2} \omega_a \phi_a \\ +s K_0 &= \frac{1}{2} \omega_a \frac{\delta \Gamma^{(0)}}{\delta \phi_a(x)}, & s \omega_a &= -\frac{1}{2} \epsilon_{abc} \omega_b \omega_c +\end{align} +\tag{34} $$ + +Some comments are in order here. First of all the BRST operator $s$ acts also on the external source $K_0$. Moreover, the BRST transformation of $\omega_a$ is fixed by nilpotency, namely $s^2 = 0$. + +The introduction of the ghosts allows to define a grading w.r.t. the conserved ghost number. $\omega$ has ghost number +1, while all the other fields and sources have ghost number zero. (The ghost number was called the Faddeev-Popov (ΦΠ) charge in [2].) + +In terms of the operator $s$ we can write the $n$-th order projection ($n \ge 1$) of the LFE in Equation (28) as follows: + +$$ [\int d^D x \omega_a W_a(\Gamma)]^{(n)} = s\Gamma^{(n)} + \sum_{j=1}^{n-1} \int d^D x \frac{1}{2}\omega_a \frac{\delta\Gamma^{(j)}}{\delta K_0} \frac{\delta\Gamma^{(n-j)}}{\delta\phi_a} = 0 \quad (35) $$ + +Notice that the bilinear term in the LFE manifests itself into the presence of the mixed $\frac{\delta\Gamma^{(j)}}{\delta K_0}$ $\frac{\delta\Gamma^{(n-j)}}{\delta\phi_a}$ contribution. Moreover in the r.h.s. there is no contribution from the breaking term linear in $\phi_a$ in Equation (18) since the latter remains classical. + +Suppose now that all divergences have been recursively subtracted up to order $n-1$. 
At the $n$-th order the UV divergent part can only come from the term involving $\Gamma^{(n)}$ in Equation (35) and therefore, if the LFE holds, one gets a condition on the UV divergent part $\Gamma_{pol}^{(n)}$ of $\Gamma^{(n)}$: + +$$ s\Gamma_{pol}^{(n)} = 0 \qquad (36) $$ + +To be specific, one can use Dimensional Regularization and subtract only the pole part of the ancestor amplitudes, after the proper normalization of the ancestor background connection amplitudes + +$$ \frac{m}{m_D} \frac{\delta^{(n)} \Gamma}{\delta \tilde{J}_{a_1}^{\mu_1} \dots \delta \tilde{J}_{a_n}^{\mu_n}} $$ + +The LFE then fixes the correct factor for the normalization of amplitudes involving $K_0$. This subtraction procedure has been shown to be symmetric [2,4], i.e., to preserve the LFE. The pole parts before subtraction obey the condition in Equation (36). + +By the nilpotency of $s$, solving Equation (36) is equivalent to computing the cohomology of the BRST operator $s$ in the space of local functionals in $\tilde{J}, \phi, K_0$ and their derivatives with ghost number zero. This can be achieved by using the techniques developed in [62]. + +One first builds invariant combinations in one-to-one correspondence with the ancestor variables $\tilde{J}_{a\mu}$ and $K_0$. For that purpose it is more convenient to switch back to matrix notation. The difference $I_\mu = F_\mu - \tilde{J}_\mu$ transforms in the adjoint representation of SU(2), being the difference of two gauge connections. Thus the conjugate of such a difference w.r.t. $\Omega$ + +$$ j_{\mu} = j_{a\mu} \frac{\tau_a}{2} = \Omega^{\dagger} I_{\mu} \Omega \qquad (37) $$ +---PAGE_BREAK--- + +is invariant under $s$. 
By direct computation one finds + +$$
\begin{align}
m_D^2 j_{a\mu} &= m_D^2 I_{a\mu} - 2\phi_b^2 I_{a\mu} + 2\phi_b I_{b\mu}\phi_a + 2\phi_0 \epsilon_{abc} \phi_b I_{c\mu} \nonumber \\
&\equiv m_D^2 R_{ba} I_{b\mu} \tag{38}
\end{align}
$$ + +The matrix $R_{ba}$ is an element of the adjoint representation of SU(2) and therefore the mapping $\tilde{J}_{a\mu} \rightarrow j_{a\mu}$ is invertible. + +One can also prove that the following combination + +$$
\bar{K}_0 \equiv \frac{m_D^2 K_0}{\phi_0} - \phi_a \frac{\delta S}{\delta \phi_a} \quad (39)
$$ + +is invariant [2]. At $\phi_a = 0$ one gets + +$$
\bar{K}_0|_{\phi_a=0} = m_D K_0 \qquad (40)
$$ + +and therefore the transformation $K_0 \to \bar{K}_0$ is also invertible. + +In terms of the new variables $\bar{K}_0$ and $j_\mu$ and by differentiating Equation (36) w.r.t. $\omega_a$ one gets + +$$
\Theta_{ab} \frac{\delta \Gamma_{pol}^{(n)} [j, \bar{K}_0, \phi]}{\delta \phi_b} = 0 \quad (41)
$$ + +where $s\phi_b = \omega_a \Theta_{ab}$, i.e., + +$$
\Theta_{ab} = \frac{1}{2}\phi_0 \delta_{ab} + \frac{1}{2}\epsilon_{abc}\phi_c \quad (42)
$$ + +$\Theta_{ab}$ is invertible and thus Equation (41) yields + +$$
\frac{\delta \Gamma_{pol}^{(n)} [j, \bar{K}_0, \phi]}{\delta \phi_b} = 0 \qquad (43)
$$ + +This equation is a very powerful one. It states that the *n*-th order divergences (after the theory has been made finite up to order *n* − 1) of the *φ*-fields can only appear through the invariant combinations $\bar{K}_0$ and $j_{a\mu}$. These invariant variables have been called bleached variables and they are in one-to-one correspondence with the ancestor variables $K_0$ and $\tilde{J}_{a\mu}$. + +The subtraction strategy is thus the following. One computes the divergent part of the properly
normalized ancestor amplitudes that are superficially divergent at a given loop order according to the
WPC formula in Equation (30). 
Then the replacement $\tilde{J}_{a\mu} \to j_{a\mu}$ and $K_0 \to \bar{K}_0$ is carried out. This gives
the full set of counterterms required to make the theory finite at order *n* in the loop expansion. + +As an example, we give here the explicit form of the one-loop divergent counterterms for the
NLSM in *D* = 4 [2] (notice that we have set *g* = 1 according to our conventions in this paper): + +$$
\hat{\Gamma}^{(1)} = \frac{1}{D-4} \left[ -\frac{1}{12} \frac{1}{(4\pi)^2} \frac{m_D^2}{m^2} (\mathcal{I}_1 - \mathcal{I}_2 - \mathcal{I}_3) + \frac{1}{(4\pi)^2} \frac{1}{48} \frac{m_D^2}{m^2} (\mathcal{I}_6 + 2\mathcal{I}_7) \right. \\
\left. + \frac{1}{(4\pi)^2} \frac{3}{2} \frac{1}{m^2 m_D^2} \mathcal{I}_4 + \frac{1}{(4\pi)^2} \frac{1}{2} \frac{1}{m^2} \mathcal{I}_5 \right] \tag{44}
$$ + +By projecting the above equation on the relevant monomial in the $\phi_a$ fields one can get the divergences
of the descendant amplitudes. As an example, for the four point $\phi_a$ function one gets by explicit
---PAGE_BREAK--- + +computation that the contribution from the combination $\mathcal{I}_1 - \mathcal{I}_2 - \mathcal{I}_3$ is zero, while the remaining invariants give + +$$ \hat{\Gamma}^{(1)}[\phi\phi\phi\phi] = -\frac{1}{D-4} \frac{1}{m_D^2 m^2 (4\pi)^2} \int d^D x \left( -\frac{1}{3}\partial_\mu \phi_a \partial^\mu \phi_a \partial_\nu \phi_b \partial^\nu \phi_b - \frac{2}{3}\partial_\mu \phi_a \partial_\nu \phi_a \partial^\mu \phi_b \partial^\nu \phi_b \right. \\ \left. -\frac{3}{2}\phi_a \Box \phi_a \phi_b \Box \phi_b - 2\phi_a \Box \phi_a \partial_\mu \phi_b \partial^\mu \phi_b \right) \quad (45) $$ + +The invariants in the combination $\mathcal{I}_6 + 2\mathcal{I}_7$ generate the counterterms in the first line between square brackets; these counterterms are globally SU(2) invariant. The other terms are generated by invariants involving the source $K_0$. In [39,40] they were constructed by means of a (non-locally invertible) field redefinition of $\phi_a$. 
The full set of mixed four point amplitudes involving at least one $\phi_a$ legs and the external sources $J_\mu$ and $K_0$ can be found in [2]. + +The correspondence with the linear sigma model in the large coupling limit has been studied in [5]. + +The massive NLSM in the LFE formulation has been studied in [15], while the symmetric subtraction procedure for the LFE associated with polar coordinates in the simplest case of the free complex scalar field has been given in [16]. + +In the SU(2) NLSM just one scalar source $K_0$ is sufficient in order to formulate the LFE. For an arbitrary Lie group G the LFE can always be written if one introduces a full set of antifields $\phi_I^*$ as follows. Let us denote by $\Omega(\phi_I)$ the group element belonging to G, parameterized by local coordinates $\phi_I$. Then under an infinitesimal left G-transformation of parameters $\alpha_J$ + +$$ \delta\Omega = i\alpha_J T_J \Omega \quad (46) $$ + +where $T_J$ are the generators of the group G, one has + +$$ \delta\phi_I = S_{IJ}(\phi)\alpha_J \quad (47) $$ + +It is convenient to promote the local left invariance to a BRST symmetry by upgrading the parameters $\alpha_I$ to local classical anticommuting ghosts $C_J$. Then one can introduce in the usual way the couplings with the antifields $\phi_I^*$ through + +$$ S_{\text{ext}} = \int d^D x \phi_I^* S_{IJ}(\phi) C_J \quad (48) $$ + +and then write the corresponding BV master equation [33]. This is the generalization of the LFE valid for the group G. The cohomology of the linearized BV operator (which is the main tool for identifying the bleached variables, as shown above) has been studied for any Lie group G in [62]. + +## 6. 
Higher Loops + +At orders $n > 1$ the LFE for $\Gamma^{(n)}$ is an inhomogeneous equation + +$$ s\Gamma^{(n)} = \Delta^{(n)} = -\frac{1}{2} \int d^D x \omega_a \sum_{j=1}^{n-1} \frac{\delta\Gamma^{(j)}}{\delta K_0} \frac{\delta\Gamma^{(n-j)}}{\delta\phi_a} \quad (49) $$ + +The above equation can be explicitly integrated by using the techniques of the Slavnov-Taylor (ST) parameterization of the effective action [63–65] (originally developed in order to provide a strategy for the restoration of the ST identity of non-anomalous gauge theories in the absence of a symmetric regularization). +---PAGE_BREAK--- + +For that purpose it is convenient to redefine the ghost according to + +$$ +\bar{\omega}_a = \Theta_{ab} \omega_b \tag{50} +$$ + +where $\Theta_{ab}$ is given in Equation (42). The action of $s$ then reduces to + +$$ +s\bar{K}_0 = s j_{a\mu} = 0, \quad s\phi_a = \bar{\omega}_a, \quad s\bar{\omega}_a = 0 \tag{51} +$$ + +This means that the variables $\bar{K}_0$ and $j_{a\mu}$ are invariant, while the pair $(\phi_a, \bar{\omega}_a)$ is a BRST doublet (i.e., a pair of variables $u, v$ such that $s u = v, s v = 0$) [33,66]. + +By the nilpotency of s the following consistency condition must hold for $\Delta^{(n)}$: + +$$ +s\Delta^{(n)} = 0 \tag{52} +$$ + +The fulfillment of the above equation as a consequence of the validity of the LFE up to order $n-1$ is proven in [63]. In terms of the new variables Equation (49) reads + +$$ +\int d^D x \bar{\omega}_a \frac{\delta \Gamma^{(n)}}{\delta \phi_a} = \Delta^{(n)} [\bar{\omega}_a, \phi_a, \bar{K}_0, j_{a\mu}] \quad (53) +$$ + +By noticing that $\Delta^{(n)}$ is linear in $\bar{\omega}_a$ and by differentiating Equation (53) w.r.t. 
$\bar{\omega}_a$ we arrive at + +$$
\frac{\delta \Gamma^{(n)}}{\delta \phi_a(x)} = \frac{\delta \Delta^{(n)}}{\delta \bar{\omega}_a(x)} \qquad (54)
$$ + +The above equation controls the explicit dependence of the *n*-th order vertex functional on $\phi_a$ (there is
in addition an implicit dependence on $\phi_a$ through the variables $j_{a\mu}$ and $\bar{K}_0$). + +The explicit dependence on $\phi_a$ only appears through lower order terms. Hence it does not
influence the *n*-th order ancestor amplitudes. + +The solution of Equation (49) can be written in compact form by using a homotopy operator.
Indeed $\Gamma^{(n)}$ will be the sum of a $n$-th order contribution $A^{(n)}$, depending only on $j_{a\mu}$ and $\bar{K}_0$, plus a
lower order term: + +$$
\begin{align}
\Gamma^{(n)}[\phi_a, \bar{\omega}_a, \bar{K}_0, j_{a\mu}] &= A^{(n)}[\bar{K}_0, j_{a\mu}] \nonumber \\
&\quad + \kappa \, \Delta^{(n)}[\bar{\omega}_a, \phi_a, \bar{K}_0, j_{a\mu}] \tag{55}
\end{align}
$$ + +The operator $\lambda_t$ acts as follows on a generic functional $X[\phi_a, \bar{\omega}_a, \bar{K}_0, j_{a\mu}]$: + +$$
\lambda_t X[\phi_a, \bar{\omega}_a, \bar{K}_0, j_{a\mu}] = X[t\phi_a, t\bar{\omega}_a, \bar{K}_0, j_{a\mu}] \quad (56)
$$ + +The homotopy operator $\kappa$ for the BRST differential $s$ in the second line of Equation (55) is therefore given by + +$$
\kappa = \int d^D x \int_0^1 dt \, \phi_a(x) \lambda_t \frac{\delta}{\delta \bar{\omega}_a(x)} \qquad (57)
$$ + +and satisfies the condition + +$$
\{s, \kappa\} = 1
\quad (58)
$$ + +where **1** denotes the identity on the space of functionals spanned by $\overline{\omega}_a, \phi_a$. +---PAGE_BREAK--- + +An important remark is in order here. 
The theory remains finite and respects the LFE if one adds to $\Gamma^{(n)}$ some integrated local monomials in $j_{a\mu}$ and $\bar{K}_0$ and ordinary derivatives thereof (with finite coefficients), compatible with Lorentz symmetry and global SU(2) invariance, while respecting the WPC condition in Equation (30): + +$$ \Gamma_{finite}^{(n)} = \sum_j \int d^D x M_j (j_{a\mu}, \bar{K}_0) \qquad (59) $$ + +This is a consequence of the non power-counting renormalizability of the theory: one can introduce order by order in the loop expansion an increasing number of finite parameters that do not appear in the classical action. Notice that they cannot be inserted back at tree-level: if one performs such an operation, the WPC condition is lost. + +This observation suggests that these finite parameters cannot be easily understood as physical free parameters of the theory, since they cannot appear in the tree-level action. It was then proposed to define the model by choosing the symmetric subtraction scheme discussed in Section 5 and by considering as physical parameters only those present in the classical action plus the scale of the radiative corrections $\Lambda$ [4]. While acceptable on physical grounds, from the mathematical point of view one may wonder whether there is some deeper reason justifying such a strategy. We will comment briefly on this point in the Conclusions. + +## 7. Applications to Yang-Mills and the Electroweak Theory + +When the vector source $\tilde{f}_{a\mu}$ becomes a dynamical gauge field, the NLSM action gives rise to the Stückelberg mass term [67]. + +The subtraction procedure based on the LFE has been used to implement a mathematically consistent formulation of non-linearly realized massive Yang-Mills theory. SU(2) Yang-Mills in the LFE formalism has been formulated in [6]. The pseudo-Goldstone fields take over the role of the $\phi_a$ fields of the NLSM. Their Green's functions are fixed by the LFE. 
The WPC proves to be very restrictive, since by imposing the WPC condition it turns out that the only allowed classical solution is the usual Yang-Mills theory plus the Stückelberg mass term.

This is a very powerful (and somewhat surprising) result. Indeed all possible monomials constructed out of $j_{a\mu}$ and ordinary derivatives thereof are gauge-invariant and therefore they could be used as interaction vertices in the classical action.

In other words, the peculiar structure of the Yang-Mills action

$$ S_{YM} = - \int d^4 x \frac{1}{4} G_{a\mu\nu} G_a^{\mu\nu} \qquad (60) $$

where $G_{a\mu\nu}$ denotes the field strength of the gauge field $A_{a\mu}$

$$ G_{a\mu\nu} = \partial_{\mu} A_{a\nu} - \partial_{\nu} A_{a\mu} + f_{abc} A_{b\mu} A_{c\nu} $$

is not automatically enforced by the requirement of gauge invariance if the gauge group is non-linearly realized. However if the WPC condition is satisfied, the only admissible solution becomes Yang-Mills theory plus the Stückelberg mass term:

$$ S_{nLYM} = S_{YM} + \int d^4 x \frac{M^2}{2} (A_{a\mu} - F_{a\mu})^2 \qquad (61) $$

Massive Yang-Mills theory in the presence of a non-linearly realized gauge group is physically unitary [67] (despite the fact that it violates the Froissart bound [68–74] at tree-level). The counterterms in the Landau gauge have been computed at one loop level in [7]. The formulation of the theory in a general 't Hooft gauge has been given in [8].
---PAGE_BREAK---

The approach based on the LFE can also be used for non-perturbative studies of Yang-Mills theory on the lattice. The phase diagram of SU(2) Yang-Mills has been considered in [17]. Emerging evidence is being accumulated about the formation of isospin scalar bound states [18] in the supposedly confined phase of the theory [19].

An analytic approach based on the massless bound-state formalism for the implementation of the Schwinger mechanism in non-Abelian gauge theories has been presented in [75–77].

A very important physical application of non-linearly realized gauge theories is the formulation of a non-linearly realized electroweak theory, based on the group SU(2) × U(1). The set of gauge fields comprises the SU(2) fields $A_{a\mu}$ and the hypercharge U(1) gauge connection $B_\mu$. By using the technique of bleached variables one can first construct SU(2) invariant variables in one-to-one correspondence with $A_\mu = A_{a\mu} \frac{\tau_a}{2}$ [8]:

$$w_{\mu} = \Omega^{\dagger} g A_{\mu} \Omega - g' \frac{\tau_3}{2} B_{\mu} + i \Omega^{\dagger} \partial_{\mu} \Omega \equiv w_{a\mu} \frac{\tau_a}{2} \quad (62)$$

In the above equation we have reinserted for later convenience the SU(2) and U(1) coupling constants $g$ and $g'$. Since $w_\mu$ is SU(2) invariant, the hypercharge generator coincides with the electric charge generator. $w_{3\mu}$ is then the bleached counterpart of the $Z_\mu$ field, since

$$Z_{\mu} = \left. \frac{1}{\sqrt{g^2 + g'^2}} w_{3\mu} \right|_{\phi_a=0} = c_W A_{3\mu} - s_W B_{\mu} \quad (63)$$

where $s_W$ and $c_W$ are the sine and cosine of the Weinberg angle

$$s_W = \frac{g'}{\sqrt{g^2 + g'^2}}, \qquad c_W = \frac{g}{\sqrt{g^2 + g'^2}} \quad (64)$$

The photon $A_\mu$ is described by the combination orthogonal to $Z_\mu$, namely

$$A_{\mu} = s_W A_{3\mu} + c_W B_{\mu} \quad (65)$$

One can build out of $A_{1\mu}$ and $A_{2\mu}$ the charged $W^\pm$ field

$$W_{\mu}^{\pm} = \frac{1}{\sqrt{2}}(A_{1\mu} \mp iA_{2\mu}) \quad (66)$$

whose bleached counterpart is simply

$$w_{\mu}^{\pm} = \frac{1}{\sqrt{2}}(w_{1\mu} \mp i w_{2\mu}) \quad (67)$$

The WPC allows for the same symmetric couplings of the Standard Model and for two independent mass invariants [9–11]

$$M_W^2 \, w_{\mu}^{+} w^{-\mu} + \frac{M_Z^2}{2} w_{3\mu} w_3^{\mu} \quad (68)$$

where the masses of the Z and W bosons are not related by the Weinberg relation

$$M_Z = \frac{M_W}{c_W}$$
---PAGE_BREAK---

This is a peculiar signature of the mass generation
mechanism *à la* Stückelberg, that is not present in the linearly realized theory *à la* Brout-Englert-Higgs [78–80] (even if one discards the condition of power-counting renormalizability in favour of the WPC) [12]. + +The inclusion of physical scalar resonances in the non-linearly realized electroweak model, while respecting the WPC, yields some definite prediction for the Beyond the Standard Model (BSM) sector. Indeed it turns out that it is impossible to add a scalar singlet without breaking the WPC condition. The minimal solution requires a SU(2) doublet of scalars, leading to a CP-even physical field (to be identified with the recently discovered scalar resonance at 125.6 GeV) and to three additional heavier physical states, one CP-odd and neutral and two charged ones [13]. The proof of the WPC in this model and the BRST identification of physical states has been given in [14]. + +The WPC and the symmetries of the theory select uniquely the tree-level action of the non-linearly realized electroweak model. As in the NLSM case, mathematically additional finite counterterms are allowed at higher orders in the loop expansion. In [4] it has been argued that they cannot be interpreted as additional physical parameters (unlike in the effective field theory approach), on the basis of the observation that they are forbidden at tree-level by the WPC, and this strategy has been consistently applied in [7,11]. + +The question remains open of whether a Renormalization Group equation exists, involving a finite change in the higher order subtractions, in such a way to compensate the change in the sliding scale $\Lambda$ of the radiative corrections. We notice that in this case the finite higher order counterterms would be a function of the tree-level parameters only (unlike in the conventional effective field theory approach, where they are treated as independent extra free parameters). 
This issue deserves further investigation, since obviously the possibility of running the scale $\Lambda$ in a mathematically consistent way would allow to obtain physical predictions of the same observables applicable in different energy regimes. + +## 8. Conclusions + +The LFE makes it apparent that the independent amplitudes of the NLSM are not those of the quantum fields, over which the path-integral is carried out, but rather those of the background connection $\tilde{J}_\mu$ and of the source $K_0$, coupled to the solution of the non-linear constraint $\phi_0$. The WPC can be formulated only for these ancestor amplitudes; the LFE in turn fixes the descendant amplitudes, involving at least one pion external leg. Within this formulation, the minimal symmetric subtraction discussed in Section 5 is natural, since it provides a way to implement the idea that the number of ancestor interaction vertices, appearing in the classical action and compatible with the WPC, must be finite. + +However, it should be stressed that the most general solution to the LFE, compatible with the WPC, does not forbid to choose different finite parts of the higher order symmetric counterterms (as in the most standard view of effective field theories, where such arbitrariness is associated with extra free parameters of the non-renormalizable theory), as far as they are introduced at the order prescribed by the WPC condition and without violating the LFE. + +In this connection it should be noticed that the addition of the symmetric finite renormalizations in Equation (59), that are allowed by the symmetries of the theory, is equivalent to a change in the Hopf algebra [81,82] of the model. This is because the finite counterterms in Equation (59) modify the set of 1-PI Feynman diagrams on which the Hopf algebra is constructed, as a dual of the enveloping algebra of the Lie algebra of Feynman graphs. 
The approach to renormalization based on Hopf algebras is known to be equivalent [83] to the traditional approach based on the Bogoliubov recursive formula and its explicit solution through Zimmermann's forest formula [84]. For models endowed with a WPC it might provide new insights into the structure of the UV divergences of the theory. This connection seems to deserve further investigations.

**Acknowledgments:** It is a pleasure to acknowledge many enlightening discussions with R. Ferrari. Useful comments and a careful reading of the manuscript by D. Bettinelli are also gratefully acknowledged.
---PAGE_BREAK---

# Appendix

## One-Loop Invariants

We report here the invariants controlling the one-loop divergences of the NLSM in $D = 4$ [2].

$$
\begin{aligned}
\mathcal{I}_1 &= \int d^D x [D_\mu (F - \bar{J})_\nu]_a [D^\mu (F - \bar{J})^\nu]_a, \\
\mathcal{I}_2 &= \int d^D x [D_\mu (F - \bar{J})^\mu]_a [D_\nu (F - \bar{J})^\nu]_a, \\
\mathcal{I}_3 &= \int d^D x \epsilon_{abc} [D_\mu (F - \bar{J})_\nu]_a (F_b^\mu - \bar{J}_b^\mu) (F_c^\nu - \bar{J}_c^\nu), \\
\mathcal{I}_4 &= \int d^D x \left(\frac{m_D^2 K_0}{\phi_0} - \phi_a \frac{\delta S}{\delta \phi_a}\right)^2, \\
\mathcal{I}_5 &= \int d^D x \left(\frac{m_D^2 K_0}{\phi_0} - \phi_a \frac{\delta S}{\delta \phi_a}\right) (F_b^\mu - \bar{J}_b^\mu)^2, \\
\mathcal{I}_6 &= \int d^D x (F_a^\mu - \bar{J}_a^\mu)^2 (F_b^\nu - \bar{J}_b^\nu)^2, \\
\mathcal{I}_7 &= \int d^D x (F_a^\mu - \bar{J}_a^\mu) (F_a^\nu - \bar{J}_a^\nu) (F_{b\mu} - \bar{J}_{b\mu}) (F_{b\nu} - \bar{J}_{b\nu})
\end{aligned}
\quad (\text{A1}) $$

In the above equation $D_\mu[F]$ stands for the covariant derivative w.r.t. $F_{a\mu}$

$$ D_{\mu}[F]_{ab} = \delta_{ab}\partial_{\mu} + \epsilon_{acb}F_{c\mu} \quad (\text{A2}) $$

**Conflicts of Interest:** The author declares no conflict of interest.

## References

1. Ferrari, R. Endowing the nonlinear sigma model with a flat connection structure: A way to renormalization.
JHEP 2005, doi:10.1088/1126-6708/2005/08/048.

2. Ferrari, R.; Quadri, A. A Weak power-counting theorem for the renormalization of the non-linear sigma model in four dimensions. Int. J. Theor. Phys. 2006, 45, 2497–2515.

3. Bettinelli, D.; Ferrari, R.; Quadri, A. Path-integral over non-linearly realized groups and Hierarchy solutions. JHEP 2007, doi:10.1088/1126-6708/2007/03/065.

4. Bettinelli, D.; Ferrari, R.; Quadri, A. Further Comments on the Symmetric Subtraction of the Nonlinear Sigma Model. Int. J. Mod. Phys. 2008, A23, 211–232.

5. Bettinelli, D.; Ferrari, R.; Quadri, A. The Hierarchy principle and the large mass limit of the linear sigma model. Int. J. Theor. Phys. 2007, 46, 2560–2590.

6. Bettinelli, D.; Ferrari, R.; Quadri, A. A Massive Yang-Mills Theory based on the Nonlinearly Realized Gauge Group. Phys. Rev. D 2008, 77, doi:10.1103/PhysRevD.77.045021.

7. Bettinelli, D.; Ferrari, R.; Quadri, A. One-loop self-energy and counterterms in a massive Yang-Mills theory based on the nonlinearly realized gauge group. Phys. Rev. D 2008, 77, doi:10.1103/PhysRevD.77.105012.

8. Bettinelli, D.; Ferrari, R.; Quadri, A. Gauge Dependence in the Nonlinearly Realized Massive SU(2) Gauge Theory. J. General. Lie Theor. Appl. 2008, 2, 122–126.

9. Bettinelli, D.; Ferrari, R.; Quadri, A. The SU(2) × U(1) Electroweak Model based on the Nonlinearly Realized Gauge Group. Int. J. Mod. Phys. 2009, A24, 2639–2654.

10. Bettinelli, D.; Ferrari, R.; Quadri, A. The SU(2) × U(1) Electroweak Model based on the Nonlinearly Realized Gauge Group. II. Functional Equations and the Weak Power-Counting. Acta Phys. Polon. 2010, B41, 597–628.

11. Bettinelli, D.; Ferrari, R.; Quadri, A. One-loop Self-energies in the Electroweak Model with Nonlinearly Realized Gauge Group. Phys. Rev. D 2009, 79, doi:10.1103/PhysRevD.79.125028.
---PAGE_BREAK---

12. Quadri, A. The Algebra of Physical Observables in Nonlinearly Realized Gauge Theories. *Eur. Phys.
J.* **2010**, C70, 479-489.

13. Binosi, D.; Quadri, A. Scalar Resonances in the Non-linearly Realized Electroweak Theory. *JHEP* **2013**, 1302, doi:10.1007/JHEP02(2013)020.

14. Bettinelli, D.; Quadri, A. The Stueckelberg Mechanism in the presence of Physical Scalar Resonances. *Phys. Rev. D* **2013**, 88, doi:10.1103/PhysRevD.88.065023.

15. Ferrari, R. A Symmetric Approach to the Massive Nonlinear Sigma Model. *J. Math. Phys.* **2011**, 52, 092303:1-092303:16.

16. Ferrari, R. On the Renormalization of the Complex Scalar Free Field Theory. *J. Math. Phys.* **2010**, 51, 032305:1-032305:20.

17. Ferrari, R. On the Phase Diagram of Massive Yang-Mills. *Acta Phys. Polon.* **2012**, B43, 1965-1980.

18. Ferrari, R. On the Spectrum of Lattice Massive SU(2) Yang-Mills. *Acta Phys. Polon.* **2013**, B44, 1871-1885.

19. Ferrari, R. Metamorphosis versus Decoupling in Nonabelian Gauge Theories at Very High Energies. *Acta Phys. Polon.* **2012**, B43, 1735-1767.

20. Gell-Mann, M.; Levy, M. The axial vector current in beta decay. *Nuovo Cim.* **1960**, 16, 705-726.

21. Weinberg, S. Nonlinear realizations of chiral symmetry. *Phys. Rev.* **1968**, 166, 1568-1577.

22. Coleman, S.R.; Wess, J.; Zumino, B. Structure of phenomenological Lagrangians. 1. *Phys. Rev.* **1969**, 177, 2239-2247.

23. Callan, C.G., Jr.; Coleman, S.R.; Wess, J.; Zumino, B. Structure of phenomenological Lagrangians. 2. *Phys. Rev.* **1969**, 177, 2247-2250.

24. Weinberg, S. Phenomenological Lagrangians. *Physica* **1979**, A96, 327-340.

25. Gasser, J.; Leutwyler, H. Chiral Perturbation Theory to One Loop. *Ann. Phys.* **1984**, 158, 142-210.

26. Gasser, J.; Leutwyler, H. Chiral Perturbation Theory: Expansions in the Mass of the Strange Quark. *Nucl. Phys.* **B** **1985**, 250, 465-516.

27. Bijnens, J.; Colangelo, G.; Ecker, G. Renormalization of chiral perturbation theory to order $p^6$. *Ann. Phys.* **2000**, 280, 100-139.

28. 
Ecker, G.; Gasser, J.; Leutwyler, H.; Pich, A.; de Rafael, E. Chiral Lagrangians for Massive Spin 1 Fields. *Phys. Lett.* **B** **1989**, 223, 425-432. + +29. Buchmuller, W.; Wyler, D. Effective Lagrangian Analysis of New Interactions and Flavor Conservation. *Nucl. Phys.* **B** **1986**, 268, 621-653. + +30. Donoghue, J.F. Introduction to the effective field theory description of gravity. Available online: http://arxiv.org/abs/grqc/9512024 (accessed on 15 April 2014). + +31. Weinberg, S. *The Quantum Theory of Fields*. Vol. 2: Modern Applications; Cambridge University Press: Cambridge, UK, 1996. + +32. Itzykson, C.; Zuber, J. *Quantum Field Theory*; McGraw-Hill: New York, NY, USA, 1980. + +33. Gomis, J.; Paris, J.; Samuel, S. Antibracket, antifields and gauge theory quantization. *Phys. Rep.* **1995**, 259, 1-145. + +34. Gomis, J.; Weinberg, S. Are nonrenormalizable gauge theories renormalizable? *Nucl. Phys.* **B** **1996**, 469, 473-487. + +35. Brezin, E.; Zinn-Justin, J.; Le Guillou, J. Renormalization of the Nonlinear Sigma Model in (Two + Epsilon) Dimension. *Phys. Rev. D* **1976**, 14, 2615-2621. + +36. Becchi, C.; Piguet, O. On the Renormalization of Two-dimensional Chiral Models. *Nucl. Phys.* **B** **1989**, 315, 153-165. + +37. Zinn-Justin, J. *Quantum Field Theory and Critical Phenomena*; International Series of Monographs on Physics; Oxford University Press: Oxford, UK, 2002. + +38. Ecker, G.; Honerkamp, J. Application of invariant renormalization to the nonlinear chiral invariant pion lagrangian in the one-loop approximation. *Nucl. Phys.* **B** **1971**, 35, 481-492. + +39. Appelquist, T.; Bernard, C.W. The Nonlinear σ Model in the Loop Expansion. *Phys. Rev.* **D** **1981**, 23, doi:10.1103/PhysRevD.23.425. + +40. Tataru, L. One Loop Divergences of the Nonlinear Chiral Theory. *Phys. Rev.* **D** **1975**, 12, 3351-3352. + +41. Gerstein, I.; Jackiw, R.; Weinberg, S.; Lee, B. Chiral loops. *Phys. Rev.* **D** **1971**, 3, 2486-2492. + +42. Charap, J. 
Closed-loop calculations using a chiral-invariant lagrangian. *Phys. Rev.* **D** **1970**, 2, 1554-1561. + +43. Honerkamp, J.; Meetz, K. Chiral-invariant perturbation theory. *Phys. Rev.* **D** **1971**, 3, 1996-1998. +---PAGE_BREAK--- + +44. Stueckelberg, E. Interaction forces in electrodynamics and in the field theory of nuclear forces. *Helv. Phys. Acta* **1938**, *11*, 299-328. + +45. Ruegg, H.; Ruiz-Altaba, M. The Stueckelberg field. *Int. J. Mod. Phys.* **2004**, *A19*, 3265-3348. + +46. Altarelli, G.; Mangano, M.L. Electroweak Physics. In Proceedings of CERN Workshop on Standard Model Physics (and More) at the LHC, CERN, Geneva, Switzerland, 25-26 May 1999. + +47. Azatov, A.; Contino, R.; Galloway, J. Model-Independent Bounds on a Light Higgs. JHEP **2012**, 1204, doi:10.1007/JHEP04(2012)127. + +48. Contino, R. The Higgs as a Composite Nambu-Goldstone Boson. Available online: http://arxiv.org/abs/1005.4269 (accessed on 15 April 2014). + +49. Espinosa, J.; Grojean, C.; Muhleitner, M.; Trott, M. First Glimpses at Higgs' face. JHEP **2012**, 1212, doi:10.1007/JHEP12(2012)045. + +50. Zinn-Justin, J. Renormalization of Gauge Theories—Unbroken and broken. Phys. Rev. D **1974**, *9*, 933–946. + +51. Velo, G.; Wightman, A. Renormalization Theory. In Proceedings of the NATO Advanced Study Institute, Erice, Sicily, Italy, 17–31 August 1975. + +52. Breitenlohner, P.; Maison, D. Dimensional Renormalization and the Action Principle. Commun. Math. Phys. **1977**, *52*, 11–38. + +53. Lam, Y.M.P. Perturbation Lagrangian theory for scalar fields: Ward-Takahashi identity and current algebra. Phys. Rev. D **1972**, *6*, 2145–2161. + +54. Lam, Y.M.P. Perturbation lagrangian theory for Dirac fields—Ward-Takahashi identity and current algebra. Phys. Rev. D **1972**, *6*, 2161–2167. + +55. Lowenstein, J. Normal product quantization of currents in Lagrangian field theory. Phys. Rev. D **1971**, *4*, 2281–2290. + +56. Piguet, O.; Sorella, S. 
Algebraic renormalization: Perturbative renormalization, symmetries and anomalies. Lect. Notes Phys. **1995**, M28, 1–134. + +57. Becchi, C.; Rouet, A.; Stora, R. Renormalization of Gauge Theories. Ann. Phys. **1976**, *98*, 287-321. + +58. Becchi, C.; Rouet, A.; Stora, R. Renormalization of the Abelian Higgs-Kibble Model. Commun. Math. Phys. **1975**, *42*, 127-162. + +59. Becchi, C.; Rouet, A.; Stora, R. The Abelian Higgs-Kibble Model. Unitarity of the S Operator. Phys. Lett. B **1974**, *52*, 344-346. + +60. Wess, J.; Zumino, B. Consequences of anomalous Ward identities. Phys. Lett. B **1971**, *37*, 95-97. + +61. Barnich, G.; Brandt, F.; Henneaux, M. Local BRST cohomology in gauge theories. Phys. Rep. **2000**, *338*, 439-569. + +62. Henneaux, M.; Wilch, A. Local BRST cohomology of the gauged principal nonlinear sigma model. Phys. Rev. D **1998**, *58*, 025017:1-025017:14. + +63. Quadri, A. Slavnov-Taylor parameterization of Yang-Mills theory with massive fermions in the presence of singlet axial-vector currents. JHEP **2005**, 0506, doi:10.1088/1126-6708/2005/06/068. + +64. Quadri, A. Higher order nonsymmetric counterterms in pure Yang-Mills theory. J. Phys. G **2004**, *30*, 677-689. + +65. Quadri, A. Slavnov-Taylor parameterization for the quantum restoration of BRST symmetries in anomaly free gauge theories. JHEP **2003**, 0304, doi:10.1088/1126-6708/2003/04/017. + +66. Quadri, A. Algebraic properties of BRST coupled doublets. JHEP **2002**, 0205, doi:10.1088/1126-6708/2002/05/051. + +67. Ferrari, R.; Quadri, A. Physical unitarity for massive non-Abelian gauge theories in the Landau gauge: Stueckelberg and Higgs. JHEP **2004**, 0411, doi:10.1088/1126-6708/2004/11/019. + +68. Froissart, M. Asymptotic behavior and subtractions in the Mandelstam representation. Phys. Rev. **1961**, *123*, 1053-1057. + +69. Cornwall, J.M.; Levin, D.N.; Tikopoulos, G. Derivation of Gauge Invariance from High-Energy Unitarity Bounds on the s Matrix. Phys. Rev. 
D **1974**, *10*, 1145-1167. + +70. Lee, B.W.; Quigg, C.; Thacker, H. Weak Interactions at Very High-Energies: The Role of the Higgs Boson Mass. Phys. Rev. D **1977**, *16*, 1519-1531. + +71. Weldon, H.A. The Effects of Multiple Higgs Bosons on Tree Unitarity. Phys. Rev. D **1984**, *30*, 1547-1558. +---PAGE_BREAK--- + +72. Chanowitz, M.S.; Gaillard, M.K. The TeV Physics of Strongly Interacting W's and Z's. Nucl. Phys. B **1985**, 261, 379-431. + +73. Gounaris, G.; Kogerler, R.; Neufeld, H. Relationship Between Longitudinally Polarized Vector Bosons and their Unphysical Scalar Partners. Phys. Rev. D **1986**, *34*, 3257-3259. + +74. Bettinelli, D.; Ferrari, R.; Quadri, A. Of Higgs, Unitarity and other Questions. Proc. Steklov Inst. Math. **2011**, 272, 22-38. + +75. Aguilar, A.; Ibanez, D.; Mathieu, V.; Papavassiliou, J. Massless bound-state excitations and the Schwinger mechanism in QCD. Phys. Rev. D **2012**, *85*, doi:10.1103/PhysRevD.85.014018. + +76. Aguilar, A.; Binosi, D.; Papavassiliou, J. The dynamical equation of the effective gluon mass. Phys. Rev. D **2011**, *84*, doi:10.1103/PhysRevD.84.085026. + +77. Ibañez, D.; Papavassiliou, J. Gluon mass generation in the massless bound-state formalism. Phys. Rev. D **2013**, *87*, doi:10.1103/PhysRevD.87.034008. + +78. Higgs, P.W. Broken symmetries, massless particles and gauge fields. Phys. Lett. **1964**, *12*, 132-133. + +79. Higgs, P.W. Broken Symmetries and the Masses of Gauge Bosons. Phys. Rev. Lett. **1964**, *13*, 508-509. + +80. Englert, F.; Brout, R. Broken Symmetry and the Mass of Gauge Vector Mesons. Phys. Rev. Lett. **1964**, *13*, 321-323. + +81. Connes, A.; Kreimer, D. Renormalization in quantum field theory and the Riemann-Hilbert problem. 1. The Hopf algebra structure of graphs and the main theorem. Commun. Math. Phys. **2000**, *210*, 249-273. + +82. Connes, A.; Kreimer, D. Renormalization in quantum field theory and the Riemann-Hilbert problem. 2. 
The beta function, diffeomorphisms and the renormalization group. Commun. Math. Phys. **2001**, *216*, 215-241. + +83. Ebrahimi-Fard, K.; Patras, F. Exponential renormalization. Ann. Henri Poincare **2010**, *11*, 943-971. + +84. Zimmermann, W. Convergence of Bogolyubov's method of renormalization in momentum space. Commun. Math. Phys. **1969**, *15*, 208-234. + +© 2014 by the author. Licensee MDPI, Basel, Switzerland. This article is an open access +article distributed under the terms and conditions of the Creative Commons Attribution +(CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +Article + +Dynamical Relation between Quantum Squeezing and Entanglement in Coupled Harmonic Oscillator System + +Lock Yue Chew ¹,* and Ning Ning Chung ² + +¹ Division of Physics and Applied Physics, School of Physical and Mathematical Sciences, Nanyang Technological University, Singapore 637371, Singapore + +² Department of Physics, National University of Singapore, Singapore 117542, Singapore; E-Mail: phycnn@nus.edu.sg + +* E-Mail: lockyue@ntu.edu.sg; Tel.: +65-6316-2968; +65-6316-6984. + +Received: 27 February 2014; in revised form: 14 April 2014 / Accepted: 18 April 2014 / +Published: 23 April 2014 + +**Abstract:** In this paper, we investigate into the numerical and analytical relationship between the dynamically generated quadrature squeezing and entanglement within a coupled harmonic oscillator system. The dynamical relation between these two quantum features is observed to vary monotonically, such that an enhancement in entanglement is attained at a fixed squeezing for a larger coupling constant. Surprisingly, the maximum attainable values of these two quantum entities are found to consistently equal to the squeezing and entanglement of the system ground state. 
In addition, we demonstrate that the inclusion of a small anharmonic perturbation has the effect of modifying the squeezing *versus* entanglement relation into a nonunique form and also extending the maximum squeezing to a value beyond the system ground state. + +**Keywords:** quantum entanglement; squeezed state; coupled harmonic oscillators + +PACS: 03.65.Ge, 31.15.MD + +# 1. Introduction + +Entanglement is a fundamental resource for non-classical tasks in the field of quantum information [1]. It has been shown to improve communication and computation capabilities via the notion of quantum dense coding [2], quantum teleportation [3], unconditionally secured quantum cryptographic protocols [4,5], and quantum algorithms for integer factorization [6]. For any quantum algorithm operating on pure states, it has been proven that the presence of multi-partite entanglement is necessary if the quantum algorithm is to offer an exponential speed-up over classical computation [7]. Note, however, that a non-zero value of entanglement might not be the necessary condition for quantum computational speed up of algorithm operating on mixed states [8]. In addition, in order to achieve these goals practically, it is necessary to maintain the entanglement within the quantum states which are fragile against the decohering environment. An approach would be to employ an entangled state with as large an entanglement as possible, and the idea is that the production of such entangled state could be tuned through the operation of quantum squeezing. + +Indeed, the relation between quantum squeezing and quantum entanglement has been actively pursued in recent years [9–18]. Notably, the creation of entanglement is shown experimentally to be able to induce spin squeezing [9,10]. Such entanglement-induced squeezing has the important outcome of producing measuring instruments that go beyond the precision of current models. 
In addition, quantum squeezing is found to be able to induce, enhance and even preserve entanglement in decohering environments [11–13]. Previously, we have investigated the relation between the squeezing +---PAGE_BREAK--- + +and entanglement of the ground state of the coupled harmonic oscillator system [16,17]. The ground state entanglement entropy was found to increase monotonically with an increase in quadrature squeezing within this system. When a small anharmonic perturbing potential is added to the system, a further enhancement in quadrature squeezing is observed. While the entropy-squeezing curve shifts to the right in this case, we realized that the entanglement entropy is still a monotonically increasing function in terms of quadrature squeezing. + +In this paper, we have extended our earlier work discussed above by investigating into the dynamical relation between quadrature squeezing and entanglement entropy of the coupled harmonic oscillator system. Coupled harmonic oscillator system has served as useful paradigm for many physical systems, such as the field modes of electromagnetic radiation [19–21], the vibrations in molecular systems [22], and the formulation of the Lee model in quantum field theory [23]. It was shown that the coupled harmonic oscillator system possesses the symmetry of the Lorentz group $O(3, 3)$ or $SL(4, r)$ classically, and that of the symmetry $O(3, 2)$ or $Sp(4)$ quantum mechanically [24]. In addition, the physics of coupled harmonic oscillator system can be conveniently represented by the mathematics of two-by-two matrices, which have played a role in clarifying the physical basis of entanglement [25]. In Section 2 of this paper, we first described the coupled harmonic oscillator model. It is then followed by a discussion on the relation between the dynamically generated squeezing and entanglement of the coupled oscillator systems, which we have determined quantitatively via numerical computation. 
In Section 3 of the paper, we present analytical results in support of the numerical results obtained in Section 2. Here, we illustrate how the problem can be solved in terms of two-by-two matrices. Then, in Section 4 of the paper, we study how the inclusion of anharmonicity can influence the relation between the dynamically generated squeezing and entanglement. Finally, we give our conclusion in Section 5 of the paper.

## 2. Dynamical Relation of Quantum Squeezing and Entanglement in Coupled Harmonic Oscillator System

The Hamiltonian of the coupled harmonic oscillator system is described as follows:

$$H = \frac{p_1^2}{2m_1} + \frac{1}{2}m_1\omega_1^2 x_1^2 + \frac{p_2^2}{2m_2} + \frac{1}{2}m_2\omega_2^2 x_2^2 + \lambda(x_2 - x_1)^2 \quad (1)$$

where $x_1$ and $x_2$ are the position co-ordinates, while $p_1$ and $p_2$ are the momenta of the oscillators. The interaction potential between the two oscillators is assumed to depend quadratically on the distance between the oscillators, and is proportional to the coupling constant $\lambda$. For simplicity, we have set $m_1 = m_2 = m$ and $\omega_1 = \omega_2 = \omega$. This Hamiltonian is commonly used to model physical systems such as the vibrating molecules or the squeezed modes of electromagnetic field. In fact, the model has been widely explored [26–28] and is commonly used to elucidate the properties of quantum entanglement in continuous variable systems [29–35].

Next, let us discuss the relation between the squeezing and entanglement of the lowest energy eigenstate of this coupled harmonic oscillator system. Note that

$$H |g\rangle = E_0 |g\rangle \quad (2)$$

with $|g\rangle$ being the ground state and $E_0$ being the lowest eigen-energy of the coupled oscillator system with Hamiltonian given by Equation (1).
Entanglement between the two oscillators can be quantified by the von Neumann entropy:

$$S_{vN} = -\text{Tr}[\rho_l \ln \rho_l] \quad (3)$$

where $\rho_l$ is the reduced density matrix. For the squeezing parameter, we shall adopt the dimensionless definition:

$$S_x = -\ln \frac{\sigma_{x_1}}{\sigma_{x_1}^{(0)}} \quad (4)$$
---PAGE_BREAK---

with $\sigma_{x_1} = \sqrt{\langle x_1^2 \rangle - \langle x_1 \rangle^2}$ being the uncertainty associated with the first oscillator's position and the normalization constant $\sigma_{x_1}^{(0)} = \sqrt{\hbar/2m\omega}$ being the uncertainty associated with the harmonic oscillator's position. For simplicity, we shall evaluate only the position squeezing in the first oscillator.

Indeed, the position uncertainty squeezing and the entanglement entropy of the ground state of this oscillator have been solved analytically by previous studies [36,37] as follows:

$$S_x = -\ln \frac{\sqrt{\frac{\hbar}{2m\omega}\frac{1+\gamma}{2}}}{\sqrt{\frac{\hbar}{2m\omega}}} = -\ln \sqrt{\frac{1+\gamma}{2}} \quad (5)$$

where $\gamma = 1/\sqrt{1+4\lambda/m\omega^2}$; and

$$S_{vN} = \cosh^2\left(\frac{\ln\gamma}{4}\right) \ln\left[\cosh^2\left(\frac{\ln\gamma}{4}\right)\right] - \sinh^2\left(\frac{\ln\gamma}{4}\right) \ln\left[\sinh^2\left(\frac{\ln\gamma}{4}\right)\right] \quad (6)$$

As shown in Reference [17], by eliminating $\gamma$ between Equations (5) and (6), the relation between the squeezing parameter and the von Neumann entropy of the ground state of the coupled harmonic oscillators is obtained as follows:

$$S_{vN} = \frac{(\zeta + 1)^2}{4\zeta} \ln\left(\frac{(\zeta + 1)^2}{4\zeta}\right) - \frac{(\zeta - 1)^2}{4\zeta} \ln\left(\frac{(\zeta - 1)^2}{4\zeta}\right) \quad (7)$$

with

$$\zeta = \sqrt{2e^{-2S_x} - 1} \quad (8)$$

This relation is shown as a solid line in Figure 1.
+ +**Figure 1.** A plot on the dynamical relation between entanglement and squeezing obtained numerically for coupled harmonic oscillator system with the coupling constant $\lambda = 0.75$ (squares), 2 (triangles), 3.75 (circles) and 6 (crosses). Note that the ground state entanglement-squeezing curve given by Equation (7) is plotted as a solid curve for comparison. In addition, the values of the maximum attainable squeezing and entanglement for various $\lambda$ have been plotted as stars. + +In this paper, we have gone beyond the static relation between squeezing and entanglement based on the stationary ground state. In particular, we have explored numerically into the dynamical generation of squeezing and entanglement via the quantum time evolution, with the initial state being the tensor product of the vacuum states ($|0,0\rangle$) of the oscillators. Note that the obtained results +---PAGE_BREAK--- + +hold true for any initial coherent states ($|a_1, a_2\rangle$) since the entanglement dynamics of the coupled harmonic oscillator system is independent of initial states [38]. In general, the system dynamics is either two-frequency periodic or quasi-periodic depending on whether the ratio of the two frequencies, $f_1 = 1$ and $f_2 = \sqrt{1+4\lambda}$, are rational or irrational. By yielding the values of the squeezing parameter and the entanglement entropy at the same time point within their respective dynamical evolution, we obtained the dynamical relations between the squeezing and entanglement for different coupling constants $\lambda = 0.75, 2, 3.75$ and 6, as shown in Figure 1. Interestingly, the results show a smooth monotonic increase of the dynamically generated entanglement entropy as the quadrature squeezing increases for each $\lambda$. In addition, the dynamically generated entanglement entropy is observed to be larger for a fixed squeezing as $\lambda$ increases. 
It is surprising that the maximum attainable values of these two quantum entities determined dynamically are found to fall consistently on the system ground states' squeezing and entanglement relation as given by Equations (7) and (8) for all values of $\lambda$. More importantly, this relation also serves as a bound to the entanglement entropy and squeezing that are generated dynamically. + +### 3. Analytical Derivation on the Dynamical Relation between Quantum Squeezing and Entanglement + +In this section, we shall perform an analytical study on the dynamical relationship between quantum squeezing and the associated entanglement production. We first yield the second quantized form of the Hamiltonian of the coupled harmonic oscillator system as follows: + +$$H = a_1^\dagger a_1 + a_2^\dagger a_2 + 1 + \frac{\lambda}{2} \{(a_1^\dagger + a_1) - (a_2^\dagger + a_2)\}^2 \quad (9)$$ + +Then, the time evolution of the annihilation operator $a_j$ (as well as the creation operator $a_j^\dagger$) can be determined according to the following Heisenberg equation of motion: + +$$\frac{d}{dt} a_j = \frac{1}{i} [a_j, H] \quad (10)$$ + +From this, we obtain: + +$$\frac{d}{dt} \tilde{a} = A \tilde{a} \quad (11)$$ + +with $\tilde{a} = (a_1 a_1^\dagger a_2 a_2^\dagger)^T$ and + +$$A = \begin{pmatrix} B & C \\ C & B \end{pmatrix} \quad (12)$$ + +Note that + +$$B = i \begin{pmatrix} -(1+\lambda) & -\lambda \\ \lambda & 1+\lambda \end{pmatrix} \quad (13)$$ + +and + +$$C = i \begin{pmatrix} \lambda & \lambda \\ -\lambda & -\lambda \end{pmatrix} \quad (14)$$ + +Due to the symmetry in the coupled oscillator system, the matrix $A$ is symmetric in the form of a two-by-two matrix although it is not symmetric in its full four-by-four matrix form. 
This symmetric property enables a simple evaluation of the time dependent annihilation and creation operators of the oscillators: + +$$\tilde{a}(t) = F\tilde{a}(0) \quad (15)$$ + +where + +$$F = \frac{1}{2} \begin{pmatrix} J e^{D_1 t} J - K e^{D_2 t} K^{-1} & J e^{D_1 t} J + K e^{D_2 t} K^{-1} \\ J e^{D_1 t} J + K e^{D_2 t} K^{-1} & J e^{D_1 t} J - K e^{D_2 t} K^{-1} \end{pmatrix} \quad (16)$$ +---PAGE_BREAK--- + +$$J = \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix} \qquad (17)$$ + +$$D_1 = \begin{pmatrix} i & 0 \\ 0 & -i \end{pmatrix} \qquad (18)$$ + +$$D_2 = \begin{pmatrix} i\Omega & 0 \\ 0 & -i\Omega \end{pmatrix} \qquad (19)$$ + +and + +$$K = \begin{pmatrix} 1 & \beta \\ \beta & 1 \end{pmatrix} \qquad (20)$$ + +with $\Omega = f_2 = \sqrt{1+4\lambda}$ and $\beta = (1+\Omega)/(1-\Omega)$. We then have: + +$$a_1(t) = \left(\frac{1}{2}e^{-it} - \eta_1 + \eta_2\right) a_1(0) + \eta_3 a_1^\dagger(0) + \left(\frac{1}{2}e^{-it} + \eta_1 - \eta_2\right) a_2(0) - \eta_3 a_2^\dagger(0) \qquad (21)$$ + +$$a_1^\dagger(t) = -\eta_3 a_1(0) + \left(\frac{1}{2}e^{it} - \eta_1^* + \eta_2^*\right) a_1^\dagger(0) + \eta_3 a_2(0) + \left(\frac{1}{2}e^{it} + \eta_1^* - \eta_2^*\right) a_2^\dagger(0) \qquad (22)$$ + +$$a_2(t) = \left(\frac{1}{2}e^{-it} + \eta_1 - \eta_2\right) a_1(0) - \eta_3 a_1^\dagger(0) + \left(\frac{1}{2}e^{-it} - \eta_1 + \eta_2\right) a_2(0) + \eta_3 a_2^\dagger(0) \qquad (23)$$ + +$$a_2^\dagger(t) = \eta_3 a_1(0) + \left(\frac{1}{2}e^{it} + \eta_1^* - \eta_2^*\right) a_1^\dagger(0) - \eta_3 a_2(0) + \left(\frac{1}{2}e^{it} - \eta_1^* + \eta_2^*\right) a_2^\dagger(0) \qquad (24)$$ + +where + +$$\eta_1 = \frac{(1-\Omega)^2}{8\Omega} e^{i\Omega t}$$ + +$$\eta_2 = \frac{(1+\Omega)^2}{8\Omega} e^{-i\Omega t}$$ + +$$\eta_3 = \frac{i(1-\Omega)(1+\Omega)}{4\Omega} \sin(\Omega t)$$ + +With these results, we are now ready to determine the analytical expressions of both the quantum entanglement and squeezing against time. 
For entanglement, we shall employ the criterion developed by Duan *et al.* [39] for quantification since it leads to simplification of the analytical expression while remaining valid as a measure of entanglement in coupled harmonic oscillator systems. According to this criterion, as long as + +$$S_D = 2 - (\Delta u)^2 - (\Delta v)^2 > 0 \qquad (25)$$ + +the state of the quantum system is entangled. Note that $u = x_1 + x_2$ and $v = p_1 - p_2$ are two EPR-type operators, whereas $\Delta u$ and $\Delta v$ are the corresponding quantum fluctuations. This allows us to express the entanglement measure $S_D$ as follows: + +$$S_D(t) = -2\left(\langle a_1^\dagger a_1\rangle - \langle a_1^\dagger\rangle\langle a_1\rangle + \langle a_2^\dagger a_2\rangle - \langle a_2^\dagger\rangle\langle a_2\rangle + \\ \langle a_1^\dagger a_2^\dagger\rangle - \langle a_1^\dagger\rangle\langle a_2^\dagger\rangle + \langle a_1 a_2\rangle - \langle a_1\rangle\langle a_2\rangle\right) \qquad (26)$$ + +Note that the short form $\langle O \rangle$ used in Equation (26) implies $\langle\alpha_1, \alpha_2|O(t)|\alpha_1, \alpha_2\rangle$, where $|\alpha_1, \alpha_2\rangle$ represents a tensor product of arbitrary initial coherent states. Recall that the subsequent results are independent of +---PAGE_BREAK--- + +the initial states as mentioned in the last section. After substituting Equations (21)–(24) into Equation (26), we obtain the analytical expression of entanglement against time: + +$$S_D(t) = (\Omega^2 - 1) \sin^2 \Omega t \quad (27)$$ + +In coupled harmonic oscillator systems, $S_D$ has a unique monotonic relation with $S_{vN}$ (see Figure 2). 
For squeezing, we have + +$$ +\begin{aligned} +S_x(t) &= -\ln \sqrt{\frac{\langle x_1^2 \rangle - \langle x_1 \rangle^2}{0.5}} \\ +&= -\ln \sqrt{\langle a_1^{\dagger 2} \rangle - \langle a_1^{\dagger} \rangle^2 + \langle a_1^2 \rangle - \langle a_1 \rangle^2 + \langle a_1^{\dagger} a_1 \rangle - \langle a_1^{\dagger} \rangle \langle a_1 \rangle + \langle a_1 a_1^{\dagger} \rangle - \langle a_1 \rangle \langle a_1^{\dagger} \rangle} +\end{aligned} +\quad (28) $$ + +Then, by substituting Equations (21)–(24) into Equation (28) as before, we obtain the analytical expression of squeezing against time: + +$$S_x(t) = -\ln \sqrt{1 - \frac{\Omega^2 - 1}{2\Omega^2} \sin^2 \Omega t} \quad (29)$$ + +We can also obtain an analytical expression between $S_D$ and $S_x$ by substituting Equation (27) into Equation (29) with some rearrangement: + +$$S_D = 2\Omega^2 (1 - e^{-2S_x}) \quad (30)$$ + +It is important to note that $S_x$ can only span a range of values $0 \le S_x \le S_x^{(m)}$, where $S_x^{(m)} = -\ln\sqrt{(\Omega^2+1)/2\Omega^2}$. Furthermore, for a coupled harmonic oscillator system with a fixed value of $\lambda$, the dynamically generated squeezing can be higher than the squeezing in the system's ground state. The analytical result given by Equation (30) is plotted in Figure 3 for $\lambda = 0.75, 2, 3.75, 6$ and 10, with each curve beginning at $S_x = 0$, $S_D = 0$ and ending at $S_x = S_x^{(m)}$, $S_D = S_D^{(m)} = \Omega^2 - 1$. In fact, the set of end points given by $S_x = S_x^{(m)}$, $S_D = S_D^{(m)}$ gives rise to the solid curve in Figure 3. Specifically, the maximum entanglement and the maximum squeezing parameter are related as follows: + +$$S_D^{(m)} = \frac{1 - \zeta^2}{\zeta^2} \quad (31)$$ + +with + +$$\zeta = \sqrt{2e^{-2S_x^{(m)}} - 1} \quad (32)$$ + +Note that Equation (32) is the same as Equation (8), and Equation (31) corresponds to the ground state solid curve of Figure 1. 
This allows us to deduce the monotonic relation between $S_D$ and $S_{vN}$, which is performed by evaluating the relation between $S_D$ of the maximum entangled state and $S_{vN}$ of the ground state at an equal amount of squeezing. Indeed, the resulting derived relationship shown as a solid line in Figure 2 is valid due to the fact that the link between $S_D(t)$ and $S_{vN}(t)$ is found to be expressible by precisely the same curve. Thus, we have concretely affirmed the one-to-one correspondence between $S_D$ and $S_{vN}$ through this relationship. More importantly, we have clearly demonstrated that the maximum entanglement attained dynamically is the same as the degree of entanglement of a ground state with the same squeezing. +---PAGE_BREAK--- + +**Figure 2.** This plot shows the monotonic relation between $S_D$ and $S_{vN}$ in coupled harmonic oscillator systems. $S_D(t)$ and $S_{vN}(t)$ are plotted as squares ($\lambda = 0.75$), triangles ($\lambda = 2$), circles ($\lambda = 3.75$) and crosses ($\lambda = 6$). The relation between the ground state von Neumann entropy given by $S_{vN} = \frac{(\xi+1)^2}{4\xi} \ln\left(\frac{(\xi+1)^2}{4\xi}\right) - \frac{(\xi-1)^2}{4\xi} \ln\left(\frac{(\xi-1)^2}{4\xi}\right)$ and the maximum dynamically generated entanglement given by $S_D^{(m)} = \frac{1-\xi^2}{\xi^2}$ is plotted as a solid curve. Note that both $S_{vN}$ and $S_D^{(m)}$ are functions of the squeezing parameter $S_x$ and $\xi = \sqrt{2e^{-2S_x} - 1}$. + +**Figure 3.** A plot on the dynamical relation between entanglement and squeezing given by Equation (30) for coupled harmonic oscillator system. The relation is dependent on $\lambda$ and the curves from top to bottom are with respect to $\lambda = 10, 6, 3.75, 2,$ and $0.75$ respectively. Note that the thick solid curve represents the values of the maximum attainable squeezing and entanglement for the range $0 < \lambda < 10$. 
+ +When projected into the $x_1 - p_2$ or $x_2 - p_1$ plane, the initial coherent state can be represented by a circular distribution with equal uncertainty in both *x* and *p* direction. During the time evolution, the circular distribution is being rotated and squeezed. As a result, squeezing and entanglement are generated such that the distribution becomes elliptical in the $x_1 - p_2$ or $x_2 - p_1$ plane with rotation of the ellipse's major axis away from the *x*- or *p*-axis which creates entanglement. The generation of squeezing and entanglement reaches their maximum values at the same time when the major axis of the elliptical distribution has rotated 45° away from the *x*- or *p*-axis. Note that at this point, squeezing is merely in the collective modes. On the other hand, as discussed in Reference [37], the ground state wave function of the coupled harmonic oscillator system is separable in their collective modes. In both cases, entanglement and squeezing relates uniquely as given by Equation (7) and (31). +---PAGE_BREAK--- + +4. Quantum Squeezing and Entanglement in Coupled Anharmonic Oscillator Systems + +Next, let us investigate the effect of including an anharmonic potential on the dynamical relation +between squeezing and entanglement through the following Hamiltonian systems: + +$$ +H = \frac{p_1^2}{2m_1} + \frac{1}{2}m_1\omega_1^2 x_1^2 + \frac{p_2^2}{2m_2} + \frac{1}{2}m_2\omega_2^2 x_2^2 + \lambda(x_2 - x_1)^2 + \epsilon(x_1^4 + x_2^4) \quad (33) +$$ + +For simplicity, we consider only the quartic perturbation potential. For previous studies of entanglement in coupled harmonic oscillators with quartic perturbation, see Reference [40] and the references therein. Again, we choose the initial state to be the tensor product of the vacuum states. We then evolve the state numerically through the Hamiltonian given by Equation (33). For the numerical simulation, we consider only a small anharmonic perturbation, i.e., $\epsilon = 0.1$ and $0.2$. 
Note that we have truncated the basis size at $M = 85$ at which the results are found to converge. + +With a small anharmonic perturbation, the dynamically generated entanglement entropy is no longer a smooth monotonically increasing function of the quadrature squeezing as before (see Figure 4). This implies that for coupled anharmonic oscillator systems, the dynamically generated degree of entanglement cannot be characterized through a measurement of the squeezing parameter. In addition, when the anharmonic potential is included, the maximum attainable squeezing is much enhanced. This effect is clearly shown in Figure 4, where we observe that the maximum dynamical squeezing extends far beyond the largest squeezing given by the coupled anharmonic oscillator system’s ground state at different $\lambda$. In addition, as we increase the anharmonic perturbation from 0.1 to 0.2, we found that the maximum attainable squeezing continues to grow with extension going further beyond the largest squeezing given by the ground state of the coupled anharmonic oscillator system. + +**Figure 4.** The effect of anharmonicity ($\epsilon = 0.1$) on the dynamical relation between quadrature squeezing and entanglement. Note that we have employed the following parameter: (a) $\lambda = 0.75$; (b) $\lambda = 2$; (c) $\lambda = 3.75$; and (d) $\lambda = 6$. We have plotted the ground state entanglement-squeezing curve of the coupled anharmonic oscillator system with $\epsilon = 0.1$ as solid curve for comparison. +---PAGE_BREAK--- + +**Figure 5.** The effect of anharmonicity ($\epsilon = 0.2$) on the dynamical relation between quadrature squeezing and entanglement. Note that we have employed the following parameter: (a) $\lambda = 0.75$, (b) $\lambda = 2$, (c) $\lambda = 3.75$, and (d) $\lambda = 6$. We have plotted the ground state entanglement-squeezing curve of the coupled anharmonic oscillator system with $\epsilon = 0.2$ as solid curve for comparison. + +## 5. 
Conclusions + +We have studied the dynamical generation of quadrature squeezing and entanglement for both coupled harmonic and anharmonic oscillator systems. Our numerical and analytical results show that the quantitative relation that defines the dynamically generated squeezing and entanglement in coupled harmonic oscillator system is a monotonically increasing function. Such a monotonic relation vanishes, however, when a small anharmonic potential is added to the system. This result implies the possibility of characterizing the dynamically generated entanglement by means of squeezing in the case of coupled harmonic oscillator system. In addition, we have uncovered the unexpected result that the maximum attainable entanglement and squeezing obtained dynamically match exactly the entanglement-squeezing relation of the system's ground state of the coupled harmonic oscillators. When an anharmonic potential is included, we found that the dynamically generated squeezing can be further enhanced. We perceive that this result may provide important insights into the construction of precision instruments that attempt to beat the quantum noise limit. + +**Acknowledgments:** L. Y. Chew would like to thank Y. S. Kim for the helpful discussion on this work during the ICSSUR 2013 conference held in Nuremberg, Germany. + +**Author Contributions:** All authors contribute equally to the theoretical analysis, numerical computation, and writing of the paper. + +**Conflicts of Interest:** The authors declare no conflict of interest. + +## References + +1. Nielsen, M.A.; Chuang, I.L. *Quantum Computation and Quantum Information*; Cambridge University Press: Cambridge, UK, 2000. +2. Bennett, C.H.; Wiesner, S.J. Communication via one- and two-particle operators on Einstein-Podolsky-Rosen states. *Phys. Rev. Lett.* **1992**, *69*, 2881–2884. +---PAGE_BREAK--- + +3. Bennett, C.H.; Brassard, G.; Crépeau, C.; Jozsa, R.; Peres, A.; Wootters, W.K. 
Teleporting an unknown quantum state via dual classical and Einstein-Podolsky-Rosen channels. Phys. Rev. Lett. 1993, 70, 1895-1899. + +4. Bennett, C.H.; Brassard, G. Quantum cryptography: Public key distribution and coin tossing. In Proceedings of the IEEE International Conference on Computers, Systems and Signal Processing, IEEE Computer Society, New York, NY, USA, 1984; pp. 175-179. + +5. Ekert, A.K. Quantum cryptography based on Bell's theorem. Phys. Rev. Lett. 1991, 67, 661-663. + +6. Shor, P.W. Polynomial-time algorithms for prime factorization and discrete logarithms on a quantum computer. SIAM J. Comput. 1997, 26, 1484-1509. + +7. Jozsa, R.; Linden, N. On the role of entanglement in quantum-computational speed-up. Proc. R. Soc. Lond. A 2003, 459, 2011-2032. + +8. Lanyon, B.P.; Barbieri, M.; Almeida, M.P.; White, A.G. Experimental quantum computing without entanglement. Phys. Rev. Lett. 2008, 101, 200501:1-200501:4. + +9. Sørensen, A.; Duan, L.M.; Cirac, J.I.; Zoller, P. Many-particle entanglement with Bose-Einstein condensates. Nature 2001, 409, 63-66. + +10. Bigelow, N. Squeezing Entanglement. Nature 2001, 409, 27-28. + +11. Furuichi, S.; Mahmoud, A.A. Entanglement in a squeezed two-level atom. J. Phys. A Math. Gen. 2001, 34, 6851-6857. + +12. Xiang, S.; Shao, B.; Song, K. Quantum entanglement and nonlocality properties of two-mode Gaussian squeezed states. Chin. Phys. B 2009, 18, 418-425. + +13. Galve, F.; Pachón, L.A.; Zueco, D. Bringing entanglement to the high temperature limit. Phys. Rev. Lett. 2010, 105, doi:10.1103/PhysRevLett.105.180501. + +14. Ulam-Orgikh, D.; Kitagawa, M. Spin squeezed and decoherence limit in Ramsey spectroscopy. Phys. Rev. A 2001, 64, doi:10.1103/PhysRevA.64.052106. + +15. Wolf, M.M.; Eisert, J.; Plenio, M.B. Entangling power of passive optical elements. Phys. Rev. Lett. 2003, 90, 047904:1-047904:4. + +16. Chung, N.N.; Er, C.H.; Teo, Y.S.; Chew, L.Y. 
Relation of the entanglement entropy and uncertainty product in ground states of coupled anharmonic oscillators. Phys. Rev. A 2010, 82, doi:10.1103/PhysRevA.82.014101. + +17. Chew, L.Y.; Chung, N.N. Quantum entanglement and squeezing in coupled harmonic and anharmonic oscillators systems. J. Russ. Laser Res. 2011, 32, 331-337. + +18. Er, C.H.; Chung, N.N.; Chew, L.Y. Threshold effect and entanglement enhancement through local squeezing of initial separable states in continuous-variable systems. Phys. Scripta 2013, 87, doi:10.1088/0031-8949/87/02/025001. + +19. Han, D.; Kim, Y.S.; Noz, M.E. Linear canonical transformations of coherent and squeezed states in the Wigner phase space. Phys. Rev. A 1988, 37, 807-814. + +20. Han, D.; Kim, Y.S.; Noz, M.E. Linear canonical transformations of coherent and squeezed states in the Wigner phase space. II. Quantitative analysis. Phys. Rev. A 1989, 40, 902-912. + +21. Han, D.; Kim, Y.S.; Noz, M.E. Linear canonical transformations of coherent and squeezed states in the Wigner phase space. III. Two-mode states. Phys. Rev. A 1990, 41, 6233-6244. + +22. Wilson, E.B.; Decius, J.C.; Cross, P.C. Molecular Vibration; McGraw-Hill: New York, NY, USA, 1955. + +23. Schweber, S.S. An Introduction to Relativistic Quantum Field Theory; Row-Peterson: New York, NY, USA, 1961. + +24. Han, D.; Kim, Y.S.; Noz, M.E. O(3,3)-like symmetries of coupled harmonic-oscillators. J. Math. Phys. 1995, 36, 3940-3954. + +25. Kim, Y.S.; Noz, M.E. Coupled oscillators, entangled oscillators, and Lorentz-covariant harmonic oscillators. J. Opt. B Quantum Semiclass. Opt. 2005, 7, S458-S467. + +26. Eisert, J.; Plenio, M.B.; Bose, S.; Hartley, J. Towards quantum entanglement in nanoelectromechanical devices. Phys. Rev. Lett. 2004, 93, 190402:1-190402:4. + +27. Joshi, C.; Jonson, M.; Öhberg, P.; Andersson, E. Constructive role of dissipation for driven coupled bosonic modes. Phys. Rev. A 2013, 87, 062304:1-062304:4. + +28. 
Joshi, C.; Hutter, A.; Zimmer, F.E.; Jonson, M.; Andersson, E.; Öhberg, P. Quantum entanglement of nanocantilevers. Phys. Rev. A 2010, 82, doi:10.1103/PhysRevA.82.043846. + +29. Ikeda, S.; Fillaux, F. Incoherent elastic-neutron-scattering study of the vibrational dynamics and spin-related symmetry of protons in the KHCO₃ crystal. Phys. Rev. B 1999, 59, 4134-4145. + +Symmetry **2014**, *6*, 295–307 +---PAGE_BREAK--- + +30. Fillaux, F. Quantum entanglement and nonlocal proton transfer dynamics in dimers of formic acid and analogues. *Chem. Phys. Lett.* **2005**, *408*, 302–306. + +31. Audenaert, K.; Eisert, J.; Plenio, M.B.; Werner, R.F. Symmetric qubits from cavity states. *Phys. Rev. A* **2002**, *66*, 042327:1–042327:6. + +32. Martina, L.; Soliani, G. Hartree-Fock approximation and entanglement. Available online: http://arxiv.org/abs/0704.3130 (accessed on 18 April 2014). + +33. Chung, N.N.; Chew, L.Y. Energy eigenvalues and squeezing properties of general systems of coupled quantum anharmonic oscillators. *Phys. Rev. A* **2007**, *76*, doi:10.1103/PhysRevA.76.032113. + +34. Chung, N.N.; Chew, L.Y. Two-step approach to the dynamics of coupled anharmonic oscillators. *Phys. Rev. A* **2009**, *80*, doi:10.1103/PhysRevA.80.012103. + +35. Jellal, A.; Madouri, F.; Merdaci, A. Entanglement in coupled harmonic oscillators studied using a unitary transformation. *J. Stat. Mech.* **2011**, doi:10.1088/1742-5468/2011/09/P09015. + +36. McDermott, R.M.; Redmount, I.H. Coupled classical and quantum oscillators. Available online: http://arxiv.org/abs/quant-ph/0403184 (accessed on 18 April 2014). + +37. Han, D.; Kim, Y.S.; Noz, M.E. Illustrative example of Feynman's rest of the universe. *Am. J. Phys.* **1999**, *67*, 61–66. + +38. Chung, N.N.; Chew, L.Y. Dependence of entanglement dynamics on the global classical dynamical regime. *Phys. Rev. E* **2009**, *80*, 016204:1–016204:7. + +39. Duan, L.M.; Giedke, G.; Cirac, J.I.; Zoller, P. 
Inseparable criterion for continuous variable systems. *Phys. Rev. Lett.* **2000**, *84*, 2722–2725. + +40. Joshi, C.; Jonson, M.; Andersson, E.; Öhberg, P. Quantum entanglement of anharmonic oscillators. *J. Phys. B At. Mol. Opt. Phys.* **2011**, *44*, doi:10.1088/0953-4075/44/24/245503. + +© 2014 by the authors. Licensee MDPI, Basel, Switzerland. This article is an open access +article distributed under the terms and conditions of the Creative Commons Attribution +(CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +Article + +Closed-Form Expressions for the Matrix Exponential + +F. De Zela + +Departamento de Ciencias, Sección Física, Pontificia Universidad Católica del Perú, Ap.1761, Lima L32, Peru; +E-Mail: fdezela@pucp.edu.pe; Tel.: +51-1-6262000; Fax: +51-1-6262085 + +Received: 28 February 2014; in revised form: 16 April 2014 / Accepted: 17 April 2014 / Published: 29 April 2014 + +**Abstract:** We discuss a method to obtain closed-form expressions of $f(A)$, where $f$ is an analytic function and $A$ a square, diagonalizable matrix. The method exploits the Cayley-Hamilton theorem and has been previously reported using tools that are perhaps not sufficiently appealing to physicists. Here, we derive the results on which the method is based by using tools most commonly employed by physicists. We show the advantages of the method in comparison with standard approaches, especially when dealing with the exponential of low-dimensional matrices. In contrast to other approaches that require, e.g., solving differential equations, the present method only requires the construction of the inverse of the Vandermonde matrix. We show the advantages of the method by applying it to different cases, mostly restricting the calculational effort to the handling of two-by-two matrices. + +**Keywords:** matrix exponential; Cayley-Hamilton theorem; two-by-two representations; Vandermonde matrices + +PACS: 02.30.Tb, 42.25.Ja, 03.65.Fd + +# 1. 
Introduction + +Physicists are quite often faced with the task of calculating $f(A)$, where $A$ is an $n \times n$ matrix and $f$ an analytic function whose series expansion generally contains infinitely many terms. The most prominent example corresponds to $\exp A$. Usual approaches to calculate $f(A)$ consist in either truncating its series expansion, or else finding a way to "re-summate" terms so as to get a closed-form expression. There is yet another option that can be advantageously applied when dealing with an $n \times n$ matrix, and which derives from the Cayley-Hamilton theorem [1]. This theorem states that every square matrix satisfies its characteristic equation. As a consequence of this property, any series expansion can be written in terms of the first $n$ powers of $A$. While this result is surely very well known among mathematicians, it appears to be not so widespread within the physicists' community [2]. Indeed, most textbooks on quantum mechanics still resort to the Baker-Hausdorff lemma or to special properties of the involved matrices, in order to obtain closed-form expressions of series expansions [3–5]. This happens even when dealing with low-dimensional matrices, i.e., in cases in which exploiting the Cayley-Hamilton theorem would straightforwardly lead to the desired result. Such a state of affairs probably reflects a lack of literature on the subject that is more palatable to physicists than to mathematicians. The present paper aims at dealing with the subject matter by using language and tools that are most familiar to physicists. No claim of priority is made; our purpose is to show how well the derived results fit into the repertoire of tools that physicists routinely employ. To this end, we start addressing the simple, yet rich enough case of $2 \times 2$ matrices. + +An archetypical example is the Hamiltonian $H = k\sigma \cdot B$ that rules the dynamics of a spin-1/2 particle subjected to a magnetic field $B$. 
Here, $\sigma = (\sigma_x, \sigma_y, \sigma_z)$ denotes the Pauli spin operator and $k$ is a parameter that provides the above expression with appropriate units. The upsurge of research in several areas of physics—most notably in quantum optics—involving two-level systems, has made a +---PAGE_BREAK--- + +Hamiltonian of the above type quite ubiquitous. Indeed, the dynamics of any two-level system is ruled by a Hamiltonian that can be written in such a form. Hence, one often requires an explicit, closed-form expression for quantities such as $\exp(i\alpha n \cdot \sigma)$, where $n$ is a unit vector. This closed-form expression can be obtained as a generalization of Euler's formula $\exp i\alpha = \cos \alpha + i \sin \alpha$. It reads + +$$ \exp(i\alpha n \cdot \sigma) = \cos \alpha I + i \sin \alpha n \cdot \sigma \quad (1) $$ + +with $I$ denoting the identity operator. + +Let us recall how most textbooks of quantum mechanics proceed to demonstrate Equation (1) (see, e.g., [3–5]). The demonstration starts by writing the series expansion $\exp A = \sum_k A^k/k!$ for the case $A = i\alpha n \cdot \sigma$. Next, one invokes the following relationship: + +$$ (a \cdot \sigma)(b \cdot \sigma) = (a \cdot b)I + i(a \times b) \cdot \sigma \quad (2) $$ + +whose proof rests on $\sigma_i \sigma_j = \delta_{ij}I + i\epsilon_{ijk}\sigma_k$ (summation over repeated indices being understood). Equation (2) implies that $(n \cdot \sigma)^{2n} = I$, and hence $(n \cdot \sigma)^{2n+1} = n \cdot \sigma$. 
This allows one to split the power series of $\exp(i\alpha n \cdot \sigma)$ in two parts, one constituted by even and the other by odd powers of $i\alpha n \cdot \sigma$: + +$$ \exp(i\alpha n \cdot \sigma) = \sum_{n=0}^{\infty} \frac{(i\alpha)^{2n}}{(2n)!} I + \sum_{n=0}^{\infty} \frac{(i\alpha)^{2n+1}}{(2n+1)!} n \cdot \sigma \quad (3) $$ + +By similarly splitting Euler's exponential, i.e., + +$$ \exp i\alpha = \cos \alpha + i \sin \alpha = \sum_{n=0}^{\infty} \frac{(i\alpha)^{2n}}{(2n)!} + \sum_{n=0}^{\infty} \frac{(i\alpha)^{2n+1}}{(2n+1)!} \quad (4) $$ + +one sees that Equation (3) is the same as Equation (1). + +Although this standard demonstration is a relatively simple one, it seems to be tightly related to the particular properties of the operator $n \cdot \sigma$, as well as to our ability to "re-summate" the series expansion so as to obtain a closed-form expression. There are several other cases [6] in which a relation similar to Equation (1) follows as a consequence of generalizing some properties of the group SU(2) and its algebra to the case SU(N), with $N > 2$. Central to these generalizations and to their associated techniques are both the Cayley-Hamilton theorem and the closure of the Lie algebra su(N) under commutation and anti-commutation of its elements [6]. As already recalled, the Cayley-Hamilton theorem states that any $n \times n$ matrix $A$ satisfies its own characteristic equation $p(A) = 0$, where + +$$ p(\lambda) = \mathrm{Det}(\lambda I - A) = \lambda^n + c_{n-1}\lambda^{n-1} + \dots + c_1\lambda + c_0 \quad (5) $$ + +is $A$'s characteristic polynomial. From $p(A) = 0$ it follows that any power $A^k$, with $k \ge n$, can be written in terms of the matrices $I = A^0, A, \dots, A^{n-1}$. Thus, any infinite series, such as the one corresponding to $\exp A$, may be rewritten in terms of the $n$ powers $A^0, A, \dots, A^{n-1}$. By exploiting this fact one can recover Equation (1). 
Reciprocally, given $A$, one can construct a matrix $B$ that satisfies $\exp B = A$, as shown by Dattoli, Mari and Torre [2]. These authors used essentially the same tools as we do here and presented some of the results that we will show below, but leaving them in an implicit form. The aforementioned authors belong to a group that has extensively dealt with our subject matter and beyond it [7], applying the present techniques to cases of current interest [8]. A somewhat different approach was followed by Leonard [9], who related the Cayley-Hamilton theorem to the solution of ordinary differential equations, in order to get closed expressions for the matrix exponential. This technique can be applied to all $n \times n$ matrices, including those that are not diagonalizable. Untidt and Nielsen [10] used this technique when addressing the groups SU(2), SU(3) and SU(4). Now, especially when addressing SU(2), Leonard's approach seems to be unnecessarily involved. This is because there is a trade-off between the wide applicability of the method and its tailoring to a +---PAGE_BREAK--- + +special case. When dealing with diagonalizable matrices, the present approach may prove more useful. +Thus, one exploits not only the Cayley-Hamilton theorem, but the diagonalizability of the involved +matrices as well. As a result, we are provided with a straightforward way to obtain closed-form +expressions for the matrix exponential. There are certainly many other ways that are either more +general [9,11] or else better suited to specific cases [12–16], but the present method is especially useful +for physical applications. + +The rest of the paper is organized as follows. First, we present Leonard's technique in a way that somewhat differs from the approach used in [9]. Thereafter, we show how to obtain Equation (1) by using a technique that can be generalized to diagonalizable $n \times n$ matrices, thereby introducing the method that is the main subject of the present work. 
As an illustration of this technique, we address some representative cases that were taken from the repertoire of classical mechanics, quantum electrodynamics, quantum optics and from the realm of Lorentz transformations. While the results obtained are known, their derivations should serve to demonstrate the versatility of the method. Let us stress once again that our aim has been to present this method by following an approach that could be appealing to most physicists, rather than to mathematically oriented readers. + +## 2. Closed Form of the Matrix Exponential via the Solution of Differential Equations + +Consider the coupled system of differential equations, given by + +$$Dx = \frac{dx}{dt} = Ax \quad (6)$$ + +with $x = (x_1, \dots, x_n)^T$ and $A$ a constant, $n \times n$ matrix. The matrix exponential appears in the solution of Equation (6), when we write it as $x(t) = e^{At}x(0)$. By successive derivation of this exponential we obtain $D^k e^{At} = A^k e^{At}$. Hence, $p(D)e^{At} = (D^n + c_{n-1}D^{n-1} + \dots + c_1D + c_0)e^{At} = p(A)e^{At} = 0$, on account of $p(A) = 0$, i.e., the Cayley-Hamilton theorem. Now, as already noted, this implies that $e^{At}$ can be expressed in terms of $A^0, A, \dots, A^{n-1}$. Let us consider the matrix $M(t) := \sum_{k=0}^{n-1} y_k(t)A^k$, with the $y_k(t)$ being $n$ independent solutions of the differential equation $p(D)y(t) = 0$. That is, the $y_k(t)$ solve this equation for $n$ different initial conditions that will be conveniently chosen. We have thus that $p(D)M(t) = \sum_{k=0}^{n-1} p(D)y_k(t)A^k = 0$. Our goal is to choose the $y_k(t)$ so that $e^{At} = M(t)$. To this end, we note that $D^k e^{At}|_{t=0} = A^k e^{At}|_{t=0} = A^k$. That is, $e^{At}$ solves $p(D)\Phi(t) = 0$ with the initial conditions $\Phi(0) = A^0, \dots, D^{n-1}\Phi(0) = A^{n-1}$. It is then clear that we must take the following initial conditions: $D^j y_k(0) = \delta_{jk}$ with $j, k \in \{0, \dots, n-1\}$. 
In such a case, $e^{At}$ and $M(t)$ satisfy both the same differential equation and the same initial conditions. Hence, $e^{At} = M(t)$. + +Summarizing, the method consists in solving the *n*-th order differential equation $p(D)y(t) = 0$ for *n* different initial conditions. These conditions read $D^j y_k(0) = \delta_{kj}$, with $j, k \in \{0, \dots, n-1\}$. The matrix exponential is then given by $e^{At} = \sum_{k=0}^{n-1} y_k(t)A^k$. The standard procedure for solving $p(D)y(t) = 0$ requires finding the roots of the characteristic equation $p(\lambda) = 0$. Each root $\lambda$ with multiplicity *m* contributes to the general solution with a term $(a_0 + a_1 t + \dots + a_{m-1}t^{m-1})e^{\lambda t}$, the $a_k$ being fixed by the initial conditions. As already said, this method applies even when the matrix *A* is not diagonalizable. However, when the eigenvalue problem for *A* is a solvable one, another approach can be more convenient. We present such an approach in what follows. + +## 3. Closed Form of the Matrix Exponential via the Solution of Algebraic Equations + +Let us return to Equation (1). We will derive it anew, this time using standard tools of quantum mechanics. Consider a Hermitian operator *A*, whose eigenvectors satisfy $A |a_k\rangle = a_k |a_k\rangle$ and span the Hilbert space on which *A* acts. Thus, the identity operator can be written as $I = \sum_k |a_k\rangle\langle a_k|$. One can also write $A = A \cdot I = \sum_k a_k |a_k\rangle\langle a_k|$. Moreover, $A^m = \sum_k a_k^m |a_k\rangle\langle a_k|$, from which it follows that + +$$F(A) = \sum_k F(a_k) |a_k\rangle\langle a_k| \qquad (7)$$ +---PAGE_BREAK--- + +for any function $F(A)$ that can be expanded in powers of $A$. + +Let us consider the 2 × 2 case $A = n \cdot \sigma$, with $n$ a unit vector. This matrix has the eigenvalues $\pm 1$ and the corresponding eigenvectors $|n_{\pm}\rangle$. That is, $n \cdot \sigma |n_{\pm}\rangle = \pm |n_{\pm}\rangle$. 
We need no more than this to get Equation (1). Indeed, from $n \cdot \sigma = |n_+⟩⟨n_+| - |n_-⟩⟨n_-|$ and $I = |n_+⟩⟨n_+| + |n_-⟩⟨n_-|$, it follows that $|n_{\pm}\rangle⟨n_{\pm}| = (I \pm n \cdot \sigma) / 2$. Next, we consider $F(A) = \exp A = \sum_k \exp a_k |a_k⟩⟨a_k|$, with $A = i\alpha n \cdot \sigma$. The operator $i\alpha n \cdot \sigma$ has eigenvectors $|n_{\pm}\rangle$ and eigenvalues $\pm i\alpha$. Thus, + +$$ +\begin{align} +\exp(i\alpha n \cdot \sigma) &= e^{i\alpha} |n_+\rangle \langle n_+| + e^{-i\alpha} |n_-\rangle \langle n_-| \tag{8} \\ +&= \frac{1}{2} e^{i\alpha} (I + n \cdot \sigma) + \frac{1}{2} e^{-i\alpha} (I - n \cdot \sigma) \tag{9} \\ +&= \left( \frac{e^{i\alpha} + e^{-i\alpha}}{2} \right) I + \left( \frac{e^{i\alpha} - e^{-i\alpha}}{2} \right) n \cdot \sigma \tag{10} +\end{align} +$$ + +which is Equation (1). Note that it has not been necessary to know the eigenvectors of $A = i\alpha n \cdot \sigma$. It is a matter of convenience whether one chooses to express $\exp(i\alpha n \cdot \sigma)$ in terms of the projectors $|n_{\pm}\rangle⟨n_{\pm}|$, or in terms of $I$ and $n \cdot \sigma$. + +Let us now see how the above method generalizes when dealing with higher-dimensional spaces. To this end, we keep dealing with rotations. The operator exp $(i\alpha n \cdot \sigma)$ is a rotation operator acting on spinor space. It is also an element of the group SU(2), whose generators can be taken as $X_i = i\sigma_i / 2$, $i = 1, 2, 3$. They satisfy the commutation relations $[X_i, X_j] = \epsilon_{ijk}X_k$ that characterize the rotation algebra. The rotation operator can also act on three-dimensional vectors $r$. 
In this case, one often uses the following formula, which gives the rotated vector $r'$ in terms of the rotation angle $\theta$ and the unit vector $n$ that defines the rotation axis: + +$$ r' = r \cos\theta + n(n \cdot r)[1 - \cos\theta] + (n \times r)\sin\theta \quad (11) $$ + +Equation (11) is usually derived from vector algebra plus some geometrical considerations [17]. We can derive it, alternatively, by the method used above. To this end, we consider the rotation generators $X_i$ for three-dimensional space, which can be read off from the next formula, Equation (12). The rotation matrix is then obtained as $\exp(\theta n \cdot X)$, with + +$$ n \cdot X = \begin{pmatrix} 0 & -n_3 & n_2 \\ n_3 & 0 & -n_1 \\ -n_2 & n_1 & 0 \end{pmatrix} \equiv M \qquad (12) $$ + +It is straightforward to find the eigenvalues of the non-Hermitian, antisymmetric matrix $M$. They are 0 and $\pm i$. Let us denote the corresponding eigenvectors as $|n_0\rangle$ and $|n_{\pm}\rangle$, respectively. Similarly to the spin case, we have now + +$$ I = |n_+\rangle\langle n_+| + |n_-\rangle\langle n_-| + |n_0\rangle\langle n_0| \quad (13) $$ + +$$ M = i|n_+\rangle\langle n_+| - i|n_-\rangle\langle n_-| \quad (14) $$ + +We need a third equation, if we want to express the three projectors $|n_k\rangle⟨n_k|$, $k = \pm, 0$, in terms of $I$ and $M$. This equation is obtained by squaring $M$: + +$$ M^2 = -|n_+\rangle\langle n_+| - |n_-\rangle\langle n_-| \quad (15) $$ + +From Equations (13)–(15) we immediately obtain $|n_{\pm}\rangle⟨n_{\pm}| = (\mp iM - M^2)/2$, and $|n_0\rangle⟨n_0| = I + M^2$. 
Thus, we have + +$$ +\begin{align} +\exp(\theta M) &= e^{i\theta} |n_+\rangle\langle n_+| + e^{-i\theta} |n_-\rangle\langle n_-| + e^0 |n_0\rangle\langle n_0| && (16) \\ +&= I + M \sin\theta + M^2 [1 - \cos\theta] && (17) +\end{align} +$$ +---PAGE_BREAK--- + +By letting $M$, as given in Equation (12), act on $\mathbf{r} = (x,y,z)^T$, we easily see that $Mr = n \times r$ and +$M^2\mathbf{r} = n \times (n \times \mathbf{r}) = n(n \cdot \mathbf{r}) - \mathbf{r}$. Thus, on account of Equation (17), $\mathbf{r}' = \exp(\theta M)\mathbf{r}$ reads the same +as Equation (11). + +The general case is now clear. Consider an operator $A$ whose matrix representation is an $N \times N$ matrix. Once the eigenvalues $a_k$ of $A$ (which we assume nondegenerate) have been determined, we can write the $N$ equations: $A^0 = I = \sum_k |a_k\rangle\langle a_k|$, $A = \sum_k a_k |a_k\rangle\langle a_k|$, $A^2 = \sum_{k=1}^{N} a_k^2 |a_k\rangle\langle a_k|$, $\dots$, $A^{N-1} = \sum_{k=1}^{N} a_k^{N-1} |a_k\rangle\langle a_k|$, from which it is possible to obtain the $N$ projectors $|a_k\rangle\langle a_k|$ in terms of $I, A, A^2, \dots, A^{N-1}$. To this end, we must solve the system + +$$ +\begin{pmatrix} +1 & 1 & \cdots & 1 \\ +a_1 & a_2 & \cdots & a_N \\ +a_1^2 & a_2^2 & \cdots & a_N^2 \\ +\vdots & \vdots & \ddots & \vdots \\ +a_1^{N-1} & a_2^{N-1} & \cdots & a_N^{N-1} +\end{pmatrix} +\begin{pmatrix} +|a_1\rangle\langle a_1| \\ +|a_2\rangle\langle a_2| \\ +|a_3\rangle\langle a_3| \\ +\vdots \\ +|a_N\rangle\langle a_N| +\end{pmatrix} += +\begin{pmatrix} +I \\ +A \\ +A^2 \\ +\vdots \\ +A^{N-1} +\end{pmatrix} +\quad (18) +$$ + +The matrix in Equation (18), with components $V_{k,i} = a_i^{k-1}$ ($k,i \in \{1,...,N\}$), is a Vandermonde matrix, whose inverse can be explicitly given [18]. Once we have written the $|a_k\rangle\langle a_k|$ in terms of $I, A, ... A^{N-1}$, we can express any analytic function of $A$ in terms of these $N$ powers of $A$, in particular $\exp A = \sum_{k=1}^{N} \exp(a_k) |a_k\rangle\langle a_k|$. 
For the case $N=4$, for instance, we have the following result: + +$$ +\begin{align} +|a_1\rangle\langle a_1| &= \frac{A^3 - A^2(a_2 + a_3 + a_4) + A(a_2a_3 + a_2a_4 + a_3a_4) - a_2a_3a_4}{(a_1 - a_2)(a_1 - a_3)(a_1 - a_4)} \tag{19} \\ +|a_2\rangle\langle a_2| &= \frac{A^3 - A^2(a_1 + a_3 + a_4) + A(a_1a_3 + a_1a_4 + a_3a_4) - a_1a_3a_4}{(a_2 - a_1)(a_2 - a_3)(a_2 - a_4)} \tag{20} \\ +|a_3\rangle\langle a_3| &= \frac{A^3 - A^2(a_1 + a_2 + a_4) + A(a_1a_2 + a_1a_4 + a_2a_4) - a_1a_2a_4}{(a_3 - a_1)(a_3 - a_2)(a_3 - a_4)} \tag{21} \\ +|a_4\rangle\langle a_4| &= \frac{A^3 - A^2(a_1 + a_2 + a_3) + A(a_1a_2 + a_1a_3 + a_2a_3) - a_1a_2a_3}{(a_4 - a_1)(a_4 - a_2)(a_4 - a_3)} \tag{22} +\end{align} +$$ + +The general solution can be written in terms of the inverse of the Vandermonde matrix V. To this end, +consider a system of equations that reads like (18), but with the operators entering the column vectors +being replaced by numbers, i.e., $|a_j\rangle\langle a_j| \rightarrow w_j$, with $j = 1, \dots, N$, and $A^k \rightarrow q_{k+1}$, with $k = 0, \dots, N-1$. +The solution of this system is given by $w_j = \sum_{k=1}^{N} U_{j,k} q_k$, with $U = V^{-1}$, the inverse of the Vandermonde +matrix. This matrix inverse can be calculated as follows [18]. Let us define a polynomial $P_j(x)$ of degree +$N-1$ as + +$$ +P_j(x) = \prod_{\substack{n=1 \\ n \neq j}}^{N} \frac{x-a_n}{a_j-a_n} = \sum_{k=1}^{N} U_{j,k} x^{k-1} \quad (23) +$$ + +The coefficients $U_{j,k}$ of the last equality follow from expanding the preceding expression and collecting equal powers of $x$. These $U_{j,k}$ are the components of $V^{-1}$. Indeed, setting $x = a_i$ and observing that $P_j(a_i) = \delta_{ji} = \sum_{k=1}^N U_{j,k} a_i^{k-1} = (UV)_{ji}$, we see that $U$ is the inverse of the Vandermonde matrix. The projectors $|a_j\rangle\langle a_j|$ in Equation (18) can thus be obtained by replacing $x \to A$ in Equation (23). 
We get in this way the explicit solution + +$$ +|a_j\rangle\langle a_j| = \sum_{k=1}^{N} U_{j,k} A^{k-1} = \prod_{\substack{n=1 \\ n \neq j}}^{N} \frac{A - a_n}{a_j - a_n} \quad (24) +$$ + +The above expression can be inserted into Equation (7), if one wants to write $F(A)$ in terms of the first +$N$ powers of $A$. +---PAGE_BREAK--- + +So far, we have assumed that the eigenvalues of A are all nondegenerate. Let us now consider a matrix M with degenerate eigenvalues. As before, we deal with a special case, from which the general formalism can be easily inferred. Let M be of dimension four and with eigenvalues λ₁ and λ₂, which are two-fold degenerate. We can group the projectors as follows: + +$$I = (|e_1\rangle \langle e_1| + |e_2\rangle \langle e_2|) + (|e_3\rangle \langle e_3| + |e_4\rangle \langle e_4|) \quad (25)$$ + +$$M = \lambda_1 (|e_1\rangle \langle e_1| + |e_2\rangle \langle e_2|) + \lambda_2 (|e_3\rangle \langle e_3| + |e_4\rangle \langle e_4|) \quad (26)$$ + +It is then easy to solve the above equations for the two projectors associated with the two eigenvalues. We obtain + +$$|e_1\rangle \langle e_1| + |e_2\rangle \langle e_2| = \frac{\lambda_2 I - M}{\lambda_2 - \lambda_1} \quad (27)$$ + +$$|e_3\rangle \langle e_3| + |e_4\rangle \langle e_4| = \frac{\lambda_1 I - M}{\lambda_1 - \lambda_2} \quad (28)$$ + +We can then write + +$$e^M = \frac{1}{\lambda_1 - \lambda_2} \left[ (\lambda_1 e^{\lambda_2} - \lambda_2 e^{\lambda_1}) I + (e^{\lambda_1} - e^{\lambda_2}) M \right] \quad (29)$$ + +We will need this result for the calculation of the unitary operator that defines the Foldy–Wouthuysen transformation, our next example. It is now clear that in the general case of degenerate eigenvalues, we can proceed similarly to the nondegenerate case, but solving $n < N$ equations. + +## 4. Examples + +Let us now see how the method works when applied to some well-known cases. Henceforth, we refer to the method as the Cayley–Hamilton (CH)-method, for short. 
Our aim is to show the simplicity of the required calculations, as compared with standard techniques. + +### 4.1. The Foldy–Wouthuysen Transformation + +The Foldy–Wouthuysen transformation is introduced [19] with the aim of decoupling the upper ($\varphi$) and lower ($\chi$) components of a bispinor $\psi = (\varphi, \chi)^T$ that solves the Dirac equation $i\hbar\partial\psi/\partial t = H\psi$, where $H = -i\hbar c\alpha \cdot \nabla + \beta mc^2$. Here, $\beta$ and $\alpha = (\alpha_x, \alpha_y, \alpha_z)$ are the 4 × 4 Dirac matrices: + +$$\beta = \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix}, \quad \alpha = \begin{pmatrix} 0 & \sigma \\ \sigma & 0 \end{pmatrix} \qquad (30)$$ + +The Foldy–Wouthuysen transformation is given by $\psi' = U\psi$, with [19] + +$$U = \exp\left(\frac{\theta}{2}\beta\alpha \cdot p\right) \qquad (31)$$ + +We can calculate $U$ by applying Equation (29) for $M = \theta\beta\alpha \cdot p/2 = (\theta|p|/2)\beta\alpha \cdot n$, where $n = p/|p|$. +The eigenvalues of the 4 × 4 matrix $\beta\alpha \cdot n$ are $\pm i$, each being two-fold degenerate. This follows from +noting that the matrices + +$$\beta\alpha \cdot n = \begin{pmatrix} 0 & \sigma \cdot n \\ -\sigma \cdot n & 0 \end{pmatrix} \quad \text{and} \quad \begin{pmatrix} 0 & 1 \\ -1 & 0 \end{pmatrix} \qquad (32)$$ +---PAGE_BREAK--- + +have the same eigenvalues. Indeed, because $(\sigma \cdot n)^2 = 1$, the above matrices share the characteristic equation $\lambda^2 + 1 = 0$. Their eigenvalues are thus $\pm i$. The eigenvalues of $M = \theta \beta \alpha \cdot p/2$ are then $\lambda_{1,2} = \pm i\theta |p|/2$. 
Replacing these values in Equation (29) we obtain + +$$ +\begin{align} +\exp\left(\frac{\theta}{2}\beta\alpha \cdot p\right) &= \frac{1}{i\theta|p|} \left[ \frac{i\theta|p|}{2} \left(e^{-i\theta|p|/2} + e^{i\theta|p|/2}\right) I + \left(e^{i\theta|p|/2} - e^{-i\theta|p|/2}\right) \frac{\theta|p|}{2} \beta\alpha \cdot n \right] \tag{33} \\ +&= \cos\left(|p|\theta/2\right) + \sin\left(|p|\theta/2\right) \beta\alpha \cdot \frac{p}{|p|} \tag{34} +\end{align} + $$ + +The standard way to get this result requires developing the exponential in a power series. Thereafter, one must exploit the commutation properties of $\alpha$ and $\beta$ in order to group together odd and even powers of $\theta$. This finally leads to the same closed-form expression that we have arrived at after some few steps. + +## 4.2. Lorentz-Type Equations of Motion + +The dynamics of several classical and quantum systems is ruled by equations that can be cast as differential equations for a three-vector $S$. These equations often contain terms of the form $\Omega \times$. An example of this is the ubiquitous equation + +$$ \frac{dS}{dt} = \Omega \times S \qquad (35) $$ + +Equation (35) and its variants have been recently addressed by Babusci, Dattoli and Sabia [20], who applied operational methods to deal with them. Instead of writing Equation (35) in matrix form, these authors chose to exploit the properties of the vector product by defining the operator $\hat{\Omega} := \Omega \times$. The solution for the case $\partial\Omega/\partial t = 0$, for instance, was obtained by expanding $\exp(t\hat{\Omega})$ as an infinite series and using the cyclical properties of the vector product in order to get $S(t)$ in closed form. This form is nothing but Equation (11) with the replacements $r' \rightarrow S(t)$, $r \rightarrow S(0)$ and $\theta \rightarrow \Omega t$, where $\Omega := |\Omega|$. We obtained Equation (11) without expanding the exponential and without using any cyclic properties. 
Our solution follows from writing Equation (35) in matrix form, i.e., + +$$ \frac{dS}{dt} = \Omega MS \qquad (36) $$ + +where $M$ is given by Equation (12) with $n = \Omega/\Omega$. The solution $S(t) = \exp(M\Omega t)S(0)$ is then easily written in closed form by applying the CH-method, as in Equation (11). The advantages of this method show up even more sharply when dealing with some extensions of Equation (36). Consider, e.g., the non-homogeneous version of Equation (35): + +$$ \frac{dS}{dt} = \Omega \times S + N = \Omega MS + N \qquad (37) $$ + +This is the form taken by the Lorentz equation of motion when the electromagnetic field is given by scalar and vector potentials reading $\Phi = -E \cdot r$ and $A = B \times r/2$, respectively [20]. The solution of Equation (37) is easily obtained by acting on both sides with the "integrating (operator-valued) factor" $\exp(-\Omega Mt)$. One then readily obtains, for the initial condition $S(0) = S_0$, + +$$ S(t) = e^{\Omega M t} S_0 + \int_0^t e^{\Omega M(t-s)} N ds \qquad (38) $$ + +The matrix exponentials in Equation (38) can be expressed in their eigenbasis, as in Equation (16). For a time-independent $N$, the integral in Equation (38) is then trivial. An equivalent solution is given in [20], but written in terms of the evolution operator $\hat{U}(t) = \exp(i\hat{\Omega}t)$ and its inverse. Inverse operators repeatedly appear within such a framework [20] and are often calculated with the help of +---PAGE_BREAK--- + +the Laplace transform identity: $\hat{\Lambda}^{-1} = \int_{0}^{\infty} \exp(-s\hat{\Lambda})ds$. Depending on $\hat{\Lambda}$, this could be not such a straightforward task as it might appear at first sight. Now, while vector notation gives us additional physical insight, vector calculus can rapidly turn into a messy business. Our strategy is therefore to avoid vector calculus and instead rely on the CH-method as much as possible. 
Only at the end we write down our results, if we wish, in terms of vector products and the like. That is, we use Equations (13)–(17) systematically, in particular Equation (16) when we need to handle $\exp(\theta M)$, e.g., within integrals. The simplification comes about from our working with the eigenbasis of $\exp(\theta M)$, i.e., with the eigenbasis of $M$. Writing down the final results in three-vector notation amounts to expressing these results in the basis in which $M$ was originally defined, cf. Equation (12). Let us denote this basis by $\{|x\rangle, |y\rangle, |z\rangle\}$. The eigenvectors $|n_{\pm}\rangle$ and $|n_0\rangle$ of $M$ are easily obtained from those of $X_3$, cf. Equation (12). The eigenvectors of $X_3$ are, in turn, analogous to those of Pauli's $\sigma_y$, namely $|\pm\rangle = (|x\rangle \mp i|y\rangle)/\sqrt{2}$, plus a third eigenvector that is orthogonal to the former ones, that is, $|0\rangle = |z\rangle$. In order to obtain the eigenvectors of $n \cdot X$, with $n = (\sin\theta \cos\phi, \sin\theta \sin\phi, \cos\theta)$, we apply the rotation $\exp(\phi X_3) \exp(\theta X_2)$ to the eigenvectors $|\pm\rangle$ and $|0\rangle$, thereby getting $|n_{\pm}\rangle$ and $|n_0\rangle$, respectively. All these calculations are easily performed using the CH-method. + +Once we have $|n_{\pm}\rangle$ and $|n_0\rangle$, we also have the transformation matrix $T$ that brings $M$ into diagonal form: $T^{-1}MT = M_D = \text{diag}(-i, 0, i)$. Indeed, $T$'s columns are just $|n_{-}\rangle$, $|n_0\rangle$ and $|n_+\rangle$. After we have carried out all calculations in the eigenbasis of $M$, by applying $T$ we can express the final result in the basis $\{|x\rangle, |y\rangle, |z\rangle\}$, thereby obtaining the desired expressions in three-vector notation. 
Let us illustrate this procedure by addressing the evolution equation + +$$ \frac{dS}{dt} = \Omega \times S + \lambda \Omega \times (\Omega \times S) \qquad (39) $$ + +In matrix form, such an equation reads + +$$ \frac{dS}{dt} = \Omega MS + \lambda (\Omega M)^2 S = [\Omega M + \lambda (\Omega M)^2]S \equiv AS \qquad (40) $$ + +The solution is given by $S(t) = \exp(At)S_0$. The eigenbasis of $A$ is the same as that of $M$. We have thus + +$$ \exp(At) = e^{(i\Omega - \lambda\Omega^2)t} |n_+\rangle\langle n_+| + e^{(-i\Omega - \lambda\Omega^2)t} |n_-\rangle\langle n_-| + |n_0\rangle\langle n_0| \qquad (41) $$ + +The projectors $|n_k\rangle\langle n_k|$ can be written in terms of the powers of $A$ by solving the system + +$$ I = |n_+\rangle\langle n_+| + |n_-\rangle\langle n_-| + |n_0\rangle\langle n_0| \qquad (42) $$ + +$$ A = (i\Omega - \lambda\Omega^2)|n_+\rangle\langle n_+| - (i\Omega + \lambda\Omega^2)|n_-\rangle\langle n_-| \qquad (43) $$ + +$$ A^2 = (i\Omega - \lambda\Omega^2)^2 |n_+\rangle\langle n_+| + (i\Omega + \lambda\Omega^2)^2 |n_-\rangle\langle n_-| \qquad (44) $$ + +Using $A = \Omega M + \lambda(\Omega M)^2$ and $A^2 = -2\lambda\Omega^3 M + (1 - \lambda^2\Omega^2)(\Omega M)^2$, and replacing the solution of the system (42)–(44) in Equation (41) we get + +$$ \exp(At) = I + e^{-\lambda\Omega^2 t} \sin(\Omega t) M + [1 - e^{-\lambda\Omega^2 t} \cos(\Omega t)] M^2 \qquad (45) $$ + +Finally, we can write the solution $S(t) = \exp(At)S_0$ in the original basis $\{|x\rangle, |y\rangle, |z\rangle\}$, something that in this case amounts to writing $MS_0 = n \times S_0$ and $M^2S_0 = n(n \cdot S_0) - S_0$. Equation (39) was also addressed in [20], but making use of the operator method. The solution was given in terms of a series expansion for the evolution operator. In order to write this solution in closed form, it is necessary to introduce sin- and cos-like functions [20]. 
These functions are defined as infinite series involving two-variable Hermite polynomials. The final expression reads like Equation (11), but with sin and cos replaced by the aforementioned functions containing two-variable Hermite polynomials. Now, one can hardly unravel from such an expression the physical features that characterize the system's dynamics. +---PAGE_BREAK--- + +On the other hand, a solution given as in Equation (45) clearly shows such dynamics, in particular +the damping effect stemming from the $\lambda$-term in Equation (39), for $\lambda > 0$. Indeed, Equation (45) +clearly shows that the state vector $S(t) = \exp(\mathcal{A}t)S_0$ asymptotically aligns with $\Omega$ while performing a +damped Larmor precession about the latter. + +The case $\partial\Omega/\partial t \neq 0$ is more involved and generally requires resorting to Dyson-like series expansions, e.g., time-ordered exponential integrations. While this subject lies beyond the scope of the present work, it should be mentioned that the CH-method can be advantageously applied also in this context. For instance, time-ordered exponential integrations involving operators of the form $A + B(t)$ do require the evaluation of $\exp A$. Likewise, disentangling techniques make repeated use of matrix exponentials of single operators [21]. In all these cases, the CH-method offers a possible shortcut. + +**4.3. The Jaynes-Cummings Hamiltonian** + +We address now a system composed by a two-level atom and a quantized (monochromatic) +electromagnetic field. Under the dipole and the rotating-wave approximations, the Hamiltonian of +this system reads (in standard notation) + +$$ +H = \frac{\hbar}{2} \omega_0 \sigma_z + \hbar \omega a^\dagger a + \hbar g (a^\dagger \sigma_- + a \sigma_+) \quad (46) +$$ + +Let us denote the upper and lower states of the two-level atom by $|a\rangle$ and $|b\rangle$, respectively, and the Fock states of the photon-field by $|n\rangle$. 
The Hilbert space of the atom-field system is spanned by the basis $B = \{|a,n\rangle, |b,n\rangle, n=0,1,\dots\}$. The states $|a,n\rangle$ and $|b,n\rangle$ are eigenstates of the unperturbed Hamiltonian $H_0 = \hbar\omega_0\sigma_z/2 + \hbar\omega a^\dagger a$. The interaction Hamiltonian $V = \hbar g (a^\dagger\sigma_- + a\sigma_+)$ couples the states $|a,n\rangle$ and $|b,n+1\rangle$ alone. Hence, $H$ can be split into a sum: $H = \sum_n H_n$, with each $H_n$ acting on the subspace $\text{Span}\{|a,n\rangle, |b,n+1\rangle\}$. Within such a subspace, $H_n$ is represented by the 2 × 2 matrix + +$$ +H_n = \hbar\omega \left(n + \frac{1}{2}\right) I + \hbar \begin{pmatrix} \frac{\delta}{2} & g\sqrt{n+1} \\ g\sqrt{n+1} & -\frac{\delta}{2} \end{pmatrix} \quad (47) +$$ + +where $\delta = \omega_0 - \omega$. + +A standard way [22] to calculate the evolution operator $U = \exp(-iHt/\hbar)$ goes as follows. +One first writes the Hamiltonian in the form $H = H_1 + H_2$, with $H_1 = \hbar\omega(a^{\dagger}a + \sigma_{z}/2)$ and +$H_2 = \hbar\delta\sigma_{z}/2 + \hbar g(a^{\dagger}\sigma_{-} + a\sigma_{+})$. Because $[H_1, H_2] = 0$, the evolution operator can be factored as +$U = U_1U_2 = \exp(-iH_1t/\hbar)\exp(-iH_2t/\hbar)$. The first factor is diagonal in Span $B$. The second factor can +be expanded in a Taylor series. As it turns out, one can obtain closed-form expressions for the even and +the odd powers of the expansion. Thus, a closed-form for $U_2$ can be obtained as well. As can be seen, +this method depends on the realization that Equation (46) can be written in a special form, which renders +it possible to factorize $U$. + +Let us now calculate $U$ by the CH-method. We can exploit the fact that $H$ splits as $H = \sum_n H_n$, with $[H_n, H_m] = 0$, and write $U = \prod_n U_n = \prod_n \exp(-iH_n t / \hbar)$. Generally, a 2 × 2 Hamiltonian $H$ has eigenvalues of the form $E_\pm = \hbar(\lambda_0 \pm \lambda)$. 
We have thus + +$$ +I = |+\rangle\langle +| + |-\rangle\langle -| \tag{48} +$$ + +$$ +H/\hbar = (\lambda_0 + \lambda) |+\rangle\langle +| + (\lambda_0 - \lambda) |-\rangle\langle -| \quad (49) +$$ + +so that + +$$ +\begin{align} +\exp(-iHt/\hbar) &= \exp(-i\lambda_+ t) |+\rangle\langle +| + \exp(-i\lambda_- t) |-\rangle\langle -| \tag{50} \\ +&= \frac{e^{-i\lambda_0 t}}{\lambda} \left[ (i\lambda_0 \sin \lambda t + \lambda \cos \lambda t) I - i(\sin \lambda t) \frac{H}{\hbar} \right] \tag{51} +\end{align} +$$ +---PAGE_BREAK--- + +In our case, $H_n$ has eigenvalues $E_n^\pm = \hbar\omega(n+1/2) \pm \hbar\sqrt{\delta^2/4 + g^2(n+1)} \equiv \hbar\omega(n+1/2) \pm \hbar R_n$. Whence, + +$$ \exp(-iH_n t / \hbar) = \frac{e^{-i\omega(n+1/2)t}}{R_n} \left[ \left( i\omega\left(n+\frac{1}{2}\right)\sin(R_n t) + R_n \cos(R_n t) \right) I - i\sin(R_n t) \frac{H_n}{\hbar} \right] \quad (52) $$ + +Replacing $H_n$ from Equation (47) in the above expression we get + +$$ \exp(-iH_n t / \hbar) = e^{-i\omega(n+1/2)t} \left[ \cos(R_n t) I - \frac{i \sin(R_n t)}{2R_n} \begin{pmatrix} \delta & 2g\sqrt{n+1} \\ 2g\sqrt{n+1} & -\delta \end{pmatrix} \right] \quad (53) $$ + +This result enables a straightforward calculation of the evolved state $|\psi(t)\rangle$ out of a general initial state + +$$ |\psi(0)\rangle = \sum_n C_{a,n} |a, n\rangle + C_{b,n+1} |b, n+1\rangle \quad (54) $$ + +Equation (53) refers to a matrix representation in the two-dimensional subspace $\text{Span}\{|a, n\rangle, |b, n+1\rangle\}$. Let us focus on + +$$ \cos (R_n t) I = \begin{pmatrix} \cos (R_n t) & 0 \\ 0 & \cos (R_n t) \end{pmatrix} \qquad (55) $$ + +This matrix is a representation in subspace $\text{Span}\{|a, n\rangle, |b, n+1\rangle\}$ of the operator + +$$ \cos \left( t \sqrt{\hat{\varphi} + g^2} \right) |a\rangle\langle a| + \cos \left( t \sqrt{\hat{\varphi}} \right) |b\rangle\langle b| \quad (56) $$ + +where $\hat{\varphi} := g^2 a^\dagger a + \delta^2/4$. 
Proceeding similarly with the other operators that enter Equation (53) and observing that $\sin(R_n t) R_n^{-1}\sqrt{n+1} = \langle n | \sin(t\sqrt{\hat{\varphi}+g^2}) (\sqrt{\hat{\varphi}+g^2})^{-1} a | n+1 \rangle$, etc., we readily obtain + +$$ \exp(-iHt/\hbar) = e^{-i\omega(a^\dagger a + \frac{1}{2})t} \begin{pmatrix} \cos(t\sqrt{\hat{\varphi} + g^2}) - \dfrac{i\delta\sin(t\sqrt{\hat{\varphi} + g^2})}{2\sqrt{\hat{\varphi} + g^2}} & -\dfrac{ig\sin(t\sqrt{\hat{\varphi} + g^2})}{\sqrt{\hat{\varphi} + g^2}}\, a \\ -\dfrac{ig\sin(t\sqrt{\hat{\varphi}})}{\sqrt{\hat{\varphi}}}\, a^\dagger & \cos(t\sqrt{\hat{\varphi}}) + \dfrac{i\delta\sin(t\sqrt{\hat{\varphi}})}{2\sqrt{\hat{\varphi}}} \end{pmatrix} \quad (57) $$ + +where the 2 × 2 matrix refers now to the atomic subspace $\text{Span}\{|a\rangle, |b\rangle\}$. One can see that the CH-method reduces the amount of calculational effort invested to get Equation (53), as compared with other approaches [22]. + +### 4.4. Bispinors and Lorentz Transformations + +As a further application, let us consider the representation of Lorentz transformations in the space of bispinors. In coordinate space, Lorentz transformations are given by $\tilde{x}^\mu = \Lambda^\mu_{\ \nu} x^\nu$ (Greek indices run from 0 to 3), with the $\Lambda^\mu_{\ \nu}$ satisfying $\eta_{\mu\nu}\Lambda^\mu_{\ \tau}\Lambda^\nu_{\ \sigma} = \eta_{\tau\sigma}$. Here, $\eta^{\mu\nu}$ represents the metric tensor of Minkowski space ($\eta^{00} = -\eta^{11} = -\eta^{22} = -\eta^{33} = 1$, $\eta^{\mu\nu} = 0$ for $\mu \neq \nu$). A bispinor $\psi(x)$ transforms according to [19] + +$$ \tilde{\psi}(\tilde{x}) = \tilde{\psi}(\Lambda x) = S(\Lambda)\psi(x) \quad (58) $$ + +with + +$$ S(\Lambda) = \exp B \quad (59) $$ + +$$ B = -\frac{1}{4} V^{\mu\nu} \gamma_{\mu} \gamma_{\nu} \quad (60) $$ +---PAGE_BREAK--- + +The $V^{\mu\nu} = -V^{\nu\mu}$ are the components of an antisymmetric tensor, which has thus six independent +components, corresponding to the six parameters defining a Lorentz transformation. 
The quantities +$\gamma_{\mu} = \eta_{\mu\nu}\gamma^{\nu}$ satisfy $\gamma^{\mu}\gamma^{\nu} + \gamma^{\nu}\gamma^{\mu} = 2\eta^{\mu\nu}$. The quantities $\gamma_{\mu}\gamma_{\nu}$ are the generators of the Lorentz group. +$S(\Lambda)$ is not a unitary transformation, but satisfies + +$$ +S^{-1} = \gamma_0 S^\dagger \gamma_0 \tag{61} +$$ + +For the following, it will be advantageous to define + +$$ +p_i = \gamma_0 \gamma_i, \quad i = 1, 2, 3 \tag{62} +$$ + +$$ +q_1 = \gamma_2 \gamma_3, q_2 = \gamma_3 \gamma_1, q_3 = \gamma_1 \gamma_2 \qquad (63) +$$ + +We call the $p_i$ Pauli generators and the $q_i$ quaternion generators. The pseudoscalar $\gamma_5 := \gamma_0\gamma_1\gamma_2\gamma_3$ satisfies $\gamma_5^2 = -1$, $\gamma_5\gamma_\mu = -\gamma_\mu\gamma_5$, so that it commutes with each generator of the Lorentz group: + +$$ +\gamma_5 (\gamma_{\mu} \gamma_{\nu}) = (\gamma_{\mu} \gamma_{\nu}) \gamma_5 \qquad (64) +$$ + +This means that quantities of the form $a + \beta\gamma_5$ ($a, \beta \in \mathbb{R}$) behave like complex numbers upon multiplication with $p_i$ and $q_i$. We denote the subspace spanned by such quantities as the complex-like subspace $C_i$ and set $i \equiv \gamma_5$. Noting that $\mathbf{i} p_i = q_i$ and $\mathbf{i} q_i = -p_i$, the following multiplication rules are easily derived: + +$$ +q_i q_j = \epsilon_{ijk} q_k - \delta_{ij} \quad (65) +$$ + +$$ +p_i p_j = -\epsilon_{ijk} q_k + \delta_{ij} = -q_i q_j = -i \epsilon_{ijk} p_k + \delta_{ij} \quad (66) +$$ + +$$ +p_i q_j = \epsilon_{ijk} p_k + i\delta_{ij} = i(-\epsilon_{ijk} q_k + \delta_{ij}) \quad (67) +$$ + +The following commutators can then be straightforwardly obtained: + +$$ +[q_i, q_j] = 2\epsilon_{ijk}q_k \tag{68} +$$ + +$$ +[p_i, p_j] = -2\epsilon_{ijk}q_k = -2ie_{ijk}p_k \tag{69} +$$ + +$$ +[p_i, q_j] = 2\epsilon_{ijk}p_k \tag{70} +$$ + +They make clear why we dubbed the $p_i$ as Pauli generators. 
Noting that they furthermore satisfy + +$$ +p_i p_j + p_j p_i = 2\delta_{ij} \tag{71} +$$ + +we see the correspondence $\mathbf{i} \rightarrow i$, $p_k \rightarrow -\sigma_k$, with $\mathbf{i}$ being the imaginary unit and $\sigma_k$ the Pauli matrices. These matrices, as is well-known, satisfy $[\sigma_i, \sigma_j] = 2i\epsilon_{ijk}\sigma_k$ and the anticommutation relations $\sigma_i\sigma_j + \sigma_j\sigma_i = 2\delta_{ij}$, which follow from $\sigma_i\sigma_j = i\epsilon_{ijk}\sigma_k + \delta_{ij}$. + +We can write now $S(\Lambda) = \exp(-\frac{1}{4}V^{\mu\nu}\gamma_{\mu}\gamma_{\nu})$ in terms of $p_i$ and $q_i$: + +$$ +B = \sum_{i=1}^{3} (\alpha^i p_i + \beta^i q_i) \tag{72} +$$ + +Here, we have set $\alpha^i = -V^{0i}/4$ and $\beta^k \epsilon_{ijk} = -V^{ij}/4$. We can write $B$ in terms of the Pauli-generators +alone: + +$$ +B = \sum_{i=1}^{3} (\alpha^i + i\beta^i) p_i = \sum_{i=1}^{3} z^i p_i \quad (73) +$$ +---PAGE_BREAK--- + +Considering the isomorphism $p_k \leftrightarrow -\sigma_k$, we could derive the expression for $S(\Lambda) = \exp B$ by splitting the series expansion into even and odd powers of $B$, and noting that + +$$B^2 = (\alpha^2 - \beta^2) + (2\alpha \cdot \beta)\,\mathbf{i} \equiv z^2 \in C_i \qquad (74)$$ + +where $\alpha^2 \equiv \alpha \cdot \alpha, \beta^2 \equiv \beta \cdot \beta$, and $\alpha \cdot \beta \equiv \sum_{i=1}^{3} \alpha^i \beta^i$. We have then that $B^3 = z^2 B, B^4 = z^4, B^5 = z^4 B, ...$. This allows us to write + +$$\exp(B) = 1 + B + \frac{z^2}{2!} + \frac{z^2}{3!}B + \frac{z^4}{4!} + \frac{z^4}{5!}B + \dots = \\ \left(1 + \sum_{n=1}^{\infty} \frac{z^{2n}}{(2n)!}\right) + B\left(1 + \frac{z^2}{3!} + \frac{z^4}{5!} + \dots\right) = \cosh z + \frac{\sinh z}{z} B \quad (75)$$ + +As in the previous examples, also in this case the above result can be obtained more directly by noting that $B = \sum_{i=1}^{3} (\alpha^i + i\beta^i) p_i \leftrightarrow -\sum_{i=1}^{3} (\alpha^i + i\beta^i) \sigma_i$. 
This suggests that we consider $\exp(-f \cdot \sigma)$, with $f = \alpha + i\beta \in \mathbb{C}$. The matrix $f \cdot \sigma$ has the (complex) eigenvalues + +$$\lambda_{\pm} = \pm\sqrt{\alpha^2 - \beta^2 + 2i\alpha \cdot \beta} \equiv \pm z \qquad (76)$$ + +Writing $|f_{\pm}\rangle$ for the corresponding eigenvectors, i.e., $f \cdot \sigma |f_{\pm}\rangle = \lambda_{\pm} |f_{\pm}\rangle$, we have that + +$$I = |f_+\rangle\langle f_+| + |f_-\rangle\langle f_-| \qquad (77)$$ + +$$f \cdot \sigma = \lambda_+ |f_+\rangle \langle f_+| + \lambda_- |f_-\rangle \langle f_-| \qquad (78)$$ + +Solving for $|f_{\pm}\rangle\langle f_{\pm}|$, we get + +$$|f_{\pm}\rangle\langle f_{\pm}| = \frac{zI \pm f \cdot \sigma}{2z} \qquad (79)$$ + +We apply now the general decomposition $\exp A = \sum_n \exp a_n |a_n\rangle\langle a_n|$ to the case $A = -f \cdot \sigma$. The operator $\exp(-f \cdot \sigma)$ has eigenvectors $|f_{\pm}\rangle$ and eigenvalues $\exp(\mp z)$. Thus, + +$$\begin{align} +\exp(-f \cdot \sigma) &= e^{-z} |f_+\rangle \langle f_+| + e^{z} |f_-\rangle \langle f_-| & (80) \\ +&= \frac{e^{-z}}{2z} (zI + f \cdot \sigma) + \frac{e^{z}}{2z} (zI - f \cdot \sigma) & (81) \\ +&= \left(\frac{e^{z} + e^{-z}}{2}\right) I - \left(\frac{e^{z} - e^{-z}}{2z}\right) f \cdot \sigma & (82) \\ +&= \cosh z - \frac{\sinh z}{z} f \cdot \sigma & (83) +\end{align}$$ + +which is equivalent to Equation (75) via the correspondence $\cosh(z) + \sinh(z)B/z \leftrightarrow \cosh(z) - i\epsilon(z)f \cdot z$. We have thus obtained closed-form expressions for $\exp(-f \cdot \sigma)$, with $f = \alpha + i\beta \in C^3$, i.e., for the elements of SL(2, C), the universal covering group of the Lorentz group. It is interesting to note that the elements of SL(2, C) are related to those of SU(2) by extending the parameters $\alpha$ entering $\exp(i\alpha n) \in SU(2)$ from the real to the complex domain: $i\alpha \rightarrow \alpha + i\beta$. 
Standard calculations that are carried out with SU(2) elements can be carried out similarly with SL(2, C) elements [15]. A possible realization of SU(2) transformations occurs in optics, by acting on the polarization of light with the help of birefringent elements (waveplates). If we also employ dichroic elements like polarizers, which absorb part of the light, then it is possible to implement SL(2, C) transformations as well. In this way, one can simulate Lorentz transformations in the optical laboratory [23]. The above formalism is of great help for designing the corresponding experimental setup. +---PAGE_BREAK--- + +**5. Conclusions** + +The method presented in this paper—referred to as the Cayley–Hamilton method—proves advantageous for calculating closed-form expressions of analytic functions $f(A)$ of an $n \times n$ matrix $A$, in particular matrix exponentials. The matrix $A$ is assumed to be a diagonalizable one, even though only its eigenvalues are needed, not its eigenvectors. We have recovered some known results from classical and quantum mechanics, including Lorentz transformations, by performing the straightforward calculations that the method prescribes. In most cases, the problem at hand was reshaped so as to solve it by dealing with two-by-two matrices only. + +**Acknowledgments:** The author gratefully acknowledges the Research Directorate of the Pontificia Universidad Católica del Perú (DGI-PUCP) for financial support under Grant No. 2014-0064. + +**Conflicts of Interest:** The author declares no conflict of interest. + +**References** + +1. Gantmacher, F.R. *The Theory of Matrices*; Chelsea Publishing Company: New York, NY, USA, 1960; p. 83. + +2. Dattoli, G.; Mari, C.; Torre, A. A simplified version of the Cayley-Hamilton theorem and exponential forms of the $2 \times 2$ and $3 \times 3$ matrices. *Il Nuovo Cimento* **1998**, *180*, 61–68. + +3. Cohen-Tannoudji, C.; Diu, B.; Laloë, F. 
*Quantum Mechanics*; John Wiley & Sons: New York, NY, USA, 1977; pp. 983–989. + +4. Sakurai, J.J. *Modern Quantum Mechanics*; Addison-Wesley: New York, NY, USA, 1980; pp. 163–168. + +5. Greiner, W.; Müller, B. *Quantum Mechanics, Symmetries*; Springer: New York, NY, USA, 1989; p. 68. + +6. Weigert, S. Baker-Campbell-Hausdorff relation for special unitary groups SU(N). *J. Phys. A* **1997**, *30*, 8739–8749. + +7. Dattoli, G.; Ottaviani, P.L.; Torre, A.; Vásquez, L. Evolution operator equations: Integration with algebraic and finite-difference methods. Applications to physical problems in classical and quantum mechanics and quantum field theory. *Riv. Nuovo Cimento* **1997**, *20*, 1–133. + +8. Dattoli, G.; Zhukovsky, K. Quark flavour mixing and the exponential form of the Kobayashi–Maskawa matrix. *Eur. Phys. J. C* **2007**, *50*, 817–821. + +9. Leonard, I. The matrix exponential. *SIAM Rev.* **1996**, *38*, 507–512. + +10. Untidt, T.S.; Nielsen, N.C. Closed solution to the Baker-Campbell-Hausdorff problem: Exact effective Hamiltonian theory for analysis of nuclear-magnetic-resonance experiments. *Phys. Rev. E* **2002**, *65*, doi:10.1103/PhysRevE.65.021108. + +11. Moore, G. Orthogonal polynomial expansions for the matrix exponential. *Linear Algebra Appl.* **2011**, *435*, 537–559. + +12. Ding, F. Computation of matrix exponentials of special matrices. *Appl. Math. Comput.* **2013**, *223*, 311–326. + +13. Koch, C.T.; Spence, J.C.H. A useful expansion of the exponential of the sum of two non-commuting matrices, one of which is diagonal. *J. Phys. A Math. Gen.* **2003**, *36*, 803–816. + +14. Ramakrishna, V.; Zhou, H. On the exponential of matrices in $su(4)$. *J. Phys. A Math. Gen.* **2006**, *39*, 3021–3034. + +15. Tudor, T. On the single-exponential closed form of the product of two exponential operators. *J. Phys. A Math. Theor.* **2007**, *40*, 14803–14810. + +16. Siminovitch, D.; Untidt, T.S.; Nielsen, N.C. Exact effective Hamiltonian theory. II. 
Polynomial expansion of matrix functions and entangled unitary exponential operators. *J. Chem. Phys.* **2004**, *120*, 51–66. + +17. Goldstein, H. *Classical Mechanics*, 2nd ed.; Addison-Wesley: New York, NY, USA, 1980; pp. 164–174. + +18. Press, W.H.; Teukolsky, S.A.; Vetterling, W.T.; Flannery, B.P. *Numerical Recipees in FORTRAN, The Art of Scientific Computing*, 2nd ed.; Cambridge University Press: Cambridge, UK, 1992; pp. 83–84. + +19. Bjorken, J.D.; Drell, S.D. *Relativistic Quantum Mechanics*; McGraw-Hill: New York, NY, USA, 1965. + +20. Babusci, D.; Dattoli, G.; Sabia, E. Operational methods and Lorentz-type equations of motion. *J. Phys. Math.* **2011**, *3*, 1–17. + +21. Puri, R.R. *Mathematical Methods of Quantum Optics*; Springer: New York, NY, USA, 2001; pp. 8–53. +---PAGE_BREAK--- + +22. Meystre, P.; Sargent, M. *Elements of Quantum Optics*, 2nd ed.; Springer: Berlin, Germany, 1999, pp. 372–373. + +23. Kim, Y.S.; Noz, M.E. Symmetries shared by the Poincaré group and the Poincaré sphere. *Symmetry* **2013**, *5*, 233–252. + +© 2014 by the author. Licensee MDPI, Basel, Switzerland. This article is an open access +article distributed under the terms and conditions of the Creative Commons Attribution +(CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +Article + +# Invisibility and *PT* Symmetry: A Simple Geometrical Viewpoint + +Luis L. Sánchez-Soto * and Juan J. Monzón + +Departamento de Óptica, Facultad de Física, Universidad Complutense, 28040 Madrid, Spain; E-Mail: jjmonzon@opt.ucm.es + +* E-Mail: lsanchez@fis.ucm.es; Tel.: +34-91-3944-680; Fax: +34-91-3944-683. + +Received: 24 February 2014; in revised form: 12 May 2014 / Accepted: 14 May 2014 / +Published: 22 May 2014 + +**Abstract:** We give a simplified account of the properties of the transfer matrix for a complex one-dimensional potential, paying special attention to the particular instance of unidirectional invisibility. 
In appropriate variables, invisible potentials appear as performing null rotations, which lead to the helicity-gauge symmetry of massless particles. In hyperbolic geometry, this can be interpreted, via Möbius transformations, as parallel displacements, a geometric action that has no Euclidean analogy. + +**Keywords:** *PT* symmetry; SL(2, C); Lorentz group; Hyperbolic geometry + +## 1. Introduction + +The work of Bender and coworkers [1–6] has triggered considerable efforts to understand complex potentials that have neither parity (*P*) nor time-reversal symmetry (*T*), yet they retain combined *PT* invariance. These systems can exhibit real energy eigenvalues, thus suggesting a plausible generalization of quantum mechanics. This speculative concept has motivated an ongoing debate in several forefronts [7,8]. + +Quite recently, the prospect of realizing *PT*-symmetric potentials within the framework of optics has been put forward [9,10] and experimentally tested [11]. The complex refractive index takes on here the role of the potential, so they can be realized through a judicious inclusion of index guiding and gain/loss regions. These *PT*-synthetic materials can exhibit several intriguing features [12–14], one of which will be the main interest of this paper, namely, unidirectional invisibility [15–17]. + +In all these matters, the time-honored transfer-matrix method is particularly germane [18]. However, a quick look at the literature immediately reveals the different backgrounds and habits in which the transfer matrix is used and the very little cross talk between them. + +To remedy this flaw, we have been capitalizing on a number of geometrical concepts to gain further insights into the behavior of one-dimensional scattering [19–26]. Indeed, when one think in a unifying mathematical scenario, geometry immediately comes to mind. Here, we keep going this program and examine the action of the transfer matrices associated to invisible scatterers. 
Interestingly enough, when viewed in SO(1, 3), they turn to be nothing but parabolic Lorentz transformations, also called null rotations, which play a crucial role in the determination of the little group of massless particles. Furthermore, borrowing elementary techniques of hyperbolic geometry, we reinterpret these matrices as parallel displacements, which are motions without Euclidean counterpart. + +We stress that our formulation does not offer any inherent advantage in terms of efficiency in solving practical problems; rather, it furnishes a general and unifying setting to analyze the transfer matrix for complex potentials, which, in our opinion, is more than a curiosity. +---PAGE_BREAK--- + +## 2. Basic Concepts on Transfer Matrix + +To be as self-contained as possible, we first briefly review some basic facts on the quantum scattering of a particle of mass $m$ by a local complex potential $V(x)$ defined on the real line $\mathbb{R}$ [27–34]. Although much of the renewed interest in this topic has been fuelled by the remarkable case of *PT* symmetry, we do not use this extra assumption in this Section. + +The problem at hand is governed by the time-independent Schrödinger equation + +$$H\Psi(x) = \left[-\frac{d^2}{dx^2} + U(x)\right] \Psi(x) = \varepsilon \Psi(x) \quad (1)$$ + +where $\varepsilon = 2mE/\hbar^2$ and $U(x) = 2mV(x)/\hbar^2$, $E$ being the energy of the particle. We assume that $U(x) \to 0$ fast enough as $x \to \pm\infty$, although the treatment can be adapted, with minor modifications, to cope with potentials for which the limits $U_{\pm} = \lim_{x\to\pm\infty} U(x)$ are different. 
+ +Since $U(x)$ decays rapidly as $|x| \to \infty$, solutions of (1) have the asymptotic behavior + +$$\Psi(x) = \begin{cases} A_+ e^{+ikx} + A_- e^{-ikx} & x \to -\infty \\ B_+ e^{+ikx} + B_- e^{-ikx} & x \to \infty \end{cases} \quad (2)$$ + +Here, $k^2 = \varepsilon$, $A_\pm$ and $B_\pm$ are $k$-dependent complex coefficients (unspecified, at this stage), and the subscripts $+$ and $-$ distinguish right-moving modes $\exp(+ikx)$ from left-moving modes $\exp(-ikx)$, respectively. + +The problem requires to work out the exact solution of (1) and invoke the appropriate boundary conditions, involving not only the continuity of $\Psi(x)$ itself, but also of its derivative. In this way, one has two linear relations among the coefficients $A_\pm$ and $B_\pm$, which can be solved for any amplitude pair in terms of the other two; the result can be expressed as a matrix equation that translates the linearity of the problem. Frequently, it is more advantageous to specify a linear relation between the wave amplitudes on both sides of the scatterer, namely, + +$$\begin{pmatrix} B_+ \\ B_- \end{pmatrix} = \mathbf{M} \begin{pmatrix} A_+ \\ A_- \end{pmatrix} \quad (3)$$ + +$\mathbf{M}$ is the transfer matrix, which depends in a complicated way on the potential $U(x)$. Yet one can extract a good deal of information without explicitly calculating it: let us apply (3) successively to a right-moving [( $A_+ = 1, B_- = 0$ )] and to a left-moving wave [( $A_+ = 0, B_- = 1$ )], both of unit amplitude. The result can be displayed as + +$$\begin{pmatrix} T^\ell \\ 0 \end{pmatrix} = \mathbf{M} \begin{pmatrix} 1 \\ R^\ell \end{pmatrix}, \quad \begin{pmatrix} R^r \\ 1 \end{pmatrix} = \mathbf{M} \begin{pmatrix} 0 \\ T^r \end{pmatrix} \quad (4)$$ + +where $T^{\ell,r}$ and $R^{\ell,r}$ are the transmission and reflection coefficients for a wave incoming at the potential from the left and from the right, respectively, defined in the standard way as the quotients of the pertinent fluxes [35]. 
+ +With this in mind, Equation (4) can be thought of as a linear superposition of the two independent solutions + +$$\Psi_k^\ell(x) = \begin{cases} e^{+ikx} + R^\ell(k)e^{-ikx} & x \to -\infty, \\ T^\ell(k)e^{+ikx} & x \to \infty, \end{cases}, \quad \Psi_k^r(x) = \begin{cases} T^r(k)e^{-ikx} & x \to -\infty, \\ e^{-ikx} + R^r(k)e^{+ikx} & x \to \infty \end{cases} \quad (5)$$ + +which is consistent with the fact that, since $\varepsilon > 0$, the spectrum of the Hamiltonian (1) is continuous and there are two linearly independent solutions for a given value of $\varepsilon$. The wave function $\Psi_k^\ell(x)$ represents a wave incident from $-\infty [\exp(+ikx)]$ and the interaction with the potential produces a +---PAGE_BREAK--- + +reflected wave [$R^{\ell}(k) \exp(-ikx)$] that escapes to $-\infty$ and a transmitted wave [$T^{\ell}(k) \exp(+ikx)$] that moves off to $+\infty$. The solution $\Psi_k^{\ell}(x)$ can be interpreted in a similar fashion. + +Because of the Wronskian of the solutions (5) is independent of $x$, we can compute $W(\Psi_k^{\ell}, \Psi_k^r) = \Psi_k^{\ell}\Psi_k^{r'} - \Psi_k^{r'}\Psi_k^{\ell}$ first for $x \to -\infty$ and then for $x \to \infty$; this gives + +$$ \frac{i}{2k} W(\Psi_k^\ell, \Psi_k^r) = T^\ell(k) = T^r(k) \quad (6) $$ + +We thus arrive at the important conclusion that, irrespective of the potential, the transmission coefficient is always independent of the input direction. + +Taking this constraint into account, we go back to the system (4) and write the solution for **M** as + +$$ M_{11}(k) = T(k) - \frac{R^{\ell}(k) R^r(k)}{T(k)}, \quad M_{12}(k) = \frac{R^r(k)}{T(k)}, \quad M_{21}(k) = -\frac{R^{\ell}(k)}{T(k)}, \quad M_{22}(k) = \frac{1}{T(k)} \quad (7) $$ + +A straightforward check shows that $\det \mathbf{M} = +1$, so $\mathbf{M} \in \text{SL}(2, \mathbb{C})$; a result that can be drawn from a number of alternative and more elaborate arguments [36]. 
+ +One could also relate outgoing amplitudes to the incoming ones (as they are often the magnitudes one can externally control): this is precisely the scattering matrix, which can be concisely formulated as + +$$ \begin{pmatrix} B_+ \\ A_- \end{pmatrix} = S \begin{pmatrix} A_+ \\ B_- \end{pmatrix} \qquad (8) $$ + +with matrix elements + +$$ S_{11}(k) = T(k), \quad S_{12}(k) = R^r(k), \quad S_{21}(k) = R^{\ell}(k), \quad S_{22}(k) = T(k) \quad (9) $$ + +Finally, we stress that transfer matrices are very convenient mathematical objects. Suppose that $V_1$ and $V_2$ are potentials with finite support, vanishing outside a pair of adjacent intervals $I_1$ and $I_2$. If $\mathbf{M}_1$ and $\mathbf{M}_2$ are the corresponding transfer matrices, the total system (with support $I_1 \cup I_2$) is described by + +$$ \mathbf{M} = \mathbf{M}_1 \mathbf{M}_2 \qquad (10) $$ + +This property is rather helpful: we can connect simple scatterers to create an intricate potential landscape and determine its transfer matrix by simple multiplication. This is a common instance in optics, where one routinely has to treat multilayer stacks. However, this important property does not seem to carry over into the scattering matrix in any simple way [37,38], because the incoming amplitudes for the overall system cannot be obtained in terms of the incoming amplitudes for every subsystem. + +### 3. Spectral Singularities + +The scattering solutions (5) constitute quite an intuitive way to attack the problem and they are widely employed in physical applications. Nevertheless, it is sometimes advantageous to look at the fundamental solutions of (1) in terms of left- and right-moving modes, as we have already used in (2). 
+ +Indeed, the two independent solutions of (1) can be formally written down as [39] + +$$ \Psi_k^{(+)}(x) = e^{+ikx} + \int_x^\infty K_+(x,x')e^{+ikx'}dx' \qquad (11) $$ + +$$ \Psi_k^{(-)}(x) = e^{-ikx} + \int_{-\infty}^{x} K_-(x,x')e^{-ikx'}dx' $$ +---PAGE_BREAK--- + +The kernels $K_{\pm}(x, x')$ enjoy a number of interesting properties. What matters for our purposes is that the resulting $\Psi_k^{(\pm)}(x)$ are analytic with respect to $k$ in $C_+ = \{z \in C | \operatorname{Im} z > 0\}$ and continuous on the real axis. In addition, it is clear that + +$$ \Psi_k^{(+)}(x) = e^{+ikx} \quad x \to \infty, \qquad \Psi_k^{(-)}(x) = e^{-ikx} \quad x \to -\infty \tag{12} $$ + +that is, they are the Jost functions for this problem [31]. + +Let us look at the Wronskian of the Jost functions $W(\Psi_k^{(-)}, \Psi_k^{(+)})$, which, as a function of $k$, is analytical in $C_+$. A spectral singularity is a point $k_* \in \mathbb{R}_+$ of the continuous spectrum of the Hamiltonian (1) such that + +$$ W(\Psi_{k^*}^{(-)}, \Psi_{k^*}^{(+)}) = 0 \tag{13} $$ + +so $\Psi_k^{(\pm)}(x)$ become linearly dependent at $k_*$ and the Hamiltonian is not diagonalizable. In fact, the set of zeros of the Wronskian is bounded, has at most a countable number of elements and its limit points can lie in a bounded subinterval of the real axis [40]. There is an extensive theory of spectral singularities for (1) that was started by Naimark [41]; the interested reader is referred to, e.g., Refs. [42–46] for further details. 
+ +The asymptotic behavior of $\Psi_k^{\pm}(x)$ at the opposite extremes of $\mathbb{R}$ with respect to those in (12) can be easily worked out by a simple application of the transfer matrix (and its inverse); viz, + +$$ \begin{align} \Psi_k^{(-)}(x) &= M_{12}e^{+ikx} + M_{22}e^{-ikx} && x \to \infty \\ \Psi_k^{(+)}(x) &= M_{22}e^{+ikx} - M_{12}e^{-ikx} && x \to -\infty \end{align} \tag{14} $$ + +Using $\Psi_k^{\pm}(x)$ in (12) and (14), we can calculate + +$$ \frac{i}{2k} W(\Psi_k^{(-)}, \Psi_k^{(+)}) = M_{22}(k) \tag{15} $$ + +Upon comparing with the definition (13), we can reinterpret the spectral singularities as the real zeros of $M_{22}(k)$ and, as a result, the reflection and transmission coefficients diverge therein. The converse holds because $M_{12}(k)$ and $M_{21}(k)$ are entire functions, lacking singularities. This means that, in an optical scenario, spectral singularities correspond to lasing thresholds [47–49]. + +One could also consider the more general case that the Hamiltonian (1) has, in addition to a continuous spectrum corresponding to $k \in \mathbb{R}_+$, a possibly complex discrete spectrum. The latter corresponds to the square-integrable solutions of that represent bound states. They are also zeros of $M_{22}(k)$, but unlike the zeros associated with the spectral singularities these must have a positive imaginary part [36]. + +The eigenvalues of S are + +$$ s_{\pm} = \frac{1}{M_{22}(k)} \left[ 1 \pm \sqrt{1 - M_{11}(k)M_{22}(k)} \right] \tag{16} $$ + +At a spectral singularity, $s_+$ diverges, while $s_- \to M_{11}(k)/2$, which suggests identifying spectral singularities with resonances with a vanishing width. + +**4. Invisibility and PT Symmetry** + +As heralded in the Introduction, unidirectional invisibility has been lately predicted in *PT* materials. We shall elaborate on the ideas developed by Mostafazadeh [50] in order to shed light into this intriguing question. 
+ +The potential $U(x)$ is called reflectionless from the left (right), if $R^\ell(k) = 0$ and $R^r(k) \neq 0$ [$R^r(k) = 0$ and $R^\ell(k) \neq 0$]. From the explicit matrix elements in (7) and (9), we see that unidirectional +---PAGE_BREAK--- + +reflectionlessness implies the non-diagonalizability of both **M** and **S**. Therefore, the parameters of the potential for which it becomes reflectionless correspond to exceptional points of **M** and **S** [51,52]. + +The potential is called invisible from the left (right), if it is reflectionless from left (right) and in addition $T(k) = 1$. We can easily express the conditions for the unidirectional invisibility as + +$$ +\begin{align} +M_{12}(k) & \neq 0, & M_{11}(k) &= M_{22}(k) = 1 && \text{(left invisible)} \\ +M_{21}(k) & \neq 0, & M_{11}(k) &= M_{22}(k) = 1 && \text{(right invisible)} +\end{align} +\tag{17} $$ + +Next, we scrutinize the role of $\mathcal{PT}$-symmetry in the invisibility. For that purpose, we first briefly recall that the parity transformation “reflects” the system with respect to the coordinate origin, so that $x \mapsto -x$ and the momentum $p \mapsto -p$. The action on the wave function is + +$$ \Psi(x) \mapsto (\mathcal{P}\Psi)(x) = \Psi(-x) \tag{18} $$ + +On the other hand, the time reversal inverts the sense of time evolution, so that $x \mapsto x$, $p \mapsto -p$ and $i \mapsto -i$. This means that the operator $\mathcal{T}$ implementing such a transformation is antiunitary and its action reads + +$$ \Psi(x) \mapsto (\mathcal{T}\Psi)(x) = \Psi^*(x) \tag{19} $$ + +Consequently, under a combined $\mathcal{PT}$ transformation, we have + +$$ \Psi(x) \mapsto (\mathcal{PT}\Psi)(x) = \Psi^*(-x) \tag{20} $$ + +Let us apply this to a general complex scattering potential. 
The transfer matrix of the $\mathcal{PT}$-transformed system, which we denote by $\mathbf{M}^{(\mathcal{PT})}$, fulfils
+ +In optics, beam propagation is governed by the paraxial wave equation, which is equivalent to a Schrödinger-like equation, with the role of the potential played here by the refractive index. Therefore, a necessary condition for a complex refractive index to be *PT* invariant is that its real part is an even function of $x$, while the imaginary component (loss and gain profile) is odd. + +**5. Relativistic Variables** + +To move ahead, let us construct the Hermitian matrices + +$$ \mathbf{X} = \begin{pmatrix} X_+ \\ X_- \end{pmatrix} \otimes \begin{pmatrix} X_+^* & X_-^* \end{pmatrix} = \begin{pmatrix} |X_+|^2 & X_+ X_-^* \\ X_+^* X_- & |X_-|^2 \end{pmatrix} \quad (27) $$ + +where $X_{\pm}$ refers to either $A_{\pm}$ or $B_{\pm}$; i.e., the amplitudes that determine the behavior at each side of the potential. The matrices $\mathbf{X}$ are quite reminiscent of the coherence matrix in optics or the density matrix in quantum mechanics. + +One can verify that $\mathbf{M}$ acts on $\mathbf{X}$ by conjugation + +$$ \mathbf{X}' = \mathbf{M} \mathbf{X} \mathbf{M}^\dagger \quad (28) $$ + +The matrix $\mathbf{X}'$ is associated with the amplitudes $B_{\pm}$ and $\mathbf{X}$ with $A_{\pm}$. + +Let us consider the set $\sigma^{\mu} = (\mathbb{1}, \sigma)$, with Greek indices running from 0 to 3. The $\sigma^{\mu}$ are the identity and the standard Pauli matrices, which constitute a basis of the linear space of $2 \times 2$ complex matrices. For the sake of covariance, it is convenient to define $\tilde{\sigma}^{\mu} \equiv \sigma_{\mu} = (\mathbb{1}, -\sigma)$, so that [55] + +$$ \mathrm{Tr}(\tilde{\sigma}^{\mu}\sigma_{\nu}) = 2\delta_{\nu}^{\mu} \quad (29) $$ + +and $\delta_{\nu}^{\mu}$ is the Kronecker delta. 
To any Hermitian matrix $\mathbf{X}$ we can associate the coordinates + +$$ x^{\mu} = \frac{1}{2} \mathrm{Tr}(\mathbf{X}\tilde{\sigma}^{\mu}) \quad (30) $$ + +The congruence (28) induces in this way a transformation + +$$ x'^{\mu} = \Lambda_{\nu}^{\mu}(\mathbf{M}) x^{\nu} \quad (31) $$ + +where $\Lambda_{\nu}^{\mu}(\mathbf{M})$ can be found to be + +$$ \Lambda_{\nu}^{\mu}(\mathbf{M}) = \frac{1}{2} \mathrm{Tr} (\tilde{\sigma}^{\mu} \mathbf{M} \sigma_{\nu} \mathbf{M}^{\dagger}) \quad (32) $$ + +This equation can be solved to obtain $\mathbf{M}$ from $\Lambda$. The matrices $\mathbf{M}$ and $-\mathbf{M}$ generate the same $\Lambda$, so this homomorphism is two-to-one. The variables $x^{\mu}$ are coordinates in a Minkovskian (1+3)-dimensional space and the action of the system can be seen as a Lorentz transformation in SO(1, 3). + +Having set the general scenario, let us have a closer look at the transfer matrix corresponding to right invisibility (the left invisibility can be dealt with in an analogous way); namely, + +$$ \mathbf{M} = \begin{pmatrix} 1 & R \\ 0 & 1 \end{pmatrix} \quad (33) $$ +---PAGE_BREAK--- + +where, for simplicity, we have dropped the superscript from $R^r$, as there is no risk of confusion. +Under the homomorphism (32) this matrix generates the Lorentz transformation + +$$ +\Lambda(\mathbf{M}) = \begin{pmatrix} +1 + |R|^2/2 & \mathrm{Re}\,R & -\mathrm{Im}\,R & -|R|^2/2 \\ +\mathrm{Re}\,R & 1 & 0 & -\mathrm{Re}\,R \\ +-\mathrm{Im}\,R & 0 & 1 & \mathrm{Im}\,R \\ +|R|^2/2 & \mathrm{Re}\,R & -\mathrm{Im}\,R & 1 - |R|^2/2 +\end{pmatrix} \tag{34} +$$ + +According to Wigner [56], the little group is a subgroup of the Lorentz transformations under which a standard vector $s^\mu$ remains invariant. When $s^\mu$ is timelike, the little group is the rotation group SO(3). If $s^\mu$ is spacelike, the little group are the boosts SO(1, 2). 
In this context, the matrix (34) is an instance of a null rotation; the little group when $s^\mu$ is a lightlike or null vector, which is related to E(2), the symmetry group of the two-dimensional Euclidean space [57]. + +If we write (34) in the form $\Lambda(\mathbf{M}) = \exp(i\mathbf{N})$, we can easily work out that + +$$ +\mathbf{N} = \begin{pmatrix} +0 & \operatorname{Re} R & -\operatorname{Im} R & 0 \\ +\operatorname{Re} R & 0 & 0 & -\operatorname{Re} R \\ +-\operatorname{Im} R & 0 & 0 & \operatorname{Im} R \\ +0 & \operatorname{Re} R & -\operatorname{Im} R & 0 +\end{pmatrix} \quad (35) +$$ + +This is a nilpotent matrix and the vectors annihilated by N are invariant by Λ(M). In terms of the Lie +algebra so(1, 3), N can be expressed as + +$$ +\mathbf{N} = \mathrm{Re}\,R (\mathbf{K}_1 + \mathbf{J}_2) - \mathrm{Im}\,R (\mathbf{K}_2 + \mathbf{J}_1) \qquad (36) +$$ + +where $\mathbf{K}_i$ generate boosts and $\mathbf{J}_i$ rotations ($i=1,2,3$) [58]. Observe that the rapidity of the boost and the angle of the rotation have the same norm. The matrix $\mathbf{N}$ define a two-parameter Abelian subgroup. + +Let us take, for the time being, Re R = 0, as it happens for *PT*-invariant invisibility. We can +express **K**₂ + **J**₁ as the differential operator + +$$ +\mathbf{K}_2 + \mathbf{J}_1 \mapsto (x^2\partial_0 + x^0\partial_2) + (x^2\partial_3 - x^3\partial_2) = x^2(\partial_0 + \partial_3) + (x^0 - x^3)\partial_2 \quad (37) +$$ + +As we can appreciate, the combinations + +$$ +x^2, \quad x^0 - x^3, \quad (x^0)^2 - (x^1)^2 - (x^3)^2 \tag{38} +$$ + +remain invariant. Suppressing the inessential coordinate $x^2$, the flow lines of the Killing vector (37) is +the intersection of a null plane, $x^0 - x^3 = c_2$ with a hyperboloid $(x^0)^2 - (x^1)^2 - (x^3)^2 = c_3$. The case +$c_3 = 0$ has the hyperboloid degenerate to a light cone with the orbits becoming parabolas lying in +corresponding null planes. + +**6. 
Hyperbolic Geometry and Invisibility** + +Although the relativistic hyperboloid in Minkowski space constitute by itself a model of hyperbolic geometry (understood in a broad sense, as the study of spaces with constant negative curvature), it is not the best suited to display some features. + +Let us consider the customary tridimensional hyperbolic space $\mathbb{H}^3$, defined in terms of the upper half-space $\{(x,y,z) \in \mathbb{R}^3 | z > 0\}$, equipped with the metric [59] + +$$ +ds^2 = \frac{\sqrt{dx^2 + dy^2 + dz^2}}{z} \tag{39} +$$ + +The geodesics are the semicircles in $\mathbb{H}^3$ orthogonal to the plane $z=0$. +---PAGE_BREAK--- + +We can think of the plane $z = 0$ in $\mathbb{R}^3$ as the complex plane $\mathbb{C}$ with the natural identification $(x,y,z) \mapsto w = x + iy$. We need to add the point at infinity, so that $\hat{\mathbb{C}} = \mathbb{C} \cup \infty$, which is usually referred to as the Riemann sphere and identify $\hat{\mathbb{C}}$ as the boundary of $\mathbb{H}^3$. + +Every matrix $\mathbf{M}$ in SL(2, $\mathbb{C}$) induces a natural mapping in $\mathbb{C}$ via Möbius (or bilinear) transformations [60] + +$$w' = \frac{M_{11}w + M_{12}}{M_{21}w + M_{22}} \qquad (40)$$ + +Note that any matrix obtained by multiplying $\mathbf{M}$ by a complex scalar $\lambda$ gives the same transformation, so a Möbius transformation determines its matrix only up to scalar multiples. In other words, we need to quotient out SL(2, $\mathbb{C}$) by its center $\{\mathbb{1}, -\mathbb{1}\}$: the resulting quotient group is known as the projective linear group and is usually denoted PSL(2, $\mathbb{C}$). + +Observe that we can break down the action (40) into a composition of maps of the form + +$$w \mapsto w + \lambda, \quad w \mapsto \lambda w, \quad w \mapsto -1/w \qquad (41)$$ + +with $\lambda \in \mathbb{C}$. 
Then we can extend the Möbius transformations to all $\mathbb{H}^3$ as follows: + +$$ (w,z) \mapsto (w+\lambda,z), \quad (w,z) \mapsto (\lambda w, |\lambda|z), \quad (w,z) \mapsto \left(-\frac{w^*}{|w^2+z^2|}, \frac{z}{|w^2+z^2|}\right) \qquad (42) $$ + +The expressions above come from decomposing the action on $\hat{\mathbb{C}}$ of each of the elements of PSL(2, $\mathbb{C}$) in question into two inversions (reflections) in circles in $\hat{\mathbb{C}}$. Each such inversion has a unique extension to $\mathbb{H}_3$ as an inversion in the hemisphere spanned by the circle and composing appropriate pairs of inversions gives us these formulas. + +In fact, one can show that PSL(2, $\mathbb{C}$) preserves the metric on $\mathbb{H}_3$. Moreover every isometry of $\mathbb{H}_3$ can be seen to be the extension of a conformal map of $\hat{\mathbb{C}}$ to itself, since it must send hemispheres orthogonal to $\hat{\mathbb{C}}$ to hemispheres orthogonal to $\hat{\mathbb{C}}$, hence circles in $\hat{\mathbb{C}}$ to circles in $\hat{\mathbb{C}}$. Thus all orientation-preserving isometries of $\mathbb{H}_3$ are given by elements of PSL(2, $\mathbb{C}$) acting as above. + +In the classification of these isometries the notion of fixed points is of utmost importance. These points are defined by the condition $w' = w$ in (40), whose solutions are + +$$w_f = \frac{(M_{11} - M_{22}) \pm \sqrt{[Tr(\mathbf{M})]^2 - 4}}{2M_{21}} \qquad (43)$$ + +So, they are determined by the trace of $\mathbf{M}$. When the trace is a real number, the induced Möbius transformations are called elliptic, hyperbolic, or parabolic, according $[Tr(\mathbf{M})]^2$ is lesser than, greater than, or equal to 4, respectively. 
The canonical representatives of those matrices are [61] + +$$ \underbrace{\begin{pmatrix} e^{i\theta/2} & 0 \\ 0 & e^{-i\theta/2} \end{pmatrix}}_{\text{elliptic}}, \quad \underbrace{\begin{pmatrix} e^{\xi/2} & 0 \\ 0 & e^{-\xi/2} \end{pmatrix}}_{\text{hyperbolic}}, \quad \underbrace{\begin{pmatrix} 1 & \lambda \\ 0 & 1 \end{pmatrix}}_{\text{parabolic}} \qquad (44) $$ + +while the induced geometrical actions are + +$$w' = we^{i\theta}, \quad w' = we^{\xi}, \quad w' = w + \lambda \qquad (45)$$ + +that is, a rotation of angle $\theta$ (so fixes the axis z), a squeezing of parameter $\xi$ (it has two fixed points in $\hat{\mathbb{C}}$, no fixed points in $\mathbb{H}_3$, and every hyperplane in $\mathbb{H}_3$ that contains the geodesic joining the two fixed points in $\hat{\mathbb{C}}$ is invariant); and a parallel displacement of magnitude $\lambda$, respectively. We emphasize that this later action is the only one without Euclidean analogy. Indeed, in view of (33), this is precisely the action associated to an invisible scatterer. The far-reaching consequences of this geometrical interpretation will be developed elsewhere. +---PAGE_BREAK--- + +**7. Concluding Remarks** + +We have studied unidirectional invisibility by a complex scattering potential, which is characterized +by a set of *PT* invariant equations. Consequently, the *PT*-symmetric invisible configurations are quite +special, for they possess the same symmetry as the equations. + +We have shown how to cast this phenomenon in term of space-time variables, having in this way +a relativistic presentation of invisibility as the set of null rotations. By resorting to elementary notions +of hyperbolic geometry, we have interpreted in a natural way the action of the transfer matrix in this +case as a parallel displacement. 
+ +We think that our results are yet another example of the advantages of these geometrical methods: +we have devised a geometrical tool to analyze invisibility in quite a concise way that, in addition, +can be closely related to other fields of physics. + +**Acknowledgments:** We acknowledge illuminating discussions with Antonio F. Costa, José F. Cariñena and José María Montesinos. Financial support from the Spanish Research Agency (Grant FIS2011-26786) is gratefully acknowledged. + +**Author Contributions:** Both authors contributed equally to the theoretical analysis, numerical calculations, and writing of the paper. + +**Conflicts of Interest:** The authors declare no conflict of interest. + +References + +1. Bender, C.M.; Boettcher, S. Real spectra in non-Hermitian Hamiltonians having *PT* symmetry. Phys. Rev. Lett. **1998**, *80*, 5243–5246. +2. Bender, C.M.; Boettcher, S.; Meisinger, P.N. *PT*-symmetric quantum mechanics. J. Math. Phys. **1999**, *40*, 2201–2229. +3. Bender, C.M.; Brody, D.C.; Jones, H.F. Complex extension of quantum mechanics. Phys. Rev. Lett. **2002**, *89*, doi:10.1103/PhysRevLett.89.270401. +4. Bender, C.M.; Brody, D.C.; Jones, H.F. Must a Hamiltonian be Hermitian? Am. J. Phys. **2003**, *71*, 1095–1102. +5. Bender, C.M. Making sense of non-Hermitian Hamiltonians. Rep. Prog. Phys. **2007**, *70*, 947–1018. +6. Bender, C.M.; Mannheim, P.D. *PT* symmetry and necessary and sufficient conditions for the reality of energy eigenvalues. Phys. Lett. A **2010**, *374*, 1616–1620. +7. Assis, P. *Non-Hermitian Hamiltonians in Field Theory: PT-symmetry and Applications*; VDM: Saarbrücken, Germany, 2010. +8. Moiseyev, N. *Non-Hermitian Quantum Mechanics*; Cambridge University Press: Cambridge, UK, 2011. +9. El-Ganainy, R.; Makris, K.G.; Christodoulides, D.N.; Musslimani, Z.H. Theory of coupled optical *PT*-symmetric structures. Opt. Lett. **2007**, *32*, 2632–2634. +10. Bendix, O.; Fleischmann, R.; Kottos, T.; Shapiro, B. 
Exponentially fragile *PT* symmetry in lattices with localized eigenmodes. Phys. Rev. Lett. **2009**, *103*, doi:10.1103/PhysRevLett.103.030402. +11. Ruter, C.E.; Makris, K.G.; El-Ganainy, R.; Christodoulides, D.N.; Segev, M.; Kip, D. Observation of parity-time symmetry in optics. Nat. Phys. **2010**, *6*, 192–195. +12. Makris, K.G.; El-Ganainy, R.; Christodoulides, D.N.; Musslimani, Z.H. Beam dynamics in *PT* symmetric optical lattices. Phys. Rev. Lett. **2008**, *100*, 103904:1–103904:4. +13. Longhi, S. Bloch oscillations in complex crystals with *PT* symmetry. Phys. Rev. Lett. **2009**, *103*, 123601:1–123601:4. +14. Sukhorukov, A.A.; Xu, Z.; Kivshar, Y.S. Nonlinear suppression of time reversals in *PT*-symmetric optical couplers. Phys. Rev. A **2010**, *82*, doi:10.1103/PhysRevA.82.043818. +15. Ahmed, Z.; Bender, C.M.; Berry, M.V. Reflectionless potentials and *PT* symmetry. J. Phys. A **2005**, *38*, L627–L630. +16. Lin, Z.; Ramezani, H.; Eichelkraut, T.; Kottos, T.; Cao, H.; Christodoulides, D.N. Unidirectional invisibility induced by *PT*-symmetric periodic structures. Phys. Rev. Lett. **2011**, *106*, doi:10.1103/PhysRevLett.106.213901. +17. Longhi, S. Invisibility in *PT*-symmetric complex crystals. J. Phys. A **2011**, *44*, doi:10.1088/1751-8113/44/48/485302. +---PAGE_BREAK--- + +18. Sánchez-Soto, L.L.; Monzón, J.J.; Barriuso, A.G.; Cariñena, J. The transfer matrix: A geometrical perspective. *Phys. Rep.* **2012**, *513*, 191–227. + +19. Monzón, J.J.; Sánchez-Soto, L.L. Lossles multilayers and Lorentz transformations: More than an analogy. *Opt. Commun.* **1999**, *162*, 1–6. + +20. Monzón, J.J.; Sánchez-Soto, L.L. Fully relativistic-like formulation of multilayer optics. *J. Opt. Soc. Am. A* **1999**, *16*, 2013–2018. + +21. Monzón, J.J.; Yonte, T.; Sánchez-Soto, L.L. Basic factorization for multilayers. *Opt. Lett.* **2001**, *26*, 370–372. + +22. Yonte, T.; Monzón, J.J.; Sánchez-Soto, L.L.; Cariñena, J.F.; López-Lacasta, C. 
Understanding multilayers from a geometrical viewpoint. *J. Opt. Soc. Am. A* **2002**, *19*, 603–609. + +23. Monzón, J.J.; Yonte, T.; Sánchez-Soto, L.L.; Cariñena, J.F. Geometrical setting for the classification of multilayers. *J. Opt. Soc. Am. A* **2002**, *19*, 985–991. + +24. Barriuso, A.G.; Monzón, J.J.; Sánchez-Soto, L.L. General unit-disk representation for periodic multilayers. *Opt. Lett.* **2003**, *28*, 1501–1503. + +25. Barriuso, A.G.; Monzón, J.J.; Sánchez-Soto, L.L.; Cariñena, J.F. Vectorlike representation of multilayers. *J. Opt. Soc. Am. A* **2004**, *21*, 2386–2391. + +26. Barriuso, A.G.; Monzón, J.J.; Sánchez-Soto, L.L.; Costa, A.F. Escher-like quasiperiodic heterostructures. *J. Phys. A* **2009**, *42*, 192002:1–192002:9. + +27. Muga, J.G.; Palao, J.P.; Navarro, B.; Egusquiza, I.L. Complex absorbing potentials. *Phys. Rep.* **2004**, *395*, 357–426. + +28. Levai, G.; Znojil, M. Systematic search for PT-symmetric potentials with real spectra. *J. Phys. A* **2000**, *33*, 7165–7180. + +29. Ahmed, Z. Schrödinger transmission through one-dimensional complex potentials. *Phys. Rev. A* **2001**, *64*, 042716:1–042716:4. + +30. Ahmed, Z. Energy band structure due to a complex, periodic, PT-invariant potential. *Phys. Lett. A* **2001**, *286*, 231–235. + +31. Mostafazadeh, A. Spectral singularities of complex scattering potentials and infinite reflection and transmission coefficients at real energies. *Phys. Rev. Lett.* **2009**, *102*, 220402:1–220402:4. + +32. Cannata, F.; Dedonder, J.P.; Ventura, A. Scattering in PT-symmetric quantum mechanics. *Ann. Phys.* **2007**, *322*, 397–433. + +33. Chong, Y.D.; Ge, L.; Stone, A.D. PT-symmetry breaking and laser-absorber modes in optical scattering systems. *Phys. Rev. Lett.* **2011**, *106*, doi:10.1103/PhysRevLett.106.093902. + +34. Ahmed, Z. New features of scattering from a one-dimensional non-Hermitian (complex) potential. *J. Phys. A* **2012**, *45*, doi:10.1088/1751-8113/45/3/032004. + +35. 
Boonserm, P.; Visser, M. One dimensional scattering problems: A pedagogical presentation of the relationship between reflection and transmission amplitudes. *Thai J. Math.* **2010**, *8*, 83–97. + +36. Mostafazadeh, A.; Mehri-Dehnavi, H. Spectral singularities, biorthonormal systems and a two-parameter family of complex point interactions. *J. Phys. A* **2009**, *42*, doi:10.1088/1751-8113/42/12/125303. + +37. Aktosun, T. A factorization of the scattering matrix for the Schrödinger equation and for the wave equation in one dimension. *J. Math. Phys.* **1992**, *33*, 3865–3869. + +38. Aktosun, T.; Klaus, M.; van der Mee, C. Factorization of scattering matrices due to partitioning of potentials in one-dimensional Schrödinger-type equations. *J. Math. Phys.* **1996**, *37*, 5897–5915. + +39. Marchenko, V.A. *Sturm-Liouville Operators and Their Applications*; AMS Chelsea: Providence, RI, USA, 1986. + +40. Tunca, G.; Bairamov, E. Discrete spectrum and principal functions of non-selfadjoint differential operator. *Czech J. Math.* **1999**, *49*, 689–700. + +41. Naimark, M.A. Investigation of the spectrum and the expansion in eigenfunctions of a non-selfadjoint operator of the second order on a semi-axis. *AMS Transl.* **1960**, *16*, 103–193. + +42. Pavlov, B.S. The nonself-adjoint Schrödinger operators. *Topics Math. Phys.* **1967**, *1*, 87–114. + +43. Naimark, M.A. *Linear Differential Operators: Part II*; Ungar: New York, NY, USA, 1968. + +44. Samsonov, B.F. SUSY transformations between diagonalizable and non-diagonalizable Hamiltonians. *J. Phys. A* **2005**, *38*, L397–L403. + +45. Andrianov, A.A.; Cannata, F.; Sokolov, A.V. Spectral singularities for non-Hermitian one-dimensional Hamiltonians: Puzzles with resolution of identity. *J. Math. Phys.* **2010**, *51*, 052104:1–052104:22 +---PAGE_BREAK--- + +46. Chaos-Cador, L.; García-Calderón, G. Resonant states for complex potentials and spectral singularities. *Phys. Rev. 
A* **2013**, 87, doi:10.1103/PhysRevA.87.042114. + +47. Schomerus, H. Quantum noise and self-sustained radiation of *PT*-symmetric systems. *Phys. Rev. Lett.* **2010**, *104*, doi:10.1103/PhysRevLett.104.233601. + +48. Longhi, S. *PT*-symmetric laser absorber. *Phys. Rev. A* **2010**, *82*, doi:10.1103/PhysRevA.82.031801. + +49. Mostafazadeh, A. Nonlinear spectral singularities of a complex barrier potential and the lasing threshold condition. *Phys. Rev. A* **2013**, *87*, doi:10.1103/PhysRevA.87.063838. + +50. Mostafazadeh, A. Invisibility and *PT*-symmetry. *Phys. Rev. A* **2013**, *87*, doi:10.1103/PhysRevA.87.012103. + +51. Müller, M.; Rotter, I. Exceptional points in open quantum systems. *J. Phys. A* **2008**, *41*, 244018:1–244018:15. + +52. Mehri-Dehnavi, H.; Mostafazadeh, A. Geometric phase for non-Hermitian Hamiltonians and its holonomy interpretation. *J. Math. Phys.* **2008**, *49*, 082105:1–082105:17. + +53. Monzón, J.J.; Barriuso, A.G.; Montesinos-Amilibia, J.M.; Sánchez-Soto, L.L. Geometrical aspects of *PT*-invariant transfer matrices. *Phys. Rev. A* **2013**, *87*, doi:10.1103/PhysRevA.87.012111. + +54. Mandel, L.; Wolf, E. *Optical Coherence and Quantum Optics*; Cambridge University Press: Cambridge, UK, 1995. + +55. Barut, A.O.; Rączka, R. *Theory of Group Representations and Applications*; PWN: Warszaw, Poland, 1977; Section 17.2. + +56. Wigner, E. On unitary representations of the inhomogeneous Lorentz group. *Ann. Math.* **1939**, *40*, 149–204. + +57. Kim, Y.S.; Noz, M.E. *Theory and Applications of the Poincaré Group*; Reidel: Dordrecht, The Netherlands, 1986. + +58. Weinberg, S. *The Quantum Theory of Fields*; Cambridge University Press: Cambridge, UK, 2005; Volume 1. + +59. Iversen, B. *Hyperbolic Geometry*; Cambridge University Press: Cambridge, UK, 1992; Chapter VIII. + +60. Ratcliffe, J.G. *Foundations of Hyperbolic Manifolds*; Springer: Berlin, Germany, 2006; Section 4.3. + +61. Anderson, J.W. 
*Hyperbolic Geometry*; Springer: New York, NY, USA, 1999; Chapter 3. + +© 2014 by the authors. Licensee MDPI, Basel, Switzerland. This article is an open access +article distributed under the terms and conditions of the Creative Commons Attribution +(CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +Article + +Wigner's Space-Time Symmetries Based on the +Two-by-Two Matrices of the Damped Harmonic +Oscillators and the Poincaré Sphere + +Sibel Başkal ¹, Young S. Kim ²,* and Marilyn E. Noz ³ + +¹ Department of Physics, Middle East Technical University, Ankara 06800, Turkey; +E-Mail: baskal@newton.physics.metu.edu.tr + +² Center for Fundamental Physics, University of Maryland, College Park, MD 20742, USA + +³ Department of Radiology, New York University, New York, NY 10016, USA; E-Mail: marilyne.noz@gmail.com + +* E-Mail: yskim@umd.edu; Tel.: +1-301-937-1306. + +Received: 28 February 2014; in revised form: 28 May 2014 / Accepted: 9 June 2014 / Published: 25 June 2014 + +**Abstract:** The second-order differential equation for a damped harmonic oscillator can be converted to two coupled first-order equations, with two two-by-two matrices leading to the group $Sp(2)$. It is shown that this oscillator system contains the essential features of Wigner's little groups dictating the internal space-time symmetries of particles in the Lorentz-covariant world. The little groups are the subgroups of the Lorentz group whose transformations leave the four-momentum of a given particle invariant. It is shown that the damping modes of the oscillator correspond to the little groups for massive and imaginary-mass particles respectively. When the system makes the transition from the oscillation to damping mode, it corresponds to the little group for massless particles. Rotations around the momentum leave the four-momentum invariant. 
This degree of freedom extends the $Sp(2)$ symmetry to that of $SL(2, c)$ corresponding to the Lorentz group applicable to the four-dimensional Minkowski space. The Poincaré sphere contains the $SL(2, c)$ symmetry. In addition, it has a non-Lorentzian parameter allowing us to reduce the mass continuously to zero. It is thus possible to construct the little group for massless particles from that of the massive particle by reducing its mass to zero. Spin-1/2 particles and spin-1 particles are discussed in detail. + +**Keywords:** damped harmonic oscillators; coupled first-order equations; unimodular matrices; Wigner's little groups; Poincaré sphere; $Sp(2)$ group; $SL(2, c)$ group; gauge invariance; neutrinos; photons + +PACS: 03.65.Fd, 03.67.-a, 05.30.-d + +# 1. Introduction + +We are quite familiar with the second-order differential equation + +$$m \frac{d^2 y}{dt^2} + b \frac{dy}{dt} + Ky = 0 \quad (1)$$ + +for a damped harmonic oscillator. This equation has the same mathematical form as + +$$L \frac{d^2 Q}{dt^2} + R \frac{dQ}{dt} + \frac{1}{C} Q = 0 \qquad (2)$$ + +for electrical circuits, where L, R, and C are the inductance, resistance, and capacitance respectively. These two equations play fundamental roles in physical and engineering sciences. Since they start from the same set of mathematical equations, one set of problems can be studied in terms of the other. For instance, many mechanical phenomena can be studied in terms of electrical circuits. +---PAGE_BREAK--- + +In Equation (1), when $b = 0$, the equation is that of a simple harmonic oscillator with the frequency $\omega = \sqrt{K/m}$. As $b$ increases, the oscillation becomes damped. When $b$ is larger than $2\sqrt{Km}$, the oscillation disappears, as the solution is a damping mode. + +Consider that increasing *b* continuously, while difficult mechanically, can be done electrically using Equation (2) by adjusting the resistance *R*. 
The transition from the oscillation mode to the damping mode is a continuous physical process. + +This *b* term leads to energy dissipation, but is not regarded as a fundamental force. It is inconvenient in the Hamiltonian formulation of mechanics and troublesome in transition to quantum mechanics, yet, plays an important role in classical mechanics. In this paper this term will help us understand the fundamental space-time symmetries of elementary particles. + +We are interested in constructing the fundamental symmetry group for particles in the Lorentz-covariant world. For this purpose, we transform the second-order differential equation of Equation (1) to two coupled first-order equations using two-by-two matrices. Only two linearly independent matrices are needed. They are the anti-symmetric and symmetric matrices + +$$A = \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix}, \quad \text{and} \quad S = \begin{pmatrix} 0 & i \\ i & 0 \end{pmatrix} \qquad (3)$$ + +respectively. The anti-symmetric matrix *A* is Hermitian and corresponds to the oscillation part, while the symmetric *S* matrix corresponds to the damping. + +These two matrices lead to the *Sp*(2) group consisting of two-by-two unimodular matrices with real elements. This group is isomorphic to the three-dimensional Lorentz group applicable to two space-like and one time-like coordinates. This group is commonly called the *O*(2, 1) group. + +This *O*(2, 1) group can explain all the essential features of Wigner's little groups dictating internal space-time symmetries of particles [1]. Wigner defined his little groups as the subgroups of the Lorentz group whose transformations leave the four-momentum of a given particle invariant. He observed that the little groups are different for massive, massless, and imaginary-mass particles. 
It has been a challenge to design a mathematical model which will combine those three into one formalism, but we show that the damped harmonic oscillator provides the desired mathematical framework. + +For the two space-like coordinates, we can assign one of them to the direction of the momentum, and the other to the direction perpendicular to the momentum. Let the direction of the momentum be along the z axis, and let the perpendicular direction be along the x axis. We therefore study the kinematics of the group within the zx plane, then see what happens when we rotate the system around the z axis without changing the momentum [2]. + +The Poincaré sphere for polarization optics contains the *SL*(2, *c*) symmetry isomorphic to the four-dimensional Lorentz group applicable to the Minkowski space [3–7]. Thus, the Poincaré sphere extends Wigner’s picture into the three space-like and one time-like coordinates. Specifically, this extension adds rotations around the given momentum which leaves the four-momentum invariant [2]. + +While the particle mass is a Lorentz-invariant variable, the Poincaré sphere contains an extra variable which allows the mass to change. This variable allows us to take the mass-limit of the symmetry operations. The transverse rotational degrees of freedom collapse into one gauge degree of freedom and polarization of neutrinos is a consequence of the requirement of gauge invariance [8,9]. + +The *SL*(2,*c*) group contains symmetries not seen in the three-dimensional rotation group. While we are familiar with two spinors for a spin-1/2 particle in nonrelativistic quantum mechanics, there are two additional spinors due to the reflection properties of the Lorentz group. There are thus 16 bilinear combinations of those four spinors. This leads to two scalars, two four-vectors, and one antisymmetric four-by-four tensor. The Maxwell-type electromagnetic field tensor can be obtained as a massless limit of this tensor [10]. 
+ +In Section 2, we review the damped harmonic oscillator in classical mechanics, and note that the solution can be either in the oscillation mode or damping mode depending on the magnitude of +---PAGE_BREAK--- + +the damping parameter. The translation of the second order equation into a first order differential equation with two-by-two matrices is possible. This first-order equation is similar to the Schrödinger equation for a spin-1/2 particle in a magnetic field. + +Section 3 shows that the two-by-two matrices of Section 2 can be formulated in terms of the $Sp(2)$ group. These matrices can be decomposed into the Bargmann and Wigner decompositions. Furthermore, this group is isomorphic to the three-dimensional Lorentz group with two space and one time-like coordinates. + +In Section 4, it is noted that this three-dimensional Lorentz group has all the essential features of Wigner's little groups which dictate the internal space-time symmetries of the particles in the Lorentz-covariant world. Wigner's little groups are the subgroups of the Lorentz group whose transformations leave the four-momentum of a given particle invariant. The Bargmann Wigner decompositions are shown to be useful tools for studying the little groups. + +In Section 5, we note that the given momentum is invariant under rotations around it. The addition of this rotational degree of freedom extends the $Sp(2)$ symmetry to the six-parameter $SL(2,c)$ symmetry. In the space-time language, this extends the three dimensional group to the Lorentz group applicable to three space and one time dimensions. + +Section 6 shows that the Poincaré sphere contains the symmetries of $SL(2,c)$ group. In addition, it contains an extra variable which allows us to change the mass of the particle, which is not allowed in the Lorentz group. + +In Section 7, the symmetries of massless particles are studied in detail. In addition to rotation around the momentum, Wigner's little group generates gauge transformations. 
While gauge transformations on spin-1 photons are well known, the gauge invariance leads to the polarization of massless spin-1/2 particles, as observed in neutrino polarizations. + +In Section 8, it is noted that there are four spinors for spin-1/2 particles in the Lorentz-covariant world. It is thus possible to construct 16 bilinear forms, applicable to two scalars, and two vectors, and one antisymmetric second-rank tensor. The electromagnetic field tensor is derived as the massless limit. This tensor is shown to be gauge-invariant. + +## 2. Classical Damped Oscillators + +For convenience, we write Equation (1) as + +$$ \frac{d^2 y}{dt^2} + 2\mu \frac{dy}{dt} + \omega^2 y = 0 \quad (4) $$ + +with + +$$ \omega = \sqrt{\frac{K}{m}}, \quad \text{and} \quad \mu = \frac{b}{2m} \qquad (5) $$ + +The damping parameter $\mu$ is positive when there are no external forces. When $\omega$ is greater than $\mu$, the solution takes the form + +$$ y = e^{-\mu t} [C_1 \cos(\omega't) + C_2 \sin(\omega't)] \quad (6) $$ + +where + +$$ \omega' = \sqrt{\omega^2 - \mu^2} \quad (7) $$ + +and $C_1$ and $C_2$ are the constants to be determined by the initial conditions. This expression is for a damped harmonic oscillator. Conversely, when $\mu$ is greater than $\omega$, the quantity inside the square-root sign is negative, then the solution becomes + +$$ y = e^{-\mu t} [C_3 \cosh(\mu't) + C_4 \sinh(\mu't)] \quad (8) $$ + +with + +$$ \mu' = \sqrt{\mu^2 - \omega^2} \quad (9) $$ +---PAGE_BREAK--- + +If $\omega = \mu$, both Equations (6) and (8) collapse into one solution + +$$y(t) = e^{-\mu t} [C_5 + C_6 t] \quad (10)$$ + +These three different cases are treated separately in textbooks. Here we are interested in the transition from Equation (6) to Equation (8), via Equation (10). For convenience, we start from $\mu$ greater than $\omega$ with $\mu'$ given by Equation (9). + +For a given value of $\mu$, the square root becomes zero when $\omega$ equals $\mu$. 
If $\omega$ becomes larger, the square root becomes imaginary and divides into two branches. + +$$\pm i \sqrt{\omega^2 - \mu^2} \qquad (11)$$ + +This is a continuous transition, but not an analytic continuation. To study this in detail, we translate the second order differential equation of Equation (4) into the first-order equation with two-by-two matrices. + +Given the solutions of Equations (6) and (10), it is convenient to use $\psi(t)$ defined as + +$$\psi(t) = e^{\mu t} y(t), \quad \text{and} \quad y = e^{-\mu t} \psi(t) \qquad (12)$$ + +Then $\psi(t)$ satisfies the differential equation + +$$\frac{d^2 \psi(t)}{dt^2} + (\omega^2 - \mu^2)\psi(t) = 0 \qquad (13)$$ + +## 2.1. Two-by-Two Matrix Formulation + +In order to convert this second-order equation to a first-order system, we introduce $\psi_1(t)$ and $\psi_2(t)$ satisfying two coupled differential equations + +$$\frac{d\psi_1}{dt} = (\mu - \omega)\psi_2(t) \qquad (14)$$ + +$$\frac{d\psi_2}{dt} = (\mu + \omega)\psi_1(t) \qquad (15)$$ + +which can be written in matrix form as + +$$\frac{d}{dt} \begin{pmatrix} \psi_1 \\ \psi_2 \end{pmatrix} = \begin{pmatrix} 0 & \mu - \omega \\ \mu + \omega & 0 \end{pmatrix} \begin{pmatrix} \psi_1 \\ \psi_2 \end{pmatrix} \qquad (16)$$ + +Using the Hermitian and anti-Hermitian matrices of Equation (3) in Section 1, we construct the linear combination + +$$H = \omega \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix} + \mu \begin{pmatrix} 0 & i \\ i & 0 \end{pmatrix} \qquad (17)$$ + +We can then consider the first-order differential equation + +$$i \frac{\partial}{\partial t} \psi(t) = H \psi(t) \qquad (18)$$ + +While this equation is like the Schrödinger equation for an electron in a magnetic field, the two-by-two matrix is not Hermitian. Its first matrix is Hermitian, but the second matrix is anti-Hermitian. 
It is of course an interesting problem to give a physical interpretation to this non-Hermitian matrix +---PAGE_BREAK--- + +in connection with quantum dissipation [11], but this is beyond the scope of the present paper. +The solution of Equation (18) is + +$$ +\psi(t) = \exp \left\{ \begin{pmatrix} 0 & -\omega + \mu \\ \omega + \mu & 0 \end{pmatrix} t \right\} \begin{pmatrix} C_7 \\ C_8 \end{pmatrix} \quad (19) +$$ + +where $C_7 = \psi_1(0)$ and $C_8 = \psi_2(0)$ respectively. + +2.2. Transition from the Oscillation Mode to Damping Mode + +It appears straight-forward to compute this expression by a Taylor expansion, but it is not. +This issue was extensively discussed in the earlier papers by two of us [12,13]. The key idea is to write +the matrix + +$$ +\begin{pmatrix} +0 & -\omega + \mu \\ +\omega + \mu & 0 +\end{pmatrix} +\qquad (20) +$$ + +as a similarity transformation of + +$$ +\omega' \begin{pmatrix} 0 & -1 \\ 1 & 0 \end{pmatrix} \quad (\omega > \mu) \tag{21} +$$ + +and as that of + +$$ +\mu' \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix} \quad (\mu > \omega) \tag{22} +$$ + +with $\omega'$ and $\mu'$ defined in Equations (7) and (9), respectively. +Then the Taylor expansion leads to + +$$ +\left( \frac{\cos(\omega't)}{\sqrt{(\omega+\mu)/(\omega-\mu)}} \sin(\omega't) - \frac{\sqrt{(\omega-\mu)/(\omega+\mu)}}{\cos(\omega't)} \sin(\omega't) \right) \quad (23) +$$ + +when $\omega$ is greater than $\mu$. 
The solution $\psi(t)$ takes the form + +$$ +\begin{pmatrix} +C_7 \cos(\omega't) - C_8 \sqrt{(\omega-\mu)/( \omega+\mu)} \sin(\omega't) \\ +C_7 \sqrt{(\omega+\mu)/( \omega-\mu)} \sin(\omega't) + C_8 \cos(\omega't) +\end{pmatrix} +\quad (24) +$$ + +If $\mu$ is greater than $\omega$, the Taylor expansion becomes + +$$ +\left( \frac{\cosh(\mu't)}{\sqrt{(\mu+\omega)/(\mu-\omega)}} \frac{\sqrt{(\mu-\omega)/(\mu+\omega)}}{\cosh(\mu't)} \sinh(\mu't) \right) \quad (25) +$$ + +When $\omega$ is equal to $\mu$, both Equations (23) and (25) become + +$$ +\begin{pmatrix} 1 & 0 \\ 2\omega t & 1 \end{pmatrix} \tag{26} +$$ + +If $\omega$ is sufficiently close to but smaller than $\mu$, the matrix of Equation (25) becomes + +$$ +\begin{pmatrix} +1 + (\epsilon/2)(2\omega t)^2 & +\epsilon(2\omega t) \\ +(2\omega t) & 1 + (\epsilon/2)(2\omega t)^2 +\end{pmatrix} +\quad (27) +$$ + +with + +$$ +\epsilon = \frac{\mu - \omega}{\mu + \omega} \tag{28} +$$ +---PAGE_BREAK--- + +If $\omega$ is sufficiently close to $\mu$, we can let + +$$ \mu + \omega = 2\omega, \quad \text{and} \quad \mu - \omega = 2\mu\epsilon \tag{29} $$ + +If $\omega$ is greater than $\mu$, $\epsilon$ defined in Equation (28) becomes negative, the matrix of Equation (23) becomes + +$$ \begin{pmatrix} 1 - (-\epsilon/2)(2\omega t)^2 & -(\epsilon)(2\omega t) \\ 2\omega t & 1 - (-\epsilon/2)(2\omega t)^2 \end{pmatrix} \tag{30} $$ + +We can rewrite this matrix as + +$$ \begin{pmatrix} 1 - (1/2) \left[ (2\omega\sqrt{-\epsilon})t \right]^2 & -\sqrt{-\epsilon} \left[ (2\omega\sqrt{-\epsilon})t \right] \\ 2\omega t & 1 - (1/2) \left[ (2\omega\sqrt{-\epsilon})t \right]^2 \end{pmatrix} \tag{31} $$ + +If $\epsilon$ becomes positive, Equation (27) can be written as + +$$ \begin{pmatrix} 1 + (1/2) [(2\omega\sqrt{\epsilon})t]^2 & \sqrt{\epsilon} [(2\omega\sqrt{\epsilon})t] \\ 2\omega t & 1 + (1/2) [(2\omega\sqrt{\epsilon})t]^2 \end{pmatrix} \tag{32} $$ + +The transition from Equation (31) to Equation (32) is continuous as they become 
identical when $\epsilon = 0$. As $\epsilon$ changes its sign, the diagonal elements of above matrices tell us how cos($\omega't$) becomes cosh($\mu't$). As for the upper-right element, $-\sin(\omega't)$ becomes sinh($\mu't$). This non-analytic continuity is discussed in detail in one of the earlier papers by two of us on lens optics [13]. This type of continuity was called there "tangential continuity." There, the function and its first derivative are continuous while the second derivative is not. + +## 2.3. Mathematical Forms of the Solutions + +In this section, we use the Heisenberg approach to the problem, and obtain the solutions in the form of two-by-two matrices. We note that + +1. For the oscillation mode, the trace of the matrix is smaller than 2. The solution takes the form of + +$$ \begin{pmatrix} \cos(x) & -e^{-\eta} \sin(x) \\ e^{\eta} \sin(x) & \cos(x) \end{pmatrix} \tag{33} $$ + +with trace $2 \cos(x)$. The trace is independent of $\eta$. + +2. For the damping mode, the trace of the matrix is greater than 2. + +$$ \begin{pmatrix} \cosh(x) & e^{-\eta} \sinh(x) \\ e^{\eta} \sinh(x) & \cosh(x) \end{pmatrix} \tag{34} $$ + +with trace $2 \cosh(x)$. Again, the trace is independent of $\eta$. + +3. For the transition mode, the trace is equal to 2, and the matrix is triangular and takes the form of + +$$ \begin{pmatrix} 1 & 0 \\ \gamma & 1 \end{pmatrix} \tag{35} $$ + +When $x$ approaches zero, the Equations (33) and (34) take the form + +$$ \begin{pmatrix} 1 - x^2/2 & -xe^{-\eta} \\ xe^{\eta} & 1 - x^2/2 \end{pmatrix}, \quad \text{and} \quad \begin{pmatrix} 1 + x^2/2 & xe^{-\eta} \\ xe^{\eta} & 1 + x^2/2 \end{pmatrix} \tag{36} $$ +---PAGE_BREAK--- + +respectively. These two matrices have the same lower-left element. Let us fix this element to be a +positive number $\gamma$. 
Then + +$$ +x = \gamma e^{-\eta} \tag{37} +$$ + +Then the matrices of Equation (36) become + +$$ +\begin{pmatrix} 1 - \gamma^2 e^{-2\eta} / 2 & -\gamma e^{-2\eta} \\ \gamma & 1 - \gamma^2 e^{-2\eta} / 2 \end{pmatrix}, \quad \text{and} \quad \begin{pmatrix} 1 + \gamma^2 e^{-2\eta} / 2 & \gamma e^{-2\eta} \\ \gamma & 1 + \gamma^2 e^{-2\eta} / 2 \end{pmatrix} \tag{38} +$$ + +If we introduce a small number $\epsilon$ defined as + +$$ +\epsilon = \sqrt{\gamma} e^{-\eta} \tag{39} +$$ + +the matrices of Equation (38) become + +$$ +\begin{pmatrix} e^{-\eta/2} & 0 \\ 0 & e^{\eta/2} \end{pmatrix} \begin{pmatrix} 1 - \gamma\epsilon^2/2 & \sqrt{\gamma}\epsilon \\ \sqrt{\gamma}\epsilon & 1 - \gamma\epsilon^2/2 \end{pmatrix} \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \tag{40} +$$ + +$$ +\begin{pmatrix} e^{-\eta/2} & 0 \\ 0 & e^{\eta/2} \end{pmatrix} \begin{pmatrix} 1 + \gamma\epsilon^2/2 & \sqrt{\gamma}\epsilon \\ \sqrt{\gamma}\epsilon & 1 + \gamma\epsilon^2/2 \end{pmatrix} \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} +$$ + +respectively, with $e^{-\eta} = \epsilon / \sqrt{\gamma}$. + +**3. Groups of Two-by-Two Matrices** + +If a two-by-two matrix has four complex elements, it has eight independent parameters. If the determinant of this matrix is one, it is known as an unimodular matrix and the number of independent parameters is reduced to six. The group of two-by-two unimodular matrices is called SL(2, c). This six-parameter group is isomorphic to the Lorentz group applicable to the Minkowski space of three space-like and one time-like dimensions [14]. + +We can start with two subgroups of SL(2, c). + +1. While the matrices of SL(2, c) are not unitary, we can consider the subset consisting of unitary matrices. This subgroup is called SU(2), and is isomorphic to the three-dimensional rotation group. This three-parameter group is the basic scientific language for spin-1/2 particles. + +2. 
We can also consider the subset of matrices with real elements. This three-parameter group is called Sp(2) and is isomorphic to the three-dimensional Lorentz group applicable to two space-like and one time-like coordinates. + +In the Lorentz group, there are three space-like dimensions with x, y, and z coordinates. +However, for many physical problems, it is more convenient to study the problem in the +two-dimensional (x, z) plane first and generalize it to three-dimensional space by rotating the system +around the z axis. This process can be called Euler decomposition and Euler generalization [2]. + +First, we study *Sp*(2) symmetry in detail, and achieve the generalization by augmenting the +two-by-two matrix corresponding to the rotation around the *z* axis. In this section, we study in detail +properties of *Sp*(2) matrices, then generalize them to *SL*(2, *c*) in Section 5. + +There are three classes of Sp(2) matrices. Their traces can be smaller or greater than two, or equal to two. While these subjects are already discussed in the literature [15–17] our main interest is what happens as the trace goes from less than two to greater than two. Here we are guided by the model we have discussed in Section 2, which accounts for the transition from the oscillation mode to the damping mode. +---PAGE_BREAK--- + +### 3.1. Lie Algebra of Sp(2) + +The two linearly independent matrices of Equation (3) can be written as + +$$ K_1 = \frac{1}{2} \begin{pmatrix} 0 & i \\ i & 0 \end{pmatrix}, \quad \text{and} \quad J_2 = \frac{1}{2} \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix} \qquad (41) $$ + +However, the Taylor series expansion of the exponential form of Equation (23) or Equation (25) requires an additional matrix + +$$ K_3 = \frac{1}{2} \begin{pmatrix} i & 0 \\ 0 & -i \end{pmatrix} \qquad (42) $$ + +These matrices satisfy the following closed set of commutation relations. 
$$ [K_1, J_2] = iK_3, \quad [J_2, K_3] = iK_1, \quad [K_3, K_1] = -iJ_2 \qquad (43) $$

These commutation relations remain invariant under Hermitian conjugation, even though $K_1$ and $K_3$ are anti-Hermitian. The algebra generated by these three matrices is known in the literature as the group $Sp(2)$ [17]. Furthermore, the closed set of commutation relations is commonly called the Lie algebra. Indeed, Equation (43) is the Lie algebra of the $Sp(2)$ group.

The Hermitian matrix $J_2$ generates the rotation matrix

$$ R(\theta) = \exp(-i\theta J_2) = \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \qquad (44) $$

and the anti-Hermitian matrices $K_1$ and $K_3$ generate the following squeeze matrices.

$$ S(\lambda) = \exp(-i\lambda K_1) = \begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix} \qquad (45) $$

and

$$ B(\eta) = \exp(-i\eta K_3) = \begin{pmatrix} \exp(\eta/2) & 0 \\ 0 & \exp(-\eta/2) \end{pmatrix} \qquad (46) $$

respectively.

Returning to the Lie algebra of Equation (43), since $K_1$ and $K_3$ are anti-Hermitian, and $J_2$ is Hermitian, the set of commutation relations is invariant under Hermitian conjugation. In other words, the commutation relations remain invariant, even if we change the sign of $K_1$ and $K_3$, while keeping that of $J_2$ invariant. Next, let us take the complex conjugate of the entire system. Then both the $J$ and $K$ matrices change their signs.

### 3.2.
Bargmann and Wigner Decompositions + +Since the $Sp(2)$ matrix has three independent parameters, it can be written as [15] + +$$ \begin{pmatrix} \cos(\alpha_1/2) & -\sin(\alpha_1/2) \\ \sin(\alpha_1/2) & \cos(\alpha_1/2) \end{pmatrix} \begin{pmatrix} \cosh\chi & \sinh\chi \\ \sinh\chi & \cosh\chi \end{pmatrix} \begin{pmatrix} \cos(\alpha_2/2) & -\sin(\alpha_2/2) \\ \sin(\alpha_2/2) & \cos(\alpha_2/2) \end{pmatrix} \qquad (47) $$ + +This matrix can be written as + +$$ \begin{pmatrix} \cos(\delta/2) & -\sin(\delta/2) \\ \sin(\delta/2) & \cos(\delta/2) \end{pmatrix} \begin{pmatrix} a & b \\ c & d \end{pmatrix} \begin{pmatrix} \cos(\delta/2) & \sin(\delta/2) \\ -\sin(\delta/2) & \cos(\delta/2) \end{pmatrix} \qquad (48) $$ +---PAGE_BREAK--- + +where + +$$ +\begin{pmatrix} a & b \\ c & d \end{pmatrix} = \begin{pmatrix} \cos(\alpha/2) & -\sin(\alpha/2) \\ \sin(\alpha/2) & \cos(\alpha/2) \end{pmatrix} \begin{pmatrix} \cosh \chi & \sinh \chi \\ \sinh \chi & \cosh \chi \end{pmatrix} \begin{pmatrix} \cos(\alpha/2) & -\sin(\alpha/2) \\ \sin(\alpha/2) & \cos(\alpha/2) \end{pmatrix} \quad (49) +$$ + +with + +$$ +\delta = \frac{1}{2}(\alpha_1 - \alpha_2), \quad \text{and} \quad \alpha = \frac{1}{2}(\alpha_1 + \alpha_2) \tag{50} +$$ + +If we complete the matrix multiplication of Equation (49), the result is + +$$ +\left( +\begin{array}{cc} + (\cosh \chi) \cos \alpha & \sinh \chi - (\cosh \chi) \sin \alpha \\ + \sinh \chi + (\cosh \chi) \sin \alpha & (\cosh \chi) \cos \alpha +\end{array} +\right) +\qquad (51) +$$ + +We shall call hereafter the decomposition of Equation (49) the Bargmann decomposition. This means that every matrix in the Sp(2) group can be brought to the Bargmann decomposition by a similarity transformation of rotation, as given in Equation (48). This decomposition leads to an equidiagonal matrix with two independent parameters. + +For the matrix of Equation (49), we can now consider the following three cases. 
Let us assume that $\chi$ is positive, and the angle $\alpha$ is less than 90°. Let us look at the upper-right element.

1. If it is negative with $[\sinh\chi < (\cosh\chi)\sin\alpha]$, then the trace of the matrix is smaller than 2, and the matrix can be written as

$$
\begin{pmatrix}
\cos(\theta/2) & -e^{-\eta}\sin(\theta/2) \\
e^{\eta}\sin(\theta/2) & \cos(\theta/2)
\end{pmatrix}
\qquad (52)
$$

with

$$
\cos(\theta/2) = (\cosh\chi)\cos\alpha, \quad \text{and} \quad e^{-2\eta} = \frac{(\cosh\chi)\sin\alpha - \sinh\chi}{(\cosh\chi)\sin\alpha + \sinh\chi} \tag{53}
$$

2. If it is positive with $[\sinh \chi > (\cosh \chi) \sin \alpha]$, then the trace is greater than 2, and the matrix can be written as

$$
\begin{pmatrix}
\cosh(\lambda/2) & e^{-\eta} \sinh(\lambda/2) \\
e^{\eta} \sinh(\lambda/2) & \cosh(\lambda/2)
\end{pmatrix}
\qquad (54)
$$

with

$$
\cosh(\lambda/2) = (\cosh\chi)\cos\alpha, \quad \text{and} \quad e^{-2\eta} = \frac{\sinh\chi - (\cosh\chi)\sin\alpha}{(\cosh\chi)\sin\alpha + \sinh\chi} \tag{55}
$$

3. If it is zero with $[\sinh \chi = (\cosh \chi) \sin \alpha]$, then the trace is equal to 2, and the matrix takes the form

$$
\begin{pmatrix}
1 & 0 \\
2 \sinh \chi & 1
\end{pmatrix}
\qquad (56)
$$

The above repeats the mathematics given in Section 2.3.

Returning to Equations (52) and (54), they can be decomposed into

$$
M(\theta, \eta) = \begin{pmatrix} e^{-\eta/2} & 0 \\ 0 & e^{\eta/2} \end{pmatrix} \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \quad (57)
$$

and

$$
M(\lambda, \eta) = \begin{pmatrix} e^{-\eta/2} & 0 \\ 0 & e^{\eta/2} \end{pmatrix} \begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix} \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \quad (58)
$$

respectively.
In view of the physical examples given in Section 6, we shall call this the "Wigner decomposition." Unlike the Bargmann decomposition, the Wigner decomposition is in the form of a similarity transformation.

---PAGE_BREAK---

We note that both Equations (57) and (58) are written as similarity transformations. Thus

$$
[M(\theta, \eta)]^n = \begin{pmatrix} \cos(n\theta/2) & -e^{-\eta} \sin(n\theta/2) \\ e^{\eta} \sin(n\theta/2) & \cos(n\theta/2) \end{pmatrix} \quad (59)
$$

$$
[M(\lambda, \eta)]^n = \begin{pmatrix} \cosh(n\lambda/2) & e^{-\eta} \sinh(n\lambda/2) \\ e^{\eta} \sinh(n\lambda/2) & \cosh(n\lambda/2) \end{pmatrix} \quad (60)
$$

$$
[M(\gamma)]^n = \begin{pmatrix} 1 & 0 \\ n\gamma & 1 \end{pmatrix} \tag{61}
$$

These expressions are useful for studying periodic systems [18].

The question is what physics these decompositions describe in the real world. To address this, we study what the Lorentz group does in the real world, and study the isomorphism between the Sp(2) group and the Lorentz group applicable to the three-dimensional space consisting of one time and two space coordinates.

### 3.3. Isomorphism with the Lorentz Group

The purpose of this section is to give physical interpretations of the mathematical formulas given in Section 3.2. We will interpret these formulae in terms of the Lorentz transformations which are normally described by four-by-four matrices. For this purpose, it is necessary to establish a correspondence between the two-by-two representation of Section 3.2 and the four-by-four representations of the Lorentz group.

Let us consider the Minkowskian space-time four-vector

$$
(t, z, x, y) \tag{62}
$$

where $(t^2 - z^2 - x^2 - y^2)$ remains invariant under Lorentz transformations. The Lorentz group consists of four-by-four matrices performing Lorentz transformations in the Minkowski space.
+ +In order to give physical interpretations to the three two-by-two matrices given in +Equations (44)–(46), we consider rotations around the *y* axis, boosts along the *x* axis, and boosts +along the *z* axis. The transformation is restricted in the three-dimensional subspace of (*t*, *z*, *x*). It is +then straight-forward to construct those four-by-four transformation matrices where the *y* coordinate +remains invariant. They are given in Table 1. Their generators also given. Those four-by-four generators +satisfy the Lie algebra given in Equation (43). + +**Table 1.** Matrices in the two-by-two representation, and their corresponding four-by-four generators and transformation matrices. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Matrices | Generators | Four-by-Four Generators | Four-by-Four Transformation Matrices |
| --- | --- | --- | --- |
| $R(\theta)$ | $J_2 = \frac{1}{2}\begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix}$ | $\begin{pmatrix} 0 & 0 & 0 & 0 \\ 0 & 0 & -i & 0 \\ 0 & i & 0 & 0 \\ 0 & 0 & 0 & 0 \end{pmatrix}$ | $\begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & \cos\theta & -\sin\theta & 0 \\ 0 & \sin\theta & \cos\theta & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}$ |
| $B(\eta)$ | $K_3 = \frac{1}{2}\begin{pmatrix} i & 0 \\ 0 & -i \end{pmatrix}$ | $\begin{pmatrix} 0 & i & 0 & 0 \\ i & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \end{pmatrix}$ | $\begin{pmatrix} \cosh\eta & \sinh\eta & 0 & 0 \\ \sinh\eta & \cosh\eta & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}$ |
| $S(\lambda)$ | $K_1 = \frac{1}{2}\begin{pmatrix} 0 & i \\ i & 0 \end{pmatrix}$ | $\begin{pmatrix} 0 & 0 & i & 0 \\ 0 & 0 & 0 & 0 \\ i & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \end{pmatrix}$ | $\begin{pmatrix} \cosh\lambda & 0 & \sinh\lambda & 0 \\ 0 & 1 & 0 & 0 \\ \sinh\lambda & 0 & \cosh\lambda & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}$ |
+ + +---PAGE_BREAK--- + +**4. Internal Space-Time Symmetries** + +We have seen that there corresponds a two-by-two matrix for each four-by-four Lorentz transformation matrix. It is possible to give physical interpretations to those four-by-four matrices. It must thus be possible to attach a physical interpretation to each two-by-two matrix. + +Since 1939 [1] when Wigner introduced the concept of the little groups many papers have been published on this subject, but most of them were based on the four-by-four representation. In this section, we shall give the formalism of little groups in the language of two-by-two matrices. In so doing, we provide physical interpretations to the Bargmann and Wigner decompositions introduced in Section 3.2. + +**4.1. Wigner's Little Groups** + +In [1], Wigner started with a free relativistic particle with momentum, then constructed subgroups of the Lorentz group whose transformations leave the four-momentum invariant. These subgroups thus define the internal space-time symmetry of the given particle. Without loss of generality, we assume that the particle momentum is along the z direction. Thus rotations around the momentum leave the momentum invariant, and this degree of freedom defines the helicity, or the spin parallel to the momentum. + +We shall use the word "Wigner transformation" for the transformation which leaves the four-momentum invariant: + +1. For a massive particle, it is possible to find a Lorentz frame where it is at rest with zero momentum. The four-momentum can be written as $m(1,0,0,0)$, where $m$ is the mass. This four-momentum is invariant under rotations in the three-dimensional $(z, x, y)$ space. + +2. For an imaginary-mass particle, there is the Lorentz frame where the energy component vanishes. The momentum four-vector can be written as $p(0,1,0,0)$, where $p$ is the magnitude of the momentum. + +3. If the particle is massless, its four-momentum becomes $p(1,1,0,0)$. 
Here the first and second components are equal in magnitude. + +The constant factors in these four-momenta do not play any significant roles. Thus we write them as $(1,0,0,0)$, $(0,1,0,0)$, and $(1,1,0,0)$ respectively. Since Wigner worked with these three specific four-momenta [1], we call them Wigner four-vectors. + +All of these four-vectors are invariant under rotations around the z axis. The rotation matrix is + +$$Z(\phi) = \begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & \cos\phi & -\sin\phi \\ 0 & 0 & \sin\phi & \cos\phi \end{pmatrix} \quad (63)$$ + +In addition, the four-momentum of a massive particle is invariant under the rotation around the y axis, whose four-by-four matrix was given in Table 1. The four-momentum of an imaginary particle is invariant under the boost matrix $S(\lambda)$ given in Table 1. The problem for the massless particle is more complicated, but will be discussed in detail in Section 7. See Table 2. +---PAGE_BREAK--- + +**Table 2.** Wigner four-vectors and Wigner transformation matrices applicable to two space-like and one time-like dimensions. Each Wigner four-vector remains invariant under the application of its Wigner matrix. + +
| Mass | Wigner Four-Vector | Wigner Transformation |
| --- | --- | --- |
| Massive | $(1, 0, 0, 0)$ | $\begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & \cos\theta & -\sin\theta & 0 \\ 0 & \sin\theta & \cos\theta & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}$ |
| Massless | $(1, 1, 0, 0)$ | $\begin{pmatrix} 1 + \gamma^2/2 & -\gamma^2/2 & \gamma & 0 \\ \gamma^2/2 & 1 - \gamma^2/2 & \gamma & 0 \\ -\gamma & \gamma & 1 & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}$ |
| Imaginary mass | $(0, 1, 0, 0)$ | $\begin{pmatrix} \cosh\lambda & 0 & \sinh\lambda & 0 \\ 0 & 1 & 0 & 0 \\ \sinh\lambda & 0 & \cosh\lambda & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}$ |
## 4.2. Two-by-Two Formulation of Lorentz Transformations

The Lorentz group is a group of four-by-four matrices performing Lorentz transformations on the Minkowskian vector space of $(t, z, x, y)$, leaving the quantity

$$t^2 - z^2 - x^2 - y^2 \quad (64)$$

invariant. It is possible to perform the same transformation using two-by-two matrices [7,14,19]. In this two-by-two representation, the four-vector is written as

$$X = \begin{pmatrix} t+z & x-iy \\ x+iy & t-z \end{pmatrix} \quad (65)$$

where its determinant is precisely the quantity given in Equation (64) and the Lorentz transformation on this matrix is a determinant-preserving, or unimodular transformation. Let us consider the transformation matrix as [7,19]

$$G = \begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix}, \quad \text{and} \quad G^{\dagger} = \begin{pmatrix} \alpha^{*} & \gamma^{*} \\ \beta^{*} & \delta^{*} \end{pmatrix} \quad (66)$$

with

$$\det(G) = 1 \quad (67)$$

and the transformation

$$X' = GXG^{\dagger} \quad (68)$$

Since $G$ is not a unitary matrix, Equation (68) is not a unitary transformation; rather, we call this the "Hermitian transformation". Equation (68) can be written as

$$\begin{pmatrix} t' + z' & x' - iy' \\ x' + iy' & t' - z' \end{pmatrix} = \begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix} \begin{pmatrix} t + z & x - iy \\ x + iy & t - z \end{pmatrix} \begin{pmatrix} \alpha^* & \gamma^* \\ \beta^* & \delta^* \end{pmatrix} \quad (69)$$

It is still a determinant-preserving unimodular transformation, thus it is possible to write this as a four-by-four transformation matrix applicable to the four-vector $(t,z,x,y)$ [7,14].

Since the $G$ matrix starts with four complex numbers and its determinant is one by Equation (67), it has six independent parameters.
The group of these $G$ matrices is known to be locally isomorphic

---PAGE_BREAK---

to the group of four-by-four matrices performing Lorentz transformations on the four-vector $(t, z, x, y)$. In other words, for each $G$ matrix there is a corresponding four-by-four Lorentz-transform matrix [7].

The matrix $G$ is not a unitary matrix, because its Hermitian conjugate is not always its inverse. This group has a unitary subgroup called SU(2) and another consisting only of real matrices called Sp(2). For this latter subgroup, it is sufficient to work with the three matrices $R(\theta)$, $S(\lambda)$, and $B(\eta)$ given in Equations (44)–(46) respectively. Each of these matrices has its corresponding four-by-four matrix applicable to $(t, z, x, y)$. These matrices with their four-by-four counterparts are tabulated in Table 1.

The energy-momentum four-vector can also be written as a two-by-two matrix. It can be written as

$$
P = \begin{pmatrix} p_0 + p_z & p_x - ip_y \\ p_x + ip_y & p_0 - p_z \end{pmatrix} \tag{70}
$$

with

$$
\det(P) = p_0^2 - p_x^2 - p_y^2 - p_z^2 \quad (71)
$$

which means

$$
\det(P) = m^2 \tag{72}
$$

where $m$ is the particle mass.

The Lorentz transformation can be written explicitly as

$$
P' = GPG^{\dagger} \qquad (73)
$$

or

$$
\begin{pmatrix} p'_0 + p'_z & p'_x - ip'_y \\ p'_x + ip'_y & p'_0 - p'_z \end{pmatrix} = \begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix} \begin{pmatrix} p_0 + p_z & p_x - ip_y \\ p_x + ip_y & p_0 - p_z \end{pmatrix} \begin{pmatrix} \alpha^* & \gamma^* \\ \beta^* & \delta^* \end{pmatrix} \quad (74)
$$

This is a unimodular transformation, and the mass is a Lorentz-invariant variable. Furthermore, it was shown in [7] that Wigner's little groups for massive, massless, and imaginary-mass particles can be explicitly defined in terms of two-by-two matrices.
Wigner's little group consists of two-by-two matrices satisfying

$$
P = WPW^{\dagger} \tag{75}
$$

The two-by-two $W$ matrix is not an identity matrix, but tells about the internal space-time symmetry of a particle with a given energy-momentum four-vector. This aspect was not known when Einstein formulated his special relativity in 1905, hence the internal space-time symmetry was not an issue at that time. We call the two-by-two matrix $W$ the Wigner matrix, and call the condition of Equation (75) the Wigner condition.

If the determinant of $P$ is a positive number, then $P$ is proportional to

$$
P = \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix} \tag{76}
$$

corresponding to a massive particle at rest, while if the determinant is negative, it is proportional to

$$
P = \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} \tag{77}
$$

---PAGE_BREAK---

corresponding to an imaginary-mass particle moving faster than light along the z direction, with a vanishing energy component. If the determinant is zero, $P$ is

$$P = \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \tag{78}$$

which is proportional to the four-momentum matrix for a massless particle moving along the z direction.

For all three cases, the matrix of the form

$$Z(\phi) = \begin{pmatrix} e^{-i\phi/2} & 0 \\ 0 & e^{i\phi/2} \end{pmatrix} \tag{79}$$

will satisfy the Wigner condition of Equation (75). This matrix corresponds to rotations around the z axis.

For the massive particle with the four-momentum of Equation (76), the transformations with the rotation matrix of Equation (44) leave the $P$ matrix of Equation (76) invariant. Together with the $Z(\phi)$ matrix, this rotation matrix leads to the subgroup consisting of the unitary subset of the $G$ matrices. The unitary subset of $G$ is SU(2) corresponding to the three-dimensional rotation group dictating the spin of the particle [14].
+ +For the massless case, the transformations with the triangular matrix of the form + +$$\begin{pmatrix} 1 & \gamma \\ 0 & 1 \end{pmatrix} \tag{80}$$ + +leave the momentum matrix of Equation (78) invariant. The physics of this matrix has a stormy history, and the variable $\gamma$ leads to a gauge transformation applicable to massless particles [8,9,20,21]. + +For a particle with an imaginary mass, a W matrix of the form of Equation (45) leaves the four-momentum of Equation (77) invariant. + +Table 3 summarizes the transformation matrices for Wigner's little groups for massive, massless, and imaginary-mass particles. Furthermore, in terms of their traces, the matrices given in this subsection can be compared with those given in Section 2.3 for the damped oscillator. The comparisons are given in Table 4. + +Of course, it is a challenging problem to have one expression for all three classes. This problem has been discussed in the literature [12], and the damped oscillator case of Section 2 addresses the continuity problem. + +**Table 3.** Wigner vectors and Wigner matrices in the two-by-two representation. The trace of the matrix tells whether the particle $m^2$ is positive, zero, or negative. + +
| Particle Mass | Four-Momentum | Transform Matrix | Trace |
| --- | --- | --- | --- |
| Massive | $\begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix}$ | $\begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix}$ | less than 2 |
| Massless | $\begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix}$ | $\begin{pmatrix} 1 & \gamma \\ 0 & 1 \end{pmatrix}$ | equal to 2 |
| Imaginary mass | $\begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix}$ | $\begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix}$ | greater than 2 |
+---PAGE_BREAK--- + +**Table 4.** Damped Oscillators and Space-time Symmetries. Both share Sp(2) as their symmetry group. + +
| Trace | Damped Oscillator | Particle Symmetry |
| --- | --- | --- |
| Smaller than 2 | Oscillation Mode | Massive Particles |
| Equal to 2 | Transition Mode | Massless Particles |
| Larger than 2 | Damping Mode | Imaginary-mass Particles |
+ +## 5. Lorentz Completion of Wigner's Little Groups + +So far we have considered transformations applicable only to (t, z, x) space. In order to study the full symmetry, we have to consider rotations around the z axis. As previously stated, when a particle moves along this axis, this rotation defines the helicity of the particle. + +In [1], Wigner worked out the little group of a massive particle at rest. When the particle gains a momentum along the z direction, the single particle can reverse the direction of momentum, the spin, or both. What happens to the internal space-time symmetries is discussed in this section. + +### 5.1. Rotation around the z Axis + +In Section 3, our kinematics was restricted to the two-dimensional space of z and x, and thus includes rotations around the y axis. We now introduce the four-by-four matrix of Equation (63) performing rotations around the z axis. Its corresponding two-by-two matrix was given in Equation (79). Its generator is + +$$J_3 = \frac{1}{2} \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} \qquad (81)$$ + +If we introduce this additional matrix for the three generators we used in Sections 3 and 3.2, we end up the closed set of commutation relations + +$$[J_i, J_j] = i\epsilon_{ijk}J_k, \quad [J_i, K_j] = i\epsilon_{ijk}K_k, \quad [K_i, K_j] = -i\epsilon_{ijk}J_k \qquad (82)$$ + +with + +$$J_i = \frac{1}{2}\sigma_i, \quad \text{and} \quad K_i = \frac{i}{2}\sigma_i \qquad (83)$$ + +where $\sigma_i$ are the two-by-two Pauli spin matrices. + +For each of these two-by-two matrices there is a corresponding four-by-four matrix generating Lorentz transformations on the four-dimensional Lorentz group. When these two-by-two matrices are imaginary, the corresponding four-by-four matrices were given in Table 1. If they are real, the corresponding four-by-four matrices were given in Table 5. +---PAGE_BREAK--- + +**Table 5.** Two-by-two and four-by-four generators not included in Table 1. 
The generators given there and given here constitute the set of six generators for SL(2, c) or of the Lorentz group given in Equation (82). + +
| Generator | Two-by-Two | Four-by-Four |
| --- | --- | --- |
| $J_3$ | $\frac{1}{2}\begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix}$ | $\begin{pmatrix} 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & -i \\ 0 & 0 & i & 0 \end{pmatrix}$ |
| $J_1$ | $\frac{1}{2}\begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}$ | $\begin{pmatrix} 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & i \\ 0 & 0 & 0 & 0 \\ 0 & -i & 0 & 0 \end{pmatrix}$ |
| $K_2$ | $\frac{1}{2}\begin{pmatrix} 0 & 1 \\ -1 & 0 \end{pmatrix}$ | $\begin{pmatrix} 0 & 0 & 0 & i \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ i & 0 & 0 & 0 \end{pmatrix}$ |
+ +This set of commutation relations is known as the Lie algebra for the SL(2, c), namely the group of two-by-two elements with unit determinants. Their elements are complex. This set is also the Lorentz group performing Lorentz transformations on the four-dimensional Minkowski space. + +This set has many useful subgroups. For the group SL(2, c), there is a subgroup consisting only of real matrices, generated by the two-by-two matrices given in Table 1. This three-parameter subgroup is precisely the Sp(2) group we used in Sections 3 and 3.2. Their generators satisfy the Lie algebra given in Equation (43). + +In addition, this group has the following Wigner subgroups governing the internal space-time symmetries of particles in the Lorentz-covariant world [1]: + +1. The $J_i$ matrices form a closed set of commutation relations. The subgroup generated by these Hermitian matrices is SU(2) for electron spins. The corresponding rotation group does not change the four-momentum of the particle at rest. This is Wigner's little group for massive particles. +If the particle is at rest, the two-by-two form of the four-vector is given by Equation (76). The Lorentz transformation generated by $J_3$ takes the form + +$$ \begin{pmatrix} e^{i\phi/2} & 0 \\ 0 & e^{-i\phi/2} \end{pmatrix} \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix} \begin{pmatrix} e^{-i\phi/2} & 0 \\ 0 & e^{i\phi/2} \end{pmatrix} = \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix} \quad (84) $$ + +Similar computations can be carried out for $J_1$ and $J_2$. + +2. There is another Sp(2) subgroup, generated by $K_1$, $K_2$, and $J_3$. They satisfy the commutation relations + +$$ [K_1, K_2] = -iJ_3, \quad [J_3, K_1] = iK_2, \quad [K_2, J_3] = iK_1. \quad (85) $$ + +The Wigner transformation generated by these two-by-two matrices leave the momentum four-vector of Equation (77) invariant. 
For instance, the transformation matrix generated by $K_2$ takes the form

$$ \exp(-i\xi K_2) = \begin{pmatrix} \cosh(\xi/2) & -i \sinh(\xi/2) \\ i \sinh(\xi/2) & \cosh(\xi/2) \end{pmatrix} \quad (86) $$

and the Wigner transformation takes the form

$$ \begin{pmatrix} \cosh(\xi/2) & i \sinh(\xi/2) \\ -i \sinh(\xi/2) & \cosh(\xi/2) \end{pmatrix} \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} \begin{pmatrix} \cosh(\xi/2) & i \sinh(\xi/2) \\ -i \sinh(\xi/2) & \cosh(\xi/2) \end{pmatrix} = \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} \quad (87) $$

Computations with $K_1$ and $J_3$ lead to the same result.

---PAGE_BREAK---

Since the determinant of the four-momentum matrix is negative, the particle has an imaginary mass. In the language of the four-by-four matrix, the transformation matrices leave the four-momentum of the form $(0, 1, 0, 0)$ invariant.

3. Furthermore, we can consider the following combinations of the generators:

$$N_1 = K_1 - J_2 = \begin{pmatrix} 0 & i \\ 0 & 0 \end{pmatrix}, \quad \text{and} \quad N_2 = K_2 + J_1 = \begin{pmatrix} 0 & 1 \\ 0 & 0 \end{pmatrix} \qquad (88)$$

Together with $J_3$, they satisfy the following commutation relations.

$$[N_1, N_2] = 0, \quad [N_1, J_3] = -iN_2, \quad [N_2, J_3] = iN_1 \qquad (89)$$

In order to understand this set of commutation relations, we can consider an $xy$ coordinate system in a two-dimensional space. Then the rotation around the origin is generated by

$$J_3 = -i \left( x \frac{\partial}{\partial y} - y \frac{\partial}{\partial x} \right) \qquad (90)$$

and the two translations are generated by

$$N_1 = -i \frac{\partial}{\partial x}, \quad \text{and} \quad N_2 = -i \frac{\partial}{\partial y} \qquad (91)$$

for the $x$ and $y$ directions respectively. These operators satisfy the commutation relations given in Equation (89).

The two-by-two matrices of Equation (88) generate the following transformation matrix.
+ +$$G(\gamma, \phi) = \exp[-i\gamma(N_1 \cos\phi + N_2 \sin\phi)] = \begin{pmatrix} 1 & \gamma e^{-i\phi} \\ 0 & 1 \end{pmatrix} \qquad (92)$$ + +The two-by-two form for the four-momentum for the massless particle is given by Equation (78). The computation of the Hermitian transformation using this matrix is + +$$\begin{pmatrix} 1 & \gamma e^{-i\phi} \\ 0 & 1 \end{pmatrix} \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \begin{pmatrix} 1 & 0 \\ \gamma e^{i\phi} & 1 \end{pmatrix} = \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \qquad (93)$$ + +confirming that $N_1$ and $N_2$, together with $J_3$, are the generators of the $E(2)$-like little group for massless particles in the two-by-two representation. The transformation that does this in the physical world is described in the following section. + +## 5.2. $E(2)$-Like Symmetry of Massless Particles + +From the four-by-four generators of $K_{1,2}$ and $J_{1,2}$, we can write + +$$N_1 = \begin{pmatrix} 0 & 0 & i & 0 \\ 0 & 0 & i & 0 \\ i & -i & 0 & 0 \\ 0 & 0 & 0 & 0 \end{pmatrix}, \quad \text{and} \quad N_2 = \begin{pmatrix} 0 & 0 & 0 & i \\ 0 & 0 & 0 & i \\ 0 & 0 & 0 & 0 \\ i & -i & 0 & 0 \end{pmatrix} \qquad (94)$$ +---PAGE_BREAK--- + +These matrices lead to the transformation matrix of the form + +$$ +G(\gamma, \phi) = \begin{pmatrix} +1 + \frac{\gamma^2}{2} & -\frac{\gamma^2}{2} & \gamma \cos \phi & \gamma \sin \phi \\ +\frac{\gamma^2}{2} & 1 - \frac{\gamma^2}{2} & \gamma \cos \phi & \gamma \sin \phi \\ +-\gamma \cos \phi & \gamma \cos \phi & 1 & 0 \\ +-\gamma \sin \phi & \gamma \sin \phi & 0 & 1 +\end{pmatrix} \quad (95) +$$ + +This matrix leaves the four-momentum invariant, as we can see from + +$$ +G(\gamma, \phi) \begin{pmatrix} 1 \\ 1 \\ 0 \\ 0 \end{pmatrix} = \begin{pmatrix} 1 \\ 1 \\ 0 \\ 0 \end{pmatrix} \tag{96} +$$ + +When it is applied to the photon four-potential + +$$ +G(\gamma, \phi) \begin{pmatrix} A_0 \\ A_3 \\ A_1 \\ A_2 \end{pmatrix} = \begin{pmatrix} A_0 \\ A_3 \\ A_1 \\ A_2 \end{pmatrix} + \gamma 
(A_1 \cos \phi + A_2 \sin \phi) \begin{pmatrix} 1 \\ 1 \\ 0 \\ 0 \end{pmatrix} \quad (97)
$$

with the Lorentz condition which leads to $A_3 = A_0$ in the zero mass case. Gauge transformations are well known for electromagnetic fields and photons. Thus Wigner's little group leads to gauge transformations.

In the two-by-two representation, the electromagnetic four-potential takes the form

$$
\begin{pmatrix}
2A_0 & A_1 - iA_2 \\
A_1 + iA_2 & 0
\end{pmatrix}
\qquad
(98)
$$

with the Lorentz condition $A_3 = A_0$. Then the two-by-two form of Equation (97) is

$$
\begin{pmatrix} 1 & \gamma e^{-i\phi} \\ 0 & 1 \end{pmatrix} \begin{pmatrix} 2A_0 & A_1 - iA_2 \\ A_1 + iA_2 & 0 \end{pmatrix} \begin{pmatrix} 1 & 0 \\ \gamma e^{i\phi} & 1 \end{pmatrix} \quad (99)
$$

which becomes

$$
\begin{pmatrix} 2A_0 & A_1 - iA_2 \\ A_1 + iA_2 & 0 \end{pmatrix} + \begin{pmatrix} 2\gamma (A_1 \cos \phi + A_2 \sin \phi) & 0 \\ 0 & 0 \end{pmatrix} \quad (100)
$$

This is the two-by-two equivalent of the gauge transformation given in Equation (97).

For massless spin-1/2 particles starting with the two-by-two expression of $G(\gamma, \phi)$ given in Equation (92), and considering the spinors

$$
u = \begin{pmatrix} 1 \\ 0 \end{pmatrix}, \quad \text{and} \quad v = \begin{pmatrix} 0 \\ 1 \end{pmatrix} \tag{101}
$$

for spin-up and spin-down states respectively, we have

$$
Gu = u, \quad \text{and} \quad Gv = v + \gamma e^{-i\phi} u
\quad (102)
$$

This means that the spinor $u$ for spin up is invariant under the gauge transformation while $v$ is not. Thus, the polarization of massless spin-1/2 particles, such as neutrinos, is a consequence of the gauge invariance. We shall continue this discussion in Section 7.

---PAGE_BREAK---

### 5.3. Boosts along the z Axis

In Sections 4.1 and 5.1, we studied Wigner transformations for fixed values of the four-momenta.
The next question is what happens when the system is boosted along the z direction, with the transformation + +$$ \begin{pmatrix} t' \\ z' \end{pmatrix} = \begin{pmatrix} \cosh \eta & \sinh \eta \\ \sinh \eta & \cosh \eta \end{pmatrix} \begin{pmatrix} t \\ z \end{pmatrix} \qquad (103) $$ + +Then the four-momenta become + +$$ (\cosh \eta, \sinh \eta, 0, 0), \quad (\sinh \eta, \cosh \eta, 0, 0), \quad e^{\eta}(1, 1, 0, 0) \qquad (104) $$ + +respectively for massive, imaginary, and massless particles cases. In the two-by-two representation, the boost matrix is + +$$ \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \qquad (105) $$ + +and the four-momenta of Equation (104) become + +$$ \begin{pmatrix} e^\eta & 0 \\ 0 & e^{-\eta} \end{pmatrix}, \quad \begin{pmatrix} e^\eta & 0 \\ 0 & -e^{-\eta} \end{pmatrix}, \quad \begin{pmatrix} e^\eta & 0 \\ 0 & 0 \end{pmatrix} \qquad (106) $$ + +respectively. These matrices become Equations (76)–(78) respectively when $\eta = 0$. + +We are interested in Lorentz transformations which leave a given non-zero momentum invariant. We can consider a Lorentz boost along the direction preceded and followed by identical rotation matrices, as described in Figure 1 and the transformation matrix as + +$$ \begin{pmatrix} \cos(\alpha/2) & -\sin(\alpha/2) \\ \sin(\alpha/2) & \cos(\alpha/2) \end{pmatrix} \begin{pmatrix} \cosh \chi & -\sinh \chi \\ -\sinh \chi & \cosh \chi \end{pmatrix} \begin{pmatrix} \cos(\alpha/2) & -\sin(\alpha/2) \\ \sin(\alpha/2) & \cos(\alpha/2) \end{pmatrix} \qquad (107) $$ + +which becomes + +$$ \begin{pmatrix} (\cos \alpha) \cosh \chi & -\sinh \chi - (\sin \alpha) \cosh \chi \\ -\sinh \chi + (\sin \alpha) \cosh \chi & (\cos \alpha) \cosh \chi \end{pmatrix} \qquad (108) $$ + +**Figure 1.** Bargmann and Wigner decompositions. (a) Bargmann decomposition; (b) Wigner decomposition. In the Bargmann decomposition, we start from a momentum along the z direction. 
We can rotate, boost, and rotate to bring the momentum to the original position. The resulting matrix is the product of one boost and two rotation matrices. In the Wigner decomposition, the particle is boosted back to the frame where the Wigner transformation can be applied. Make a Wigner transformation there and come back to the original state of the momentum. This process also can also be written as the product of three simple matrices. +---PAGE_BREAK--- + +Except the sign of $\chi$, the two-by-two matrices of Equations (107) and (108) are identical with those given in Section 3.2. The only difference is the sign of the parameter $\chi$. We are thus ready to interpret this expression in terms of physics. + +1. If the particle is massive, the off-diagonal elements of Equation (108) have opposite signs, and this matrix can be decomposed into + +$$ \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \quad (109) $$ + +with + +$$ \cos(\theta/2) = (\cosh \chi) \cos \alpha, \quad \text{and} \quad e^{2\eta} = \frac{\cosh(\chi) \sin \alpha + \sinh \chi}{\cosh(\chi) \sin \alpha - \sinh \chi} \quad (110) $$ + +and + +$$ e^{2\eta} = \frac{p_0 + p_z}{p_0 - p_z} \quad (111) $$ + +According to Equation (109) the first matrix (far right) reduces the particle momentum to zero. The second matrix rotates the particle without changing the momentum. The third matrix boosts the particle to restore its original momentum. This is the extension of Wigner's original idea to moving particles. + +2. 
If the particle has an imaginary mass, the off-diagonal elements of Equation (108) have the same sign, and this matrix can be decomposed into + +$$ \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \begin{pmatrix} \cosh(\lambda/2) & -\sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix} \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \quad (112) $$ + +with + +$$ \cosh(\lambda/2) = (\cosh \chi) \cos \alpha, \quad \text{and} \quad e^{2\eta} = \frac{\sinh \chi + \cosh(\chi) \sin \alpha}{\cosh(\chi) \sin \alpha - \sinh \chi} \quad (113) $$ + +and + +$$ e^{2\eta} = \frac{p_0 + p_z}{p_0 - p_z} \quad (114) $$ + +This is also a three-step operation. The first matrix brings the particle momentum to the zero-energy state with $p_0 = 0$. Boosts along the x or y direction do not change the four-momentum. We can then boost the particle back to restore its momentum. This operation is also an extension of Wigner's original little group. Thus, it is quite appropriate to call the formulas of Equations (109) and (112) Wigner decompositions. + +3. If the particle mass is zero with + +$$ \sinh \chi = (\cosh \chi) \sin \alpha \quad (115) $$ + +the $\eta$ parameter becomes infinite, and the Wigner decomposition does not appear to be useful. We can then go back to the Bargmann decomposition of Equation (107). With the condition of Equation (115), Equation (108) becomes + +$$ \begin{pmatrix} 1 & -\gamma \\ 0 & 1 \end{pmatrix} \quad (116) $$ + +with + +$$ \gamma = 2 \sinh \chi \quad (117) $$ + +The decomposition ending with a triangular matrix is called the Iwasawa decomposition [16,22] and its physical interpretation was given in Section 5.2. The $\gamma$ parameter does not depend on $\eta$. +---PAGE_BREAK--- + +Thus, we have given physical interpretations to the Bargmann and Wigner decompositions given in Section 3.2. Consider what happens when the momentum becomes large. Then $\eta$ becomes large for nonzero mass cases.
All three four-momenta in Equation (106) become + +$$e^{\eta} \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \qquad (118)$$ + +As for the Bargmann-Wigner matrices, they become the triangular matrix of Equation (116), with $\gamma = \sin(\theta/2)e^{\eta}$ and $\gamma = \sinh(\lambda/2)e^{\eta}$, respectively for the massive and imaginary-mass cases. + +In Section 5.2, we concluded that the triangular matrix corresponds to gauge transformations. However, particles with imaginary mass are not observed. For massive particles, we can start with the three-dimensional rotation group. The rotation around the z axis is called helicity, and remains invariant under the boost along the z direction. As for the transverse rotations, they become gauge transformation as illustrated in Table 6. + +**Table 6.** Covariance of the energy-momentum relation, and covariance of the internal space-time symmetry. Under the Lorentz boost along the z direction, $J_3$ remains invariant, and this invariant component of the angular momentum is called the helicity. The transverse component $J_1$ and $J_2$ collapse into a gauge transformation. The $\gamma$ parameter for the massless case has been studied in earlier papers in the four-by-four matrix formulation of Wigner's little groups [8,21]. + +
| Massive, Slow | Covariance | Massless, Fast |
|---|---|---|
| $E = p^2/2m$ | Einstein's $E = mc^2$ | $E = cp$ |
| $J_3$ |  | Helicity |
| $J_1, J_2$ | Wigner's Little Group | Gauge Transformation |
+ +### 5.4. Conjugate Transformations + +The most general form of the SL(2, c) matrix is given in Equation (66). Transformation operators for the Lorentz group are given in exponential form as: + +$$D = \exp \left\{ -i \sum_{i=1}^{3} (\theta_i J_i + \eta_i K_i) \right\} \qquad (119)$$ + +where the $J_i$ are the generators of rotations and the $K_i$ are the generators of proper Lorentz boosts. They satisfy the Lie algebra given in Equation (43). This set of commutation relations is invariant under the sign change of the boost generators $K_i$. Thus, we can consider "dot conjugation" defined as + +$$\dot{D} = \exp \left\{ -i \sum_{i=1}^{3} (\theta_i J_i - \eta_i K_i) \right\} \qquad (120)$$ + +Since $K_i$ are anti-Hermitian while $J_i$ are Hermitian, the Hermitian conjugate of the above expression is + +$$D^{\dagger} = \exp \left\{ -i \sum_{i=1}^{3} (-\theta_i J_i + \eta_i K_i) \right\} \qquad (121)$$ + +while the Hermitian conjugate of G is + +$$\dot{D}^{\dagger} = \exp \left\{ -i \sum_{i=1}^{3} (-\theta_i J_i - \eta_i K_i) \right\} \qquad (122)$$ +---PAGE_BREAK--- + +Since we understand the rotation around the z axis, we can now restrict the kinematics to the +zt plane, and work with the Sp(2) symmetry. Then the D matrices can be considered as Bargmann +decompositions. First, D and $\tilde{D}$, and their Hermitian conjugates are + +$$ +D(\alpha, \chi) = \begin{pmatrix} (\cos \alpha) \cosh \chi & \sinh \chi - (\sin \alpha) \cosh \chi \\ \sinh \chi + (\sin \alpha) \cosh \chi & (\cos \alpha) \cosh \chi \end{pmatrix} \quad (123) +$$ + +$$ +\dot{D}(\alpha, \chi) = \begin{pmatrix} (\cos \alpha) \cosh \chi & -\sinh \chi - (\sin \alpha) \cosh \chi \\ -\sinh \chi + (\sin \alpha) \cosh \chi & (\cos \alpha) \cosh \chi \end{pmatrix} \quad (124) +$$ + +These matrices correspond to the "D loops" given in Figure 2a,b respectively. The "dot" conjugation changes the direction of boosts. The dot conjugation leads to the inversion of the space which is called the parity operation. 
+ +We can also consider changing the direction of rotations. Then they result in the Hermitian +conjugates. We can write their matrices as + +$$ +D^{\dagger}(\alpha, \chi) = \begin{pmatrix} (\cos \alpha) \cosh \chi & \sinh \chi + (\sin \alpha) \cosh \chi \\ \sinh \chi - (\sin \alpha) \cosh \chi & (\cos \alpha) \cosh \chi \end{pmatrix} \qquad (125) +$$ + +$$ +\dot{D}^{\dagger}(\alpha, \chi) = \begin{pmatrix} (\cos \alpha) \cosh \chi & -\sinh \chi + (\sin \alpha) \cosh \chi \\ -\sinh \chi - (\sin \alpha) \cosh \chi & (\cos \alpha) \cosh \chi \end{pmatrix} \quad (126) +$$ + +From the exponential expressions from Equation (119) to Equation (122), it is clear that + +$$ +D^{\dagger} = \dot{D}^{-1}, \quad \text{and} \quad \dot{D}^{\dagger} = D^{-1} \tag{127} +$$ + +The D loop given in Figure 1 corresponds to $\dot{D}$. We shall return to these loops in Section 7. + +Figure 2. Four D-loops resulting from the Bargmann decomposition. (a) Bargmann decomposition from Figure 1; (b) Direction of the Lorentz boost is reversed; (c) Direction of rotation is reversed; (d) Both directions are reversed. These operations correspond to the space-inversion, charge conjugation, and the time reversal respectively. +---PAGE_BREAK--- + +## 6. Symmetries Derivable from the Poincaré Sphere + +The Poincaré sphere serves as the basic language for polarization physics. Its underlying language is the two-by-two coherency matrix. This coherency matrix contains the symmetry of SL(2, c) isomorphic to the Lorentz group applicable to three space-like and one time-like dimensions [4,6,7]. + +For polarized light propagating along the z direction, the amplitude ratio and phase difference of electric field x and y components traditionally determine the state of polarization. Hence, the polarization can be changed by adjusting the amplitude ratio or the phase difference or both. 
Usually, the optical device which changes amplitude is called an "attenuator" (or "amplifier") and the device which changes the relative phase a "phase shifter". + +Let us start with the Jones vector: + +$$ \begin{pmatrix} \psi_1(z,t) \\ \psi_2(z,t) \end{pmatrix} = \begin{pmatrix} a \exp[i(kz - \omega t)] \\ a \exp[i(kz - \omega t)] \end{pmatrix} \qquad (128) $$ + +To this vector, we can apply the phase shift matrix of Equation (79) which brings the Jones vector to: + +$$ \begin{pmatrix} \psi_1(z,t) \\ \psi_2(z,t) \end{pmatrix} = \begin{pmatrix} a \exp[i(kz - \omega t - \phi/2)] \\ a \exp[i(kz - \omega t + \phi/2)] \end{pmatrix} \qquad (129) $$ + +The generator of this phase-shifter is $J_3$ given in Table 5. + +The optical beam can be attenuated differently in the two directions. The resulting matrix is: + +$$ e^{-\mu} \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \qquad (130) $$ + +with the attenuation factors of $\exp(-\mu + \eta/2)$ and $\exp(-\mu - \eta/2)$ for the x and y directions respectively. We are interested only in the relative attenuation given in Equation (46) which leads to different amplitudes for the x and y components, and the Jones vector becomes: + +$$ \begin{pmatrix} \psi_1(z,t) \\ \psi_2(z,t) \end{pmatrix} = \begin{pmatrix} ae^{\eta/2} \exp[i(kz - \omega t - \phi/2)] \\ ae^{-\eta/2} \exp[i(kz - \omega t + \phi/2)] \end{pmatrix} \qquad (131) $$ + +The squeeze matrix of Equation (46) is generated by $K_3$ given in Table 1. + +The polarization is not always along the x and y axes, but can be rotated around the z axis using Equation (79) generated by $J_2$ given in Table 1. + +Among the rotation angles, the angle of 45° plays an important role in polarization optics. Indeed, if we rotate the squeeze matrix of Equation (46) by 45°, we end up with the squeeze matrix of Equation (45) generated by $K_1$ given also in Table 1.
+ +Each of these four matrices plays an important role in special relativity, as we discussed in Sections 3.2 and 6. Their respective roles in optics and particle physics are given in Table 7. +---PAGE_BREAK--- + +**Table 7.** Polarization optics and special relativity share the same mathematics. Each matrix has its clear role in both optics and relativity. The determinant of the Stokes or the four-momentum matrix remains invariant under Lorentz transformations. It is interesting to note that the decoherence parameter (least fundamental) in optics corresponds to the (mass)$^2$ (most fundamental) in particle physics. + +
| Polarization Optics | Transformation Matrix | Particle Symmetry |
|---|---|---|
| Phase shift by $\phi$ | $\begin{pmatrix} e^{-i\phi/2} & 0 \\ 0 & e^{i\phi/2} \end{pmatrix}$ | Rotation around $z$. |
| Rotation around $z$ | $\begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix}$ | Rotation around $y$. |
| Squeeze along $x$ and $y$ | $\begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix}$ | Boost along $z$. |
| Squeeze along $45^\circ$ | $\begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix}$ | Boost along $x$. |
| $a^4 (\sin \xi)^2$ | Determinant | $(\text{mass})^2$ |
+ +The most general form for the two-by-two matrix applicable to the Jones vector is the G matrix of Equation (66). This matrix is of course a representation of the SL(2, c) group. It brings the simplest Jones vector of Equation (128) to its most general form. + +## 6.1. Coherency Matrix + +However, the Jones vector alone cannot tell us whether the two components are coherent with each other. In order to address this important degree of freedom, we use the coherency matrix defined as [3,23] + +$$C = \begin{pmatrix} S_{11} & S_{12} \\ S_{21} & S_{22} \end{pmatrix} \qquad (132)$$ + +where + +$$\langle \psi_i^* \psi_j \rangle = \frac{1}{T} \int_0^T \psi_i^* (t + \tau) \psi_j(t) dt \qquad (133)$$ + +where T is a sufficiently long time interval. Then, those four elements become [4] + +$$S_{11} = \langle \psi_1^* \psi_1 \rangle = a^2, \quad S_{12} = \langle \psi_1^* \psi_2 \rangle = a^2 (\cos \xi) e^{-i\phi} \qquad (134)$$ + +$$S_{21} = \langle \psi_2^* \psi_1 \rangle = a^2(\cos\xi)e^{+i\phi}, \quad S_{22} = \langle \psi_2^* \psi_2 \rangle = a^2 \qquad (135)$$ + +The diagonal elements are the absolute values of $\psi_1$ and $\psi_2$ respectively. The angle $\phi$ could be different from the value of the phase-shift angle given in Equation (79), but this difference does not play any role in the reasoning. The off-diagonal elements could be smaller than the product of $\psi_1$ and $\psi_2$, if the two polarizations are not completely coherent. + +The angle $\xi$ specifies the degree of coherency. If it is zero, the system is fully coherent, while the system is totally incoherent if $\xi$ is $90^\circ$. 
This can therefore be called the “decoherence angle.” + +While the most general form of the transformation applicable to the Jones vector is G of Equation (66), the transformation applicable to the coherency matrix is + +$$C' = G C G^{\dagger} \qquad (136)$$ + +The determinant of the coherency matrix is invariant under this transformation, and it is + +$$\det(C) = a^4 (\sin \xi)^2 \qquad (137)$$ + +Thus, angle $\xi$ remains invariant. In the language of the Lorentz transformation applicable to the four-vector, the determinant is equivalent to the (mass)$^2$ and is therefore a Lorentz-invariant quantity. +---PAGE_BREAK--- + +## 6.2. Two Radii of the Poincaré Sphere + +Let us write explicitly the transformation of Equation (136) as + +$$ \begin{pmatrix} S'_{11} & S'_{12} \\ S'_{21} & S'_{22} \end{pmatrix} = \begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix} \begin{pmatrix} S_{11} & S_{12} \\ S_{21} & S_{22} \end{pmatrix} \begin{pmatrix} \alpha^* & \gamma^* \\ \beta^* & \delta^* \end{pmatrix} \quad (138) $$ + +It is then possible to construct the following quantities, + +$$ S_0 = \frac{S_{11} + S_{22}}{2}, \qquad S_3 = \frac{S_{11} - S_{22}}{2} \quad (139) $$ + +$$ S_1 = \frac{S_{12} + S_{21}}{2}, \qquad S_2 = \frac{S_{12} - S_{21}}{2i} \quad (140) $$ + +These are known as the Stokes parameters, and constitute a four-vector ($S_0, S_3, S_1, S_2$) under the Lorentz transformation. + +In the Jones vector of Equation (128), the amplitudes of the two orthogonal components are equal. Thus, the two diagonal elements of the coherency matrix are equal. This leads to $S_3 = 0$, and the problem is reduced from the sphere to a circle. 
In the resulting two-dimensional subspace, we can introduce the polar coordinate system with + +$$ R = \sqrt{S_1^2 + S_2^2} \quad (141) $$ + +$$ S_1 = R \cos \phi \quad (142) $$ + +$$ S_2 = R \sin \phi \quad (143) $$ + +The radius $R$ is the radius of this circle, and is + +$$ R = a^2 \cos \xi \quad (144) $$ + +The radius $R$ takes its maximum value $S_0$ when $\xi = 0^\circ$. It decreases as $\xi$ increases and vanishes when $\xi = 90^\circ$. This aspect of the radius $R$ is illustrated in Figure 3. + +**Figure 3.** Radius of the Poincaré sphere. The radius $R$ takes its maximum value $S_0$ when the decoherence angle $\xi$ is zero. It becomes smaller as $\xi$ increases. It becomes zero when the angle reaches $90^\circ$. +---PAGE_BREAK--- + +In order to see its implications in special relativity, let us go back to the four-momentum matrix of $m(1,0,0,0)$. Its determinant is $m^2$ and remains invariant. Likewise, the determinant of the coherency matrix of Equation (132) should also remain invariant. The determinant in this case is + +$$S_0^2 - R^2 = a^4 \sin^2 \xi \quad (145)$$ + +This quantity remains invariant under the Hermitian transformation of Equation (138), which is a Lorentz transformation as discussed in Sections 3.2 and 6. This aspect is shown on the last row of Table 7. + +The coherency matrix then becomes + +$$C = a^2 \begin{pmatrix} 1 & (\cos \xi)e^{-i\phi} \\ (\cos \xi)e^{i\phi} & 1 \end{pmatrix} \quad (146)$$ + +Since the angle $\phi$ does not play any essential role, we can let $\phi = 0$, and write the coherency matrix as + +$$C = a^2 \begin{pmatrix} 1 & \cos \xi \\ \cos \xi & 1 \end{pmatrix} \quad (147)$$ + +The determinant of the above two-by-two matrix is + +$$a^4 (1 - \cos^2 \xi) = a^4 \sin^2 \xi \quad (148)$$ + +Since the Lorentz transformation leaves the determinant invariant, the change in this $\xi$ variable is not a Lorentz transformation.
It is of course possible to construct a larger group in which this variable plays a role in a group transformation [6], but here we are more interested in its role in a particle gaining a mass from zero or the mass becoming zero. + +### 6.3. Extra-Lorentzian Symmetry + +The coherency matrix of Equation (146) can be diagonalized to + +$$a^2 \begin{pmatrix} 1 + \cos \xi & 0 \\ 0 & 1 - \cos \xi \end{pmatrix} \quad (149)$$ + +by a rotation. Let us then go back to the four-momentum matrix of Equation (70). If $p_x = p_y = 0$, and $p_z = p_0 \cos \xi$, we can write this matrix as + +$$p_0 \begin{pmatrix} 1 + \cos \xi & 0 \\ 0 & 1 - \cos \xi \end{pmatrix} \quad (150)$$ + +Thus, with this extra variable, it is possible to study the little groups for variable masses, including the small-mass limit and the zero-mass case. + +For a fixed value of $p_0$, the $(mass)^2$ becomes + +$$(mass)^2 = (p_0 \sin \xi)^2, \quad \text{and} \quad (momentum)^2 = (p_0 \cos \xi)^2 \quad (151)$$ + +resulting in + +$$(energy)^2 = (mass)^2 + (momentum)^2 \quad (152)$$ + +This transition is illustrated in Figure 4. We are interested in reaching a point on the light cone from mass hyperbola while keeping the energy fixed. According to this figure, we do not have to make +---PAGE_BREAK--- + +an excursion to infinite-momentum limit. If the energy is fixed during this process, Equation (152) tells +the mass and momentum relation, and Figure 5 illustrates this relation. + +**Figure 4.** Transition from the massive to massless case. (a) Transition within the framework of the Lorentz group; (b) Transition allowed in the symmetry of the Poincaré sphere. Within the framework of the Lorentz group, it is not possible to go from the massive to massless case directly, because it requires the change in the mass which is a Lorentz-invariant quantity. The only way is to move to infinite momentum and jump from the hyperbola to the light cone, and come back. 
The extra symmetry of the Poincaré sphere allows a direct transition + +**Figure 5.** Energy-momentum-mass relation. This circle illustrates the case where the energy is fixed, while the mass and momentum are related according to the triangular rule. The value of the angle $\xi$ changes from zero to 180°. The particle mass is negative for negative values of this angle. However, in the Lorentz group, only $(mass)^2$ is a relevant variable, and negative masses might play a role for theoretical purposes. +---PAGE_BREAK--- + +Within the framework of the Lorentz group, it is possible, by making an excursion to infinite momentum where the mass hyperbola coincides with the light cone, to then come back to the desired point. On the other hand, the mass formula of Equation (151) allows us to go there directly. The decoherence mechanism of the coherency matrix makes this possible. + +**7. Small-Mass and Massless Particles** + +We now have a mathematical tool to reduce the mass of a massive particle from its positive value to zero. During this process, the Lorentz-boosted rotation matrix becomes a gauge transformation for the spin-1 particle, as discussed Section 5.2. For spin-1/2 particles, there are two issues. + +1. It was seen in Section 5.2 that the requirement of gauge invariance lead to a polarization of massless spin-1/2 particle, such as neutrinos. What happens to anti-neutrinos? + +2. There are strong experimental indications that neutrinos have a small mass. What happens to the $E(2)$ symmetry? + +**7.1. Spin-1/2 Particles** + +Let us go back to the two-by-two matrices of Section 5.4, and the two-by-two D matrix. For a massive particle, its Wigner decomposition leads to + +$$D = \begin{pmatrix} \cos(\theta/2) & -e^{-\eta} \sin(\theta/2) \\ e^{\eta} \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \qquad (153)$$ + +This matrix is applicable to the spinors *u* and *v* defined in Equation (101) respectively for the spin-up and spin-down states along the *z* direction. 
+ +Since the Lie algebra of SL(2, c) is invariant under the sign change of the $K_i$ matrices, we can consider the “dotted” representation, where the system is boosted in the opposite direction, while the direction of rotations remains the same. Thus, the Wigner decomposition leads to + +$$\dot{D} = \begin{pmatrix} \cos(\theta/2) & -e^{\eta} \sin(\theta/2) \\ e^{-\eta} \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \qquad (154)$$ + +with its spinors + +$$\dot{u} = \begin{pmatrix} 1 \\ 0 \end{pmatrix}, \quad \text{and} \quad \dot{v} = \begin{pmatrix} 0 \\ 1 \end{pmatrix} \qquad (155)$$ + +For anti-neutrinos, the helicity is reversed but the momentum is unchanged. Thus, $D^\dagger$ is the appropriate matrix. However, $D^\dagger = \dot{D}^{-1}$ as was noted in Section 5.4. Thus, we shall use $\dot{D}$ for anti-neutrinos. + +When the particle mass becomes very small, + +$$e^{-\eta} = \frac{m}{2p} \qquad (156)$$ + +becomes small. Thus, if we let + +$$e^{\eta} \sin(\theta/2) = \gamma, \quad \text{and} \quad e^{-\eta} \sin(\theta/2) = \epsilon^2 \qquad (157)$$ + +then the *D* matrix of Equation (153) and the $\dot{D}$ of Equation (154) become + +$$\begin{pmatrix} 1 - \gamma\epsilon^2/2 & -\epsilon^2 \\ \gamma & 1 - \gamma\epsilon^2/2 \end{pmatrix}, \quad \text{and} \quad \begin{pmatrix} 1 - \gamma\epsilon^2/2 & -\gamma \\ \epsilon^2 & 1 - \gamma\epsilon^2/2 \end{pmatrix} \qquad (158)$$ +---PAGE_BREAK--- + +respectively where $\gamma$ is an independent parameter and + +$$ \epsilon^2 = \gamma \left( \frac{m}{2p} \right)^2 \qquad (159) $$ + +When the particle mass becomes zero, they become + +$$ \begin{pmatrix} 1 & 0 \\ \gamma & 1 \end{pmatrix}, \quad \text{and} \quad \begin{pmatrix} 1 & -\gamma \\ 0 & 1 \end{pmatrix} \qquad (160) $$ + +applicable to the spinors $(u, v)$ and $(\dot{u}, \dot{v})$ respectively.
+ +For neutrinos, + +$$ \begin{pmatrix} 1 & 0 \\ \gamma & 1 \end{pmatrix} \begin{pmatrix} 1 \\ 0 \end{pmatrix} = \begin{pmatrix} 1 \\ \gamma \end{pmatrix}, \quad \text{and} \quad \begin{pmatrix} 1 & 0 \\ \gamma & 1 \end{pmatrix} \begin{pmatrix} 0 \\ 1 \end{pmatrix} = \begin{pmatrix} 0 \\ 1 \end{pmatrix} \qquad (161) $$ + +For anti-neutrinos, + +$$ \begin{pmatrix} 1 & -\gamma \\ 0 & 1 \end{pmatrix} \begin{pmatrix} 1 \\ 0 \end{pmatrix} = \begin{pmatrix} 1 \\ 0 \end{pmatrix}, \quad \text{and} \quad \begin{pmatrix} 1 & -\gamma \\ 0 & 1 \end{pmatrix} \begin{pmatrix} 0 \\ 1 \end{pmatrix} = \begin{pmatrix} -\gamma \\ 1 \end{pmatrix} \qquad (162) $$ + +It was noted in Section 5.2 that the triangular matrices of Equation (160) perform gauge transformations. Thus, for Equations (161) and (162) the requirement of gauge invariance leads to the polarization of neutrinos. The neutrinos are left-handed while the anti-neutrinos are right-handed. Since, however, nature cannot tell the difference between the dotted and undotted representations, the Lorentz group cannot tell which neutrino is right handed. It can say only that the neutrinos and anti-neutrinos are oppositely polarized. + +If the neutrino has a small mass, the gauge invariance is modified to + +$$ \begin{pmatrix} 1 - \gamma\epsilon^2/2 & -\epsilon^2 \\ \gamma & 1 - \gamma\epsilon^2/2 \end{pmatrix} \begin{pmatrix} 0 \\ 1 \end{pmatrix} = \begin{pmatrix} 0 \\ 1 \end{pmatrix} - \epsilon^2 \begin{pmatrix} 1 \\ \gamma/2 \end{pmatrix} \qquad (163) $$ + +and + +$$ \begin{pmatrix} 1 - \gamma\epsilon^2/2 & -\gamma \\ \epsilon^2 & 1 - \gamma\epsilon^2/2 \end{pmatrix} \begin{pmatrix} 1 \\ 0 \end{pmatrix} = \begin{pmatrix} 1 \\ 0 \end{pmatrix} + \epsilon^2 \begin{pmatrix} -\gamma/2 \\ 1 \end{pmatrix} \qquad (164) $$ + +respectively for neutrinos and anti-neutrinos. Thus the violation of the gauge invariance in both cases is proportional to $\epsilon^2$ which is $\gamma m^2/4p^2$. + +## 7.2.
Small-Mass Neutrinos in the Real World + +Whether neutrinos have mass or not and the consequences of this relative to the Standard Model and lepton number is the subject of much theoretical speculation [24,25], and of cosmology [26], nuclear reactors [27], and high energy experimentations [28,29]. Neutrinos are fast becoming an important component of the search for dark matter and dark radiation [30]. Their importance within the Standard Model is reflected by the fact that they are the only particles which seem to exist with only one direction of chirality, i.e., only left-handed neutrinos have been confirmed to exist so far. + +It was speculated some time ago that neutrinos in constant electric and magnetic fields would acquire a small mass, and that right-handed neutrinos would be trapped within the interaction field [31]. Solving generalized electroweak models using left- and right-handed neutrinos has been discussed recently [32]. Today these right-handed neutrinos which do not participate in weak interactions are called “sterile” neutrinos [33]. A comprehensive discussion of the place of neutrinos in the scheme of physics has been given by Drewes [30]. We should note also that the three different neutrinos, namely $v_e$, $v_\mu$, and $v_\tau$, may have different masses [34]. +---PAGE_BREAK--- + +**8. Scalars, Four-Vectors, and Four-Tensors** + +In Sections 5 and 7, our primary interest has been the two-by-two matrices applicable to spinors for spin-1/2 particles. Since we also used four-by-four matrices, we indirectly studied the four-component particle consisting of spin-1 and spin-zero components. + +If there are two spin 1/2 states, we are accustomed to construct one spin-zero state, and one spin-one state with three degeneracies. + +In this paper, we are confronted with two spinors, but each spinor can also be dotted. For this reason, there are 16 orthogonal states consisting of spin-one and spin-zero states. How many spin-zero states? 
How many spin-one states? + +For particles at rest, it is known that the addition of two one-half spins result in spin-zero and spin-one states. In this paper, we have two different spinors behaving differently under the Lorentz boost. Around the z direction, both spinors are transformed by + +$$Z(\phi) = \exp(-i\phi J_3) = \begin{pmatrix} e^{-i\phi/2} & 0 \\ 0 & e^{i\phi/2} \end{pmatrix} \qquad (165)$$ + +However, they are boosted by + +$$B(\eta) = \exp(-i\eta K_3) = \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \qquad (166)$$ + +$$\dot{B}(\eta) = \exp(i\eta K_3) = \begin{pmatrix} e^{-\eta/2} & 0 \\ 0 & e^{\eta/2} \end{pmatrix} \qquad (167)$$ + +applicable to the undotted and dotted spinors respectively. These two matrices commute with each other, and also with the rotation matrix Z(φ) of Equation (165). Since K₃ and J₃ commute with each other, we can work with the matrix Q(η, φ) defined as + +$$Q(\eta, \phi) = B(\eta)Z(\phi) = \begin{pmatrix} e^{(\eta-i\phi)/2} & 0 \\ 0 & e^{-(\eta-i\phi)/2} \end{pmatrix} \qquad (168)$$ + +$$\dot{Q}(\eta, \phi) = \dot{B}(\eta)\dot{Z}(\phi) = \begin{pmatrix} e^{-(\eta+i\phi)/2} & 0 \\ 0 & e^{(\eta+i\phi)/2} \end{pmatrix} \qquad (169)$$ + +When this combined matrix is applied to the spinors, + +$$Q(\eta, \phi)u = e^{(\eta-i\phi)/2}u, \quad Q(\eta, \phi)v = e^{-(\eta-i\phi)/2}v \qquad (170)$$ + +$$\dot{Q}(\eta, \phi)\dot{u} = e^{-(\eta+i\phi)/2}\dot{u}, \quad \dot{Q}(\eta, \phi)\dot{v} = e^{(\eta+i\phi)/2}\dot{v} \qquad (171)$$ + +If the particle is at rest, we can construct the combinations + +$$uu, \quad \frac{1}{\sqrt{2}}(uv + vu), \quad vv \qquad (172)$$ + +to construct the spin-1 state, and + +$$\frac{1}{\sqrt{2}}(uv - vu) \qquad (173)$$ + +for the spin-zero state. There are four bilinear states. In the SL(2, c) regime, there are two dotted spinors. If we include both dotted and undotted spinors, there are 16 independent bilinear combinations. They are given in Table 8. 
This table also gives the effect of the operation of Q(η, φ). +---PAGE_BREAK--- + +**Table 8.** Sixteen combinations of the SL(2,c) spinors. In the SU(2) regime, there are two spinors leading to four bilinear forms. In the SL(2,c) world, there are two undotted and two dotted spinors. These four spinors lead to 16 independent bilinear combinations. + +
| Spin 1 | Spin 0 |
|---|---|
| $uu, \quad \frac{1}{\sqrt{2}}(uv + vu), \quad vv$ | $\frac{1}{\sqrt{2}}(uv - vu)$ |
| $\dot{u}\dot{u}, \quad \frac{1}{\sqrt{2}}(\dot{u}\dot{v} + \dot{v}\dot{u}), \quad \dot{v}\dot{v}$ | $\frac{1}{\sqrt{2}}(\dot{u}\dot{v} - \dot{v}\dot{u})$ |
| $u\dot{u}, \quad \frac{1}{\sqrt{2}}(u\dot{v} + v\dot{u}), \quad v\dot{v}$ | $\frac{1}{\sqrt{2}}(u\dot{v} - v\dot{u})$ |
| $\dot{u}u, \quad \frac{1}{\sqrt{2}}(\dot{u}v + \dot{v}u), \quad \dot{v}v$ | $\frac{1}{\sqrt{2}}(\dot{u}v - \dot{v}u)$ |
+ +After the Operation of Q(η, φ) and $\dot{Q}(\eta, \phi)$ + +$$ +\begin{aligned} +e^{-i\phi} e^{\eta} u u, & \quad \frac{1}{\sqrt{2}} (uv + vu), \quad e^{i\phi} e^{-\eta} v v, \quad \frac{1}{\sqrt{2}} (uv - vu) \\ +e^{-i\phi} e^{-\eta} \dot{u} \dot{u}, & \quad \frac{1}{\sqrt{2}} (\dot{u}\dot{v} + \dot{v}\dot{u}), \quad e^{i\phi} e^{\eta} \dot{v} \dot{v}, \quad \frac{1}{\sqrt{2}} (\dot{u}\dot{v} - \dot{v}\dot{u}) \\ +e^{-i\phi} u \dot{u}, & \quad \frac{1}{\sqrt{2}} (e^{\eta} u \dot{v} + e^{-\eta} v \dot{u}), \quad e^{i\phi} v \dot{v}, \quad \frac{1}{\sqrt{2}} (e^{\eta} u \dot{v} - e^{-\eta} v \dot{u}) \\ +e^{-i\phi} \dot{u} u, & \quad \frac{1}{\sqrt{2}} (e^{-\eta} \dot{u} v + e^{\eta} \dot{v} u), \quad e^{i\phi} \dot{v} v, \quad \frac{1}{\sqrt{2}} (e^{-\eta} \dot{u} v - e^{\eta} \dot{v} u) +\end{aligned} +$$ + +Among the bilinear combinations given in Table 8, the following two are invariant under rotations and also under boosts. + +$$S = \frac{1}{\sqrt{2}}(uv - vu), \quad \text{and} \quad \dot{S} = -\frac{1}{\sqrt{2}}(\dot{u}\dot{v} - \dot{v}\dot{u}) \qquad (174)$$ + +They are thus scalars in the Lorentz-covariant world. Are they the same or different? Let us consider the following combinations + +$$S_+ = \frac{1}{\sqrt{2}}(S + \dot{S}), \quad \text{and} \quad S_- = \frac{1}{\sqrt{2}}(S - \dot{S}) \qquad (175)$$ + +Under the dot conjugation, $S_+$ remains invariant, but $S_-$ changes its sign. + +Under the dot conjugation, the boost is performed in the opposite direction. Therefore it is the operation of space inversion, and $S_+$ is a scalar while $S_-$ is called the pseudo-scalar. + +## 8.1.
Four-Vectors + +Let us consider the bilinear products of one dotted and one undotted spinor as $u\dot{u}$, $u\dot{v}$, $\dot{u}v$, $v\dot{v}$, and construct the matrix + +$$U = \begin{pmatrix} u\dot{v} & v\dot{v} \\ u\dot{u} & v\dot{u} \end{pmatrix} \qquad (176)$$ + +Under the rotation $Z(\phi)$ and the boost $B(\eta)$ they become + +$$ +\begin{pmatrix} +e^{\eta} u \dot{v} & e^{-i\phi} v \dot{v} \\ +e^{i\phi} u \dot{u} & e^{-\eta} v \dot{u} +\end{pmatrix} +\qquad +(177) +$$ + +Indeed, this matrix is consistent with the transformation properties given in Table 8, and transforms like the four-vector + +$$ +\begin{pmatrix} +t+z & x-iy \\ +x+iy & t-z +\end{pmatrix} +\qquad +(178) +$$ + +This form was given in Equation (65), and played the central role throughout this paper. Under the space inversion, this matrix becomes + +$$ +\begin{pmatrix} +t-z & -(x-iy) \\ +-(x+iy) & t+z +\end{pmatrix} +\qquad +(179) +$$ +---PAGE_BREAK--- + +This space inversion is known as the parity operation. + +The form of Equation (176) for a particle or field with four-components, is given by $(V_0, V_z, V_x, V_y)$. The two-by-two form of this four-vector is + +$$ U = \begin{pmatrix} V_0 + V_z & V_x - iV_y \\ V_x + iV_y & V_0 - V_z \end{pmatrix} \qquad (180) $$ + +If boosted along the z direction, this matrix becomes + +$$ \begin{pmatrix} e^{\eta} (V_0 + V_z) & V_x - iV_y \\ V_x + iV_y & e^{-\eta} (V_0 - V_z) \end{pmatrix} \qquad (181) $$ + +In the mass-zero limit, the four-vector matrix of Equation (181) becomes + +$$ \begin{pmatrix} 2A_0 & A_x - iA_y \\ A_x + iA_y & 0 \end{pmatrix} \qquad (182) $$ + +with the Lorentz condition $A_0 = A_z$. The gauge transformation applicable to the photon four-vector was discussed in detail in Section 5.2. + +Let us go back to the matrix of Equation (180), we can construct another matrix $\dot{U}$. 
Since the dot conjugation leads to the space inversion,

$$ \dot{U} = \begin{pmatrix} \dot{u}v & \dot{v}v \\ \dot{u}u & \dot{v}u \end{pmatrix} \qquad (183) $$

Then

$$ \dot{u}v \approx (t-z), \qquad \dot{v}u \approx (t+z) \qquad (184) $$

$$ \dot{v}v \approx -(x-iy), \quad \dot{u}u \approx -(x+iy) \qquad (185) $$

where the symbol $\approx$ means "transforms like".

Thus, $U$ of Equation (176) and $\dot{U}$ of Equation (183) used up 8 of the 16 bilinear forms. Since there are two bilinear forms in the scalar and pseudo-scalar as given in Equation (175), we have to give interpretations to the six remaining bilinear forms.

## 8.2. Second-Rank Tensor

In this subsection, we are studying bilinear forms with both spinors dotted and undotted. In Section 8.1, each bilinear spinor consisted of one dotted and one undotted spinor. There are also bilinear spinors which are both dotted or both undotted. We are interested in two sets of three quantities satisfying the $O(3)$ symmetry. They should therefore transform like

$$ (x+iy)/\sqrt{2}, \quad (x-iy)/\sqrt{2}, \quad z \qquad (186) $$

which are like

$$ uu, \quad vv, \quad (uv + vu)/\sqrt{2} \qquad (187) $$

respectively in the $O(3)$ regime. Since the dot conjugation is the parity operation, they are like

$$ -\dot{u}\dot{u}, \quad -\dot{v}\dot{v}, \quad -(\dot{u}\dot{v} + \dot{v}\dot{u})/\sqrt{2} \qquad (188) $$

In other words, under the dot conjugation,

$$ uu \rightarrow -\dot{u}\dot{u}, \quad \text{and} \quad vv \rightarrow -\dot{v}\dot{v} \qquad (189) $$
---PAGE_BREAK---

We noticed a similar sign change in Equation (184).
+ +In order to construct the z component in this $O(3)$ space, let us first consider + +$$f_z = \frac{1}{2} [(uv + vu) - (\dot{u}\dot{v} + \dot{v}\dot{u})], \quad g_z = \frac{1}{2i} [(uv + vu) + (\dot{u}\dot{v} + \dot{v}\dot{u})] \qquad (190)$$ + +where $f_z$ and $g_z$ are respectively symmetric and anti-symmetric under the dot conjugation or the parity operation. These quantities are invariant under the boost along the z direction. They are also invariant under rotations around this axis, but they are not invariant under boost along or rotations around the x or y axis. They are different from the scalars given in Equation (174). + +Next, in order to construct the x and y components, we start with $g_\pm$ as + +$$f_+ = \frac{1}{\sqrt{2}} (uu - \dot{u}\dot{u}) \qquad g_+ = \frac{1}{\sqrt{2}i} (uu + \dot{u}\dot{u}) \qquad (191)$$ + +$$f_- = \frac{1}{\sqrt{2}} (vv - \dot{v}\dot{v}) \qquad g_- = \frac{1}{\sqrt{2}i} (vv + \dot{v}\dot{v}) \qquad (192)$$ + +Then + +$$f_x = \frac{1}{\sqrt{2}} (f_+ + f_-) = \frac{1}{2} [(uu - \dot{u}\dot{u}) + (vv - \dot{v}\dot{v})] \qquad (193)$$ + +$$f_y = \frac{1}{\sqrt{2}i} (f_+ - f_-) = \frac{1}{2i} [(uu - \dot{u}\dot{u}) - (vv - \dot{v}\dot{v})] \qquad (194)$$ + +and + +$$g_x = \frac{1}{\sqrt{2}} (g_+ + g_-) = \frac{1}{2i} [(uu + \dot{u}\dot{u}) + (vv + \dot{v}\dot{v})] \qquad (195)$$ + +$$g_y = \frac{1}{\sqrt{2}i} (g_+ - g_-) = -\frac{1}{2} [(uu + \dot{u}\dot{u}) - (vv + \dot{v}\dot{v})] \qquad (196)$$ + +Here $f_x$ and $f_y$ are symmetric under dot conjugation, while $g_x$ and $g_y$ are anti-symmetric. + +Furthermore, $f_z$, $f_x$, and $f_y$ of Equations (190) and (193) transform like a three-dimensional vector. The same can be said for $g_i$ of Equations (190) and (195). 
Thus, they can be grouped into the second-rank tensor

$$T = \begin{pmatrix}
0 & -g_z & -g_x & -g_y \\
g_z & 0 & -f_y & f_x \\
g_x & f_y & 0 & -f_z \\
g_y & -f_x & f_z & 0
\end{pmatrix} \qquad (197)$$

whose Lorentz-transformation properties are well known. The $g_i$ components change their signs under space inversion, while the $f_i$ components remain invariant. They are like the electric and magnetic fields respectively.

If the system is Lorentz-booted, $f_i$ and $g_i$ can be computed from Table 8. We are now interested in the symmetry of photons by taking the massless limit. According to the procedure developed in Section 6, we can keep only the terms which become larger for larger values of $\eta$. Thus,

$$f_x \rightarrow \frac{1}{2}(uu - \dot{v}\dot{v}), \qquad f_y \rightarrow \frac{1}{2i}(uu + \dot{v}\dot{v}) \qquad (198)$$

$$g_x \rightarrow \frac{1}{2i}(uu + \dot{v}\dot{v}), \qquad g_y \rightarrow -\frac{1}{2}(uu - \dot{v}\dot{v}) \qquad (199)$$

in the massless limit.
---PAGE_BREAK---

Then the tensor of Equation (197) becomes

$$
F = \begin{pmatrix}
0 & 0 & -E_x & -E_y \\
0 & 0 & -B_y & B_x \\
E_x & B_y & 0 & 0 \\
E_y & -B_x & 0 & 0
\end{pmatrix} \qquad (200)
$$

with

$$
B_x = \frac{1}{2} (uu - \dot{v}\dot{v}), \quad B_y = \frac{1}{2i} (uu + \dot{v}\dot{v}) \qquad (201)
$$

$$
E_x = \frac{1}{2i} (uu + \dot{v}\dot{v}), \quad E_y = -\frac{1}{2} (uu - \dot{v}\dot{v}) \qquad (202)
$$

The electric and magnetic field components are perpendicular to each other. Furthermore,

$$
E_x = B_y, \quad E_y = -B_x \qquad (203)
$$

The question then is what these field combinations mean physically. In order to address this question, let us go back to Equation (191).
In the massless limit,

$$
B_+ \approx E_+ \approx uu, \quad B_- \approx E_- \approx \dot{v}\dot{v} \qquad (204)
$$

The gauge transformations applicable to $u$ and $\dot{v}$ are the two-by-two matrices

$$
\begin{pmatrix} 1 & -\gamma \\ 0 & 1 \end{pmatrix}, \quad \text{and} \quad \begin{pmatrix} 1 & 0 \\ -\gamma & 1 \end{pmatrix} \qquad (205)
$$

respectively, as noted in Sections 5.2 and 7.1. Both $u$ and $\dot{v}$ are invariant under gauge transformations, while $v$ and $\dot{u}$ are not.

The $B_+$ and $E_+$ are for the photon spin along the $z$ direction, while $B_-$ and $E_-$ are for the opposite direction. In 1964 [35], Weinberg constructed gauge-invariant state vectors for massless particles starting from Wigner's 1939 paper [1]. The bilinear spinors $uu$ and $\dot{v}\dot{v}$ correspond to Weinberg's state vectors.

## 8.3. Possible Symmetry of the Higgs Mechanism

In this section, we discussed how the two-by-two formalism of the group SL(2, c) leads to the scalar, four-vector, and tensor representations of the Lorentz group. We discussed in detail how the four-vector for a massive particle can be decomposed into the symmetry of a two-component massless particle and one gauge degree of freedom. This aspect was studied in detail by Kim and Wigner [20,21], and their results are illustrated in Figure 6. This decomposition is known in the literature as the group contraction.

The four-dimensional Lorentz group can be contracted to the Euclidean and cylindrical groups. These contraction processes could transform a four-component massive vector meson into a massless spin-one particle with two spin components, and one gauge degree of freedom.

Since this contraction procedure is spelled out in detail in [21], as well as in the present paper, its reverse process is also well understood. We start with one two-component massless particle with one gauge degree of freedom, and end up with a massive vector meson with its four components.
The mathematics of this process is not unlike the Higgs mechanism [36,37], where one massless field with two degrees of freedom absorbs one gauge degree of freedom to become a quartet of bosons, namely that of $W^{\pm}$ and $Z$ plus the Higgs boson. As is well known, this mechanism is the basis for the theory of electro-weak interaction formulated by Weinberg and Salam [38,39].
---PAGE_BREAK---

**Figure 6.** Contractions of the three-dimensional rotation group. (a) Contraction in terms of the tangential plane and the tangential cylinder [20]; (b) Contraction in terms of the expansion and contraction of the longitudinal axis [21]. In both cases, the symmetry ends up with one rotation around the longitudinal direction and one translational degree along the longitudinal axis. The rotation and translation correspond to the helicity and gauge degrees of freedom, respectively.

The term "spontaneous symmetry breaking" is used for the Higgs mechanism. It could be an interesting problem to see whether this symmetry breaking for the two-Higgs-doublet model can be formulated in terms of the Lorentz group and its contractions. In this connection, we note an interesting recent paper by Dée and Ivanov [40].

# 9. Conclusions

The damped harmonic oscillator, Wigner's little groups, and the Poincaré sphere belong to three different branches of physics. In this paper, it was noted that they are based on the same mathematical framework, namely the algebra of two-by-two matrices.

The second-order differential equation for damped harmonic oscillators can be formulated in terms of two-by-two matrices. These matrices produce the algebra of the group $Sp(2)$. While there are three trace classes of the two-by-two matrices of this group, the damped oscillator tells us how to make transitions from one class to another.

It is shown that Wigner's three little groups can be defined in terms of the trace classes of the $Sp(2)$ group.
If the trace is smaller than two, the little group is for massive particles. If greater than two, the little group is for imaginary-mass particles. If the trace is equal to two, the little group is for massless particles. Thus, the damped harmonic oscillator provides a procedure for transition from one little group to another. + +The Poincaré sphere contains the symmetry of the six-parameter $SL(2, c)$ group. Thus, the sphere provides the procedure for extending the symmetry of the little group defined within the Lorentz group of three-dimensional Minkowski space to its full Lorentz group in the four-dimensional space-time. In addition, the Poincaré sphere offers the variable which allows us to change the symmetry of a massive particle to that of a massless particle by continuously decreasing the mass. + +In this paper, we extracted the mathematical properties of Wigner's little groups from the damped harmonic oscillator and the Poincaré sphere. In so doing, we have shown that the transition from one little group to another is tangentially continuous. + +This subject was initiated by İnönü and Wigner in 1953 as the group contraction [41]. In their paper, they discussed the contraction of the three-dimensional rotation group becoming contracted to the two-dimensional Euclidean group with one rotational and two translational degrees of freedom. While the $O(3)$ rotation group can be illustrated by a three-dimensional sphere, the plane tangential at +---PAGE_BREAK--- + +the north pole is for the $E(2)$ Euclidean group. However, we can also consider a cylinder tangential at the equatorial belt. The resulting cylindrical group is isomorphic to the Euclidean group [20]. While the rotational degree of freedom of this cylinder is for the photon spin, the up and down translations on the surface of the cylinder correspond to the gauge degree of freedom of the photon, as illustrated in Figure 6. 
+ +It was noted also that the Bargmann decomposition of two-by-two matrices, as illustrated in Figure 1 and Figure 2, allows us to study more detailed properties of the little groups, including space and time reflection reflection properties. Also in this paper, we have discussed how the scalars, four-vectors, and four-tensors can be constructed from the two-by-two representation in the Lorentz-covariant world. + +In addition, it should be noted that the symmetry of the Lorentz group is also contained in the squeezed state of light [14] and the ABCD matrix for optical beam transfers [18]. We also mentioned the possibility of understanding the mathematics of the Higgs mechanism in terms of the Lorentz group and its contractions. + +## Acknowledgements + +In his 1939 paper [1], Wigner worked out the subgroups of the Lorentz group whose transformations leave the four momentum of a given particle invariant. In so doing, he worked out their internal space-time symmetries. In spite of its importance, this paper remains as one of the most difficult papers to understand. Wigner was eager to make his paper understandable to younger physicists. + +While he was the pioneer in introducing the mathematics of group theory to physics, he was also quite fond of using two-by-two matrices to explain group theoretical ideas. He asked one of the present authors (Young S. Kim) to rewrite his 1939 paper [1] using the language of those matrices. This is precisely what we did in the present paper. + +We are grateful to Eugene Paul Wigner for this valuable suggestion. + +## Author Contributions + +This paper is largely based on the earlier papers by Young S. Kim and Marilyn E. Noz, and those by Sibel Başkal and Young S. Kim. The two-by-two formulation of the damped oscillator in Section 2 was jointly developed by Sibel Başkal and Young S. Kim during the summer of 2012. Marilyn E. Noz developed the idea of the symmetry of small-mass neutrinos in Section 7. 
The limiting process in the symmetry of the Poincaré sphere was formulated by Young S. Kim. Sibel Başkal initially constructed the four-by-four tensor representation in Section 8. + +The initial organization of this paper was conceived by Young S. Kim in his attempt to follow Wigner's suggestion to translate his 1939 paper into the language of two-by-two matrices. Sibel Başkal and Marilyn E. Noz tightened the organization and filled in the details. + +## Conflicts of Interest + +The authors declare no conflicts of interest. + +## References + +1. Wigner, E. On unitary representations of the inhomogeneous Lorentz Group. *Ann. Math.* **1939**, *40*, 149–204. +2. Han, D.; Kim, Y.S.; Son, D. Eulerian parametrization of Wigner little groups and gauge transformations in terms of rotations in 2-component spinors. *J. Math. Phys.* **1986**, *27*, 2228–2235. +3. Born, M.; Wolf, E. *Principles of Optics*, 6th ed.; Pergamon: Oxford, UK, 1980. +---PAGE_BREAK--- + +4. Han, D.; Kim, Y.S.; Noz, M.E. Stokes parameters as a Minkowskian four-vector. Phys. Rev. E **1997**, 56, 6065-6076. + +5. Brosseau, C. *Fundamentals of Polarized Light: A Statistical Optics Approach*; John Wiley: New York, NY, USA, 1998. + +6. Başkal, S.; Kim, Y.S. De Sitter group as a symmetry for optical decoherence. J. Phys. A **2006**, 39, 7775-7788. + +7. Kim, Y.S.; Noz, M.E. Symmetries shared by the Poincaré Group and the Poincaré Sphere. *Symmetry* **2013**, *5*, 233–252. + +8. Han, D.; Kim, Y.S.; Son, D. E(2)-like little group for massless particles and polarization of neutrinos. Phys. Rev. D **1982**, *26*, 3717–3725. + +9. Han, D.; Kim, Y.S.; Son, D. Photons, neutrinos and gauge transformations. Am. J. Phys. **1986**, *54*, 818–821. + +10. Başkal, S.; Kim, Y.S. Little groups and Maxwell-type tensors for massive and massless particles. Europhys. Lett. **1997**, *40*, 375–380. + +11. Leggett, A.; Chakravarty, S.; Dorsey, A.; Fisher, M.; Garg, A.; Zwerger, W. Dynamics of the dissipative 2-state system. Rev. 
Mod. Phys. **1987**, *59*, 1–85. + +12. Başkal, S.; Kim, Y.S. One analytic form for four branches of the ABCD matrix. J. Mod. Opt. **2010**, *57*, 1251–1259. + +13. Başkal, S.; Kim, Y.S. Lens optics and the continuity problems of the ABCD matrix. J. Mod. Opt. **2014**, *61*, 161–166. + +14. Kim, Y.S.; Noz, M.E. *Theory and Applications of the Poincaré Group*; Reidel: Dordrecht, The Netherlands, 1986. + +15. Bargmann, V. Irreducible unitary representations of the Lorentz group. Ann. Math. **1947**, *48*, 568–640. + +16. Iwasawa, K. On some types of topological groups. Ann. Math. **1949**, *50*, 507–558. + +17. Guillemin, V.; Sternberg, S. *Symplectic Techniques in Physics*; Cambridge University Press: Cambridge, UK, 1984. + +18. Başkal, S.; Kim, Y.S. Lorentz Group in Ray and Polarization Optics. In *Mathematical Optics: Classical, Quantum and Computational Methods; Lakshminarayanan, V., Calvo, M.L., Alieva, T., Eds.*; CRC Taylor and Francis: New York, NY, USA, 2013; Chapter 9, pp. 303–340. + +19. Naimark, M.A. *Linear Representations of the Lorentz Group*; Pergamon: Oxford, UK, 1964. + +20. Kim, Y.S.; Wigner, E.P. Cylindrical group and massless particles. J. Math. Phys. **1987**, *28*, 1175–1179. + +21. Kim, Y.S.; Wigner, E.P. Space-time geometry of relativistic particles. J. Math. Phys. **1990**, *31*, 55–60. + +22. Georgieva, E.; Kim, Y.S. Iwasawa effects in multilayer optics. Phys. Rev. E **2001**, *64*, doi:10.1103/PhysRevE.64.026602. + +23. Saleh, B.E.A.; Teich, M.C. *Fundamentals of Photonics*, 2nd ed.; John Wiley: Hoboken, NJ, USA, 2007. + +24. Papoulias, D.K.; Kosmas, T.S. Exotic Lepton Flavour Violating Processes in the Presence of Nuclei. J. Phys.: Conf. Ser. **2013**, *410*, 012123:1–012123:5. + +25. Dinh, D.N.; Petcov, S.T.; Sasao, N.; Tanaka, M.; Yoshimura, M. Observables in neutrino mass spectroscopy using atoms. Phys. Lett. B **2013**, *719*, 154–163. + +26. Miramonti, L.; Antonelli, V. Advancements in Solar Neutrino physics. Int. J. Mod. Phys. 
E **2013**, *22*, 1–16. + +27. Li, Y.-F.; Cao, J.; Jun, Y.; Wang, Y.; Zhan, L. Unambiguous determination of the neutrino mass hierarchy using reactor neutrinos. Phys. Rev. D **2013**, *88*, 013008:1–013008:9. + +28. Bergstrom, J. Combining and comparing neutrinoless double beta decay experiments using different 584 nuclei. J. High Energy Phys. **2013**, *02*, 093:1–093:27. + +29. Han, T.; Lewis, I.; Ruiz, R.; Si, Z.-G. Lepton number violation and $W'$ chiral couplings at the LHC. Phys. Rev. D **2013**, *87*, 035011:1–035011:25. + +30. Drewes, M. The phenomenology of right handed neutrinos. Int. J. Mod. Phys. E **2013**, *22*, 1330019:1–1330019:75. + +31. Barut, A.O.; McEwan, J. The four states of the massless neutrino with pauli coupling by spin-gauge invariance. Lett. Math. Phys. **1986**, *11*, 67–72. + +32. Palcu, A. Neutrino Mass as a consequence of the exact solution of 3-3-1 gauge models without exotic electric charges. Mod. Phys. Lett. A **2006**, *21*, 1203–1217. + +33. Bilenky, S.M. Neutrino. Phys. Part. Nucl. **2013**, *44*, 1–46. + +34. Alhendi, H. A.; Lashin, E. I.; Mudlej, A. A. Textures with two traceless submatrices of the neutrino mass matrix. Phys. Rev. D **2008**, *77*, 013009:1–013009:1–13. + +35. Weinberg, S. Photons and gravitons in S-Matrix theory: Derivation of charge conservation and equality of gravitational and inertial mass. Phys. Rev. **1964**, *135*, B1049-B1056. + +36. Higgs, P.W. Broken symmetries and the masses of gauge bosons. Phys. Rev. Lett. **1964**, *13*, 508-509. + +Symmetry **2014**, *6*, 473–515 +---PAGE_BREAK--- + +37. Guralnik, G.S.; Hagen, C.R.; Kibble, T.W.B. Global conservation laws and massless particles. Phys. Rev. Lett. **1964**, *13*, 585–587. + +38. Weinberg, S. A model of leptons. Phys. Rev. Lett. **1967**, *19*, 1265–1266. + +39. Weinberg, S. *Quantum Theory of Fields, Volume II, Modern Applications*; Cambridge University Press: Cambridge, UK, 1996. + +40. Dée, A.; Ivanov, I.P. 
Higgs boson masses of the general two-Higgs-doublet model in the Minkowski-space formalism. Phys. Rev. D **2010**, *81*, 015012:1–015012:8. + +41. Inönü, E.; Wigner, E.P. On the contraction of groups and their representations. Proc. Natl. Acad. Sci. USA **1953**, *39*, 510–524. + +© 2014 by the authors. Licensee MDPI, Basel, Switzerland. This article is an open access +article distributed under the terms and conditions of the Creative Commons Attribution +(CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + + +---PAGE_BREAK--- + +# Chapter 2: +## Harmonic Oscillators in Modern Physics +---PAGE_BREAK--- + + +---PAGE_BREAK--- + +Article + +# Analytical Solutions of Temporal Evolution of Populations in Optically-Pumped Atoms with Circularly Polarized Light + +Heung-Ryoul Noh + +Department of Physics, Chonnam National University, Gwangju 500-757, Korea; hrnoh@chonnam.ac.kr; +Tel.: +82-62-530-3366 + +Academic Editor: Young Suh Kim + +Received: 10 December 2015; Accepted: 14 March 2016; Published: 19 March 2016 + +**Abstract:** We present an analytical calculation of temporal evolution of populations for optically pumped atoms under the influence of weak, circularly polarized light. The differential equations for the populations of magnetic sublevels in the excited state, derived from rate equations, are expressed in the form of inhomogeneous second-order differential equations with constant coefficients. We present a general method of analytically solving these differential equations, and obtain explicit analytical forms of the populations of the ground state at the lowest order in the saturation parameter. The obtained populations can be used to calculate lineshapes in various laser spectroscopies, considering transit time relaxation. + +**Keywords:** second-order differential equations; optical pumping; analytical solutions + +**PACS:** 02.30.Hq; 32.80.Xx; 32.30.-r + +## 1. 
Introduction + +When an atom is illuminated by single-mode laser light, the populations of the magnetic sublevels and coherences between them exhibit complicated temporal variations. This phenomenon is called optical pumping, which is widely used in the preparation of internal atomic states of interest [1,2]. It has recently been observed that optical pumping affects the lineshapes in saturated absorption spectroscopy (SAS) [3], electromagnetically induced transparency (EIT) [4], and absorption of cold atoms with a Λ-type three-level scheme [5]. Nonlinear effects in optical pumping have also been investigated [6,7]. + +The temporal dynamics of the internal states of an atom are accurately described by density matrix equations [8,9]. In some special cases, however, a simpler method can be employed to solve for the dynamics of the internal states of the atom, using rate equations [10,11]. Furthermore, when the intensity of light is weak, the rate equations can be solved analytically [12–15]. These analytical solutions are practically very useful; once they are obtained, it is readily possible to obtain analytically computed quantities such as the absorption coefficient of a probe beam and lineshape functions in nonlinear laser spectroscopy. We have previously reported analytical solutions for SAS [16,17] and polarization spectroscopy (PS) [18]. + +Interestingly, the equations governing the temporal dynamics of populations at the weak intensity limit are homogeneous or inhomogeneous second-order linear differential equations (DEs) with constant coefficients [12–15]. Unlike the harmonic oscillator in mechanics, where under- or over-damped motions are observed [19], the equations for optical pumping show only over-damped behaviors. However, this system exhibits a variety of inhomogeneous DEs. 
In a recent publication, we reported the method of solving these equations analytically, in the context of a pedagogical +---PAGE_BREAK--- + +description of the method of solving inhomogeneous DEs [15]. Although the method is straightforward in principle, it is not easy to obtain analytical solutions for complicated atomic structures, such as Cs. Extending the previous study [15], in this paper, we present a general method of analytically solving the DEs for such a complicated atom. + +## 2. Theory + +The energy level diagram under consideration is shown in Figure 1. Since alkali-metal atoms are considered, there are two ground states with $F_g = I + 1/2$ and $F_g = I - 1/2$ ($I$: nuclear spin angular momentum quantum number). We consider a $\sigma^+$ polarized weak laser beam, whose Rabi frequency is $\Omega$ and optical frequency is $\omega = \omega_0 + \delta$ ($\omega_0$ is the resonance frequency and $\delta$ is the laser frequency detuning). We assume that the laser frequency is tuned to the transition from one of the two ground states (in Figure 1, the state $F_g = I + 1/2$). Then, the other ground state (in Figure 1, the state $F_g = I - 1/2$) is not excited by laser light, and can be populated by spontaneous emission from the excited state when the optical transition is not cycling. The populations (and the states themselves) of the magnetic sublevels in the excited, upper ground, and lower ground states are labeled, respectively, as $g_{ir}$, $f_{ir}$ and $h_i$ with $i = 1, 2, \dots$. + +Figure 1. An energy level diagram for an optically pumped atom under the influence of circularly polarized light. + +The internal dynamics of the atom can be described by the density matrix equation in the frame rotating with frequency $\omega$: + +$$ \dot{\rho} = -(i/\hbar)[H, \rho] + \dot{\rho}_{\text{sp}} \quad (1) $$ + +where $\rho$ is the density operator. 
In Equation (1), the Hamiltonian, $H$, is given by

$$ H = -\sum_j \hbar \delta |g_j\rangle \langle g_j| - \sum_j \hbar \Delta_g |h_j\rangle \langle h_j| - \frac{\hbar \Omega}{2} \sum_j C_j^{j} |g_j\rangle \langle f_j| + \text{h.c.}, \quad (2) $$

where $\Delta_g$ is the hyperfine splitting between the two ground states and h.c. denotes the Hermitian conjugate. In Equation (2), the first two terms on the right-hand side represent the bare atomic Hamiltonian, and the remaining terms denote the atom-photon interaction Hamiltonian [20]. $C_i^j$ is the normalized transition strength between the states $f_i$ and $g_j$, and $R_i^j \equiv (C_i^j)^2$ is given below (Equation (13)). In Equation (1), $\dot{\rho}_{\text{sp}}$ represents the spontaneous emission term, whose matrix representations are given by:

$$ \begin{align} \langle g_i | \dot{\rho}_{\text{sp}} | g_j \rangle &= -\Gamma \langle g_i | \rho | g_j \rangle, \\ \langle g_i | \dot{\rho}_{\text{sp}} | f_j \rangle &= -\frac{\Gamma}{2} \langle g_i | \rho | f_j \rangle, \quad \langle g_i | \dot{\rho}_{\text{sp}} | h_j \rangle = -\frac{\Gamma}{2} \langle g_i | \rho | h_j \rangle, \\ \langle f_i | \dot{\rho}_{\text{sp}} | f_j \rangle &= \Gamma \sum_{\epsilon=-2}^{0} C_i^{i+\epsilon} C_j^{j+\epsilon} \langle g_{i+\epsilon} | \rho | g_{j+\epsilon} \rangle, \\ \langle h_i | \dot{\rho}_{\text{sp}} | h_j \rangle &= \Gamma \sum_{\epsilon=-2}^{0} D_i^{i+\epsilon} D_j^{j+\epsilon} \langle g_{i+\epsilon} | \rho | g_{j+\epsilon} \rangle, \end{align} \quad (3) $$
---PAGE_BREAK---

and $\langle \mu | \dot{\rho}_{\text{sp}} | \nu \rangle = \langle \nu | \dot{\rho}_{\text{sp}} | \mu \rangle^*$ when $\mu \neq \nu$, where $\Gamma$ is the decay rate of the excited state. $D_i^j$ is the normalized transition strength between the states $h_i$ and $g_j$, and $T_i^j = (D_i^j)^2$ is also given below (Equation (13)).
Inserting Equations (2) and (3) into Equation (1), we can obtain the following differential equations for the optical coherences and populations: + +$$ \langle g_i | \dot{\rho} | f_i \rangle = \left(i\delta - \frac{\Gamma}{2}\right) \langle g_i | \rho | f_i \rangle + \frac{i}{2} C_i^{\dagger} \Omega (g_i - f_i), \quad (4) $$ + +$$ \dot{g}_i = -\Gamma g_i + \frac{i}{2} C_i^{\dagger} \Omega (\langle g_i | \rho | f_i \rangle - \langle f_i | \rho | g_i \rangle), \quad (5) $$ + +$$ \dot{f}_i = \Gamma \sum_{j=i-2}^{i} (C_i^{\dagger})^2 g_j - \frac{i}{2} C_i^{\dagger} \Omega (\langle g_i | \rho | f_i \rangle - \langle f_i | \rho | g_i \rangle), \quad (6) $$ + +$$ h_i = \Gamma \sum_{j=i-2}^{i} (D_i^j)^2 g_j, \quad (7) $$ + +where we use simplified expressions for the populations: $\langle g_i | \rho | g_i \rangle = g_i$, $\langle f_i | \rho | f_i \rangle = f_i$, and $\langle h_i | \rho | h_i \rangle = h_i$. In Equations (4)–(7), we assume that $\langle g_i | \rho | h_i \rangle = 0$ because $\Delta_g$ is much larger than $|\delta|$ and $\Gamma$. We note that, because the polarization of light is $\sigma^+$, and therefore the Zeeman coherences between the magnetic sublevels in the excited and ground states disappear. + +In Equation (4), the characteristic decay rate of the optical coherence is $\Gamma/2$, which is much larger than the characteristic decay rate of the populations ($\sim s\Gamma$; see Equation (12) below for definition of $s$). Thus, the optical coherences evolve much faster than the populations, which is called the rate equation approximation [21]. Owing to this rate equation approximation, $\langle g_i | \rho | f_i \rangle$ can be expressed in terms of the populations as follows by letting $\langle g_i | \dot{\rho} | f_i \rangle = 0$: + +$$ \langle g_i | \rho | f_i \rangle = \frac{C_i^{\dagger} \Omega}{i\Gamma + 2\delta} (f_i - g_i). 
\quad (8) $$ + +Then, inserting Equation (8) and its complex conjugate into Equations (5)–(7), we can obtain the following rate equations for the populations: + +$$ \dot{f}_i = -\frac{\Gamma}{2} s R_i^{\dagger} (f_i - g_i) + \sum_{j=i-2}^{i} \Gamma R_j^{\dagger} g_j, \quad (9) $$ + +$$ \dot{g}_i = -\frac{\Gamma}{2} s R_i^{\dagger} (f_i - g_i) - \Gamma g_i, \quad (10) $$ + +$$ h_i = \sum_{j=i-2}^{i} \Gamma T_j^{\dagger} g_j, \quad (11) $$ + +for $i=1,2,\dots$. In Equations (9)–(11), $s$ is the saturation parameter, which is given by + +$$ s = \frac{\Omega^2/2}{\delta^2 + \Gamma^2/4}, \quad (12) $$ + +and $R_i^j = (C_i^j)^2$ and $T_i^j = (D_i^j)^2$. We note that $s$ is a function of both the $\delta$ and $\Gamma$ frequency. Notably, the reference of the frequency detuning differs, depending on the transition line considered. When $i$ and $j$ refer to the states $|F_g, m_g\rangle$ and $|F_e, m_e\rangle$, respectively, the transition strength ($R_i^j$) is given by + +$$ R_{F_g, m_g}^{F_e, m_e} = (2L_e+1)(2J_e+1)(2J_g+1)(2F_e+1)(2F_g+1) \\ \times \left[ \begin{Bmatrix} L_e & J_e & S \\ J_g & L_g & 1 \end{Bmatrix} \begin{Bmatrix} J_e & F_e & I \\ F_g & J_g & 1 \end{Bmatrix} \begin{pmatrix} F_g & 1 & F_e \\ m_g & m_e - m_g & -m_e \end{pmatrix} \right]^2, \quad (13) $$ +---PAGE_BREAK--- + +where *L* and *S* denote the orbital and electron spin angular momenta, respectively, and the curly (round) brackets represent the 6J (3J) symbol. $T_i^j$ are similarly obtained by using different $F_g$ values in Equation (13). + +The explicit form of Equation (9) is given by + +$$ \dot{f}_i = \frac{\Gamma}{2} s R_i^i (g_i - f_i) + \Gamma \left( R_i^{i-2} g_{i-2} + R_i^{i-1} g_{i-1} + R_i^i g_i \right), \quad (14) $$ + +and $f_i$ can be expressed in terms of $\dot{g}_i$ and $g_i$ from Equation (10) at the lowest order in *s* as follows: + +$$ f_i = \frac{2}{\Gamma s R_i^i} (\dot{g}_i + \Gamma g_i). 
\qquad (15) $$ + +Insertion of Equations (10) and (15) into Equation (14) yields the following DE for $g_i$: + +$$ \begin{aligned} \dot{g}_i + \Gamma \left(1 + \frac{s}{2} R_i^i\right) \dot{g}_i + \frac{s}{2} \Gamma^2 R_i^i \left(1 - R_i^i\right) g_i &= \frac{s}{2} \Gamma^2 R_i^{i-2} R_i^i g_{i-2} + \frac{s}{2} \Gamma^2 R_i^{i-1} R_i^i g_{i-1}. \\ &= \frac{s}{2} \Gamma^2 R_i^{i-2} R_i^i g_{i-2} + \frac{s}{2} \Gamma^2 R_i^{i-1} R_i^i g_{i-1}. \end{aligned} \quad (16) $$ + +when $i=1$, the right-hand side of Equation (16) vanishes. Therefore, Equation (16) becomes a homogeneous DE. In contrast, when $i \neq 1$, Equation (16) becomes an inhomogeneous DE because the right-hand side terms are functions of $g_i$. + +We solve Equation (16) from $i=1$ consecutively. As is well-known, the solution of Equation (16) consists of two parts: a homogeneous solution and a particular solution. We first find the solutions of the homogeneous equation by inserting the equation $g_i \sim e^{\lambda_1 \Gamma t}$ into Equation (16). Then, we have two values ($\lambda_{2i-1}, \lambda_{2i}$) for $\lambda$ as follows: + +$$ \lambda_{2i-1(2i)} = \frac{1}{4} \left( -2 - sR_i^i - (+)\sqrt{4 - 4sR_i^i + s(8+s)(R_i^i)^2} \right), $$ + +which can be approximated as follows in the weak intensity limit: + +$$ \lambda_{2i-1} \approx -1 - \frac{s}{2} (R_i^i)^2, \quad \lambda_{2i} \approx -\frac{s}{2} R_i^i (1 - R_i^i). $$ + +We consider the case of $i=1$ in Equation (16). Then, the solution is given by: + +$$ g_1 = C_{1,1}e^{\lambda_1 \Gamma t} + C_{1,2}e^{\lambda_2 \Gamma t}, $$ + +where the coefficients $C_{1,1}$ and $C_{1,2}$ should be determined using the initial conditions. In the case of $i=2$, the right-hand side in Equation (16) contains the terms of $e^{\lambda_1 \Gamma t}$ and $e^{\lambda_2 \Gamma t}$. 
Therefore, $g_2$ has four exponential terms: + +$$ g_2 = C_{2,1}e^{\lambda_1 \Gamma t} + C_{2,2}e^{\lambda_2 \Gamma t} + C_{2,3}e^{\lambda_3 \Gamma t} + C_{2,4}e^{\lambda_4 \Gamma t}, $$ + +where the coefficients should also be determined. Therefore, we can express $g_j$ generally as follows: + +$$ g_j = \sum_{k=1}^{2j} C_{j,k} e^{\lambda_k \Gamma t}. \quad (17) $$ +---PAGE_BREAK--- + +We find $C_{j,k}$ with $k = 1, 2, \dots, 2j$ by means of recursion relations; i.e., $C_{j,k}$ are expressed in terms of $C_{i,l}$ with $i < j$ and $l = 1, 2, \dots, 2i$. Inserting Equation (17) into Equation (16), we obtain + +$$ +\begin{aligned} +g_i = C_{i,2i-1} & e^{\lambda_{2i}-1\Gamma t} + C_{i,2i} e^{\lambda_{2i}\Gamma t} \\ +& + \sum_{k=1}^{2(i-1)} \frac{(s/2) R_i^{i-1} R_{i-k}^{i} C_{i-1,k}}{\lambda_k^2 + \lambda_k + \frac{s}{2} R_i^i (1+\lambda_k - R_i^t)} e^{\lambda_k \Gamma t} \\ +& + \sum_{k=1}^{2(i-2)} \frac{(s/2) R_i^{i-2} R_{i-k}^{i} C_{i-2,k}}{\lambda_k^2 + \lambda_k + \frac{s}{2} R_i^i (1+\lambda_k - R_i^t)} e^{\lambda_k \Gamma t}. +\end{aligned} +\quad (18) +$$ + +Comparing Equations (17) and (18) gives + +$$ +\begin{align} +C_{i,k} &= \frac{(s/2)R_i^i (R_i^{i-1}C_{i-1,k} + R_i^{i-2}C_{i-2,k})}{\lambda_k^2 + \lambda_k + \frac{s}{2}R_i^i(1+\lambda_k - R_i^t)}, \tag{19} \\ +\text{for } k &= 1, 2, \dots, 2(i-2), \nonumber +\end{align} +$$ + +$$ +\begin{align} +C_{i,k} &= \frac{(s/2) R_i^{i-1} R_i^i C_{i-1,k}}{\lambda_k^2 + \lambda_k + \frac{s}{2} R_i^i (1 + \lambda_k - R_i^t)}, \tag{20} \\ +\text{for } k &= 2i-3 \text{ and } 2(i-1). \notag +\end{align} +$$ + +The remaining two coefficients, $C_{i,2i-1}$ and $C_{i,2i}$, can be derived from Equation (18) using two initial conditions for $g_i(0)$ and $\dot{g}_i(0)$: + +$$ g_i(0) = 0, \quad \dot{g}_i(0) = \frac{s}{2} p_0 R_i^i, $$ + +where $p_0$ is the population of each sublevel in the ground state at equilibrium, which is equal to $1/[2(2I+1)]$. 
Then, the results are given by + +$$ C_{i,2i-1} = \frac{1}{2Q_i} [2(A_i + 2A'_i + B_i + 2B'_i) + (A_i + B_i - 2p_0) sR_i^i] - \frac{A_i+B_i}{2}, \quad (21) $$ + +$$ C_{i,2i} = -\frac{1}{2Q_i} [2(A_i + 2A'_i + B_i + 2B'_i) + (A_i + B_i - 2p_0) sR_i^i] - \frac{A_i+B_i}{2}, \quad (22) $$ + +where + +$$ Q_i = \sqrt{4 + s R_i^i (-4 + (8+s) R_i^i)}, $$ + +$$ A_i = \sum_{k=1}^{2(i-1)} \frac{(s/2) R_i^{i-1} R_{i-k}^{i} C_{i-1,k}}{\lambda_k^2 + \lambda_k + \frac{s}{2} R_i^i (1+\lambda_k - R_i^t)}, \quad \text{for } i \ge 2 $$ + +$$ B_i = \sum_{k=1}^{2(i-2)} \frac{(s/2) R_i^{i-2} R_{i-k}^{i} C_{i-2,k}}{\lambda_k^2 + \lambda_k + \frac{s}{2} R_i^i (1+\lambda_k - R_i^t)}, \quad \text{for } i \ge 3, $$ + +$$ A'_i = \sum_{k=1}^{2(i-1)} \frac{(s/2) R_i^{i-1} R_k^i \lambda_k C_{i-1,k}}{\lambda_k^2 + \lambda_k + \frac{s}{2} R_i^i (1+\lambda_k - R_i^t)}, \quad \text{for } i \ge 2 $$ + +$$ B'_i = \sum_{k=1}^{2(i-2)} \frac{(s/2) R_i^{i-2} R_k^i \lambda_k C_{i-2,k}}{\lambda_k^2 + \lambda_k + \frac{s}{2} R_i^i (1+\lambda_k - R_i^t)}, \quad \text{for } i \ge 3, $$ + +and + +$A_1 = 0$, $A'_1 = 0$, $B_1 = B_2 = 0$, and $B'_1 = B'_2 = 0$. +---PAGE_BREAK--- + +The coefficients in $g_i$ from $g_1$ can be obtained by successively using the recursion relations in Equations (19)–(22). Once $g_i$ are obtained, $f_i$ can be obtained using Equation (15). Up to the lowest order in s, the result is given by + +$$f_i = \sum_{k=1}^{i} \frac{2C_{i,2k}}{sR_i^k} e^{\lambda_{2k}\Gamma t}. \quad (23)$$ + +Since $\lambda_k \sim -1$ for odd $k$, $g_i$ can be expressed as follows: + +$$g_i = \sum_{k=1}^{i} \left(C_{i,2k-1}e^{-\Gamma t} + C_{i,2k}e^{\lambda_{2k}\Gamma t}\right). \quad (24)$$ + +Taking the derivative of Equation (24) with respect to time and letting $t=0$, we have + +$$\dot{g}_i(0) = -\sum_{k=1}^{i} C_{i,2k-1},$$ + +up to the first order in $s$, since $\lambda_{2k}$ ($k=1, 2, \dots, i$) are already in the first order in $s$. 
Because one of the initial conditions is $\dot{g}_i(0) = sp_0 R_i^i/2$, and $g_i(0) = \sum_{k=1}^i (C_{i,2k-1} + C_{i,2k}) = 0$ from the other initial condition, we obtain the following equations: + +$$\sum_{k=1}^{i} C_{i,2k-1} = - \sum_{k=1}^{i} C_{i,2k} = -\frac{s}{2} p_0 R_i^i. \quad (25)$$ + +Using the relations in Equations (23) and (25), we find the simplified form of $g_i$ as follows: + +$$g_i = \frac{R_i^i s}{2} (f_i - p_0 e^{-\Gamma t}). \quad (26)$$ + +We obtain the populations of the sublevels in the ground state, which are not excited by laser light. The one or two magnetic sublevels with higher magnetic quantum numbers correspond to this case. We can easily obtain analytical populations by integrating the populations spontaneously transferred from the excited state, and the result is given by + +$$f_i = p_0 + \sum_{k=1}^{i-2} R_i^{i-2} C_{i-2,2k} \frac{e^{\lambda_{2k}\Gamma t} - 1}{\lambda_{2k}} + \sum_{k=1}^{i-1} R_i^{i-1} C_{i-1,2k} \frac{e^{\lambda_{2k}\Gamma t} - 1}{\lambda_{2k}}. \quad (27)$$ + +In several cases of atomic transition systems, $\lambda_k$ can duplicate, and the method of solving particular solutions given in Equation (18) no longer holds. We may solve for the particular solutions using the method presented in our previous paper [15]. However, it is also possible to solve by intentionally modifying $\lambda_k$ to satisfy the conditions that all $\lambda_k$ are unique. One possible method is setting $R_i^i \to R_i^i + i\epsilon$, where $\epsilon$ is a constant that is taken as zero at the final stage of the calculation. Although this method is not novel, it is very efficient. 
+ +The populations ($h_i$) of the sublevels in the ground state, which are not excited by laser light, can be easily obtained analytically by integrating the populations spontaneously transferred from the excited state (Equation (11)), and the result is given by + +$$h_i = p_0 + \sum_{l=-2}^{0} \sum_{k=1}^{i+1} T_l^{i+1} C_{i+1,2k} \frac{e^{\lambda_{2k}\Gamma t} - 1}{\lambda_{2k}}. \quad (28)$$ +---PAGE_BREAK--- + +### 3. Calculated Results + +Based on the method developed in Section 2, here we present the calculated results of the populations for the two transition schemes: (i) $F_g = 4 \rightarrow F_e = 5$ and (ii) $F_g = 3 \rightarrow F_e = 3$ for the D2 line of Cs. The energy level diagram for the Cs-D2 line is shown in Figure 2a, and the energy level diagrams for these two transitions are shown in Figure 2b,c. Owing to the large hyperfine splitting in the excited states, it is justifiable to neglect the off-resonant transitions; i.e., the $F_g = 4 \rightarrow F_e = 4$ and $F_g = 4 \rightarrow F_e = 3$ transitions can be neglected when the laser light is tuned to the $F_g = 4 \rightarrow F_e = 5$ transition line. Although it is in principle possible to include the off-resonant transitions in the analytical calculation of the populations [13], the complicated analytical solutions may not be practically useful. + +Figure 2. (a) Energy level diagram of the Cs-D2 line. (b) Energy level diagrams for the $F_g = 4 \rightarrow F_e = 5$ cycling transition line and (c) for the $F_g = 3 \rightarrow F_e = 3$ transition line illuminated by $\sigma^+$ polarized laser light. + +#### 3.1. Results for the $F_g = 4 \rightarrow F_e = 5$ Transition + +The $F_g = 4 \rightarrow F_e = 5$ transition shown in Figure 2b is cycling, and is used in many experiments, such as laser cooling and trapping [22]. Because $\sigma^+$ polarized laser light is illuminated, the sublevels with $m_e = -5$ and $-4$ are not optically excited. 
The normalized transition strengths, for the transitions presented in Figure 2b, are given by + +$$ (R_1^1, R_2^2, R_3^3, R_4^4, R_5^5, R_6^6, R_7^7, R_8^8, R_9^9) \\ = (\frac{1}{45}, \frac{1}{15}, \frac{2}{15}, \frac{2}{9}, \frac{1}{3}, \frac{7}{15}, \frac{28}{45}, \frac{4}{5}, 1). $$ + +For the transition for $i=1$, we obtain $\lambda_1 \approx -1$ and $\lambda_2 \approx -\frac{22}{2025}s$, and + +$$ C_{1,1} = -\frac{s}{1440}, \quad C_{1,2} = \frac{s}{1440}. $$ + +Thus, using Equation (23), we obtain + +$$ f_1 = \frac{1}{16} e^{-\frac{22s\Gamma t}{2025}}. $$ +---PAGE_BREAK--- + +The $\lambda_4$ for the transition for $i = 2$ is approximately given by $-\frac{7}{225}s$, and the coefficients are given by + +$$C_{2,1} = \frac{s}{240}, \quad C_{2,2} = \frac{s}{2460},$$ + +$$C_{2,3} = -\frac{s}{160}, \quad C_{2,4} = \frac{11}{6560}s.$$ + +Therefore, we have + +$$f_2 = \frac{1}{82}e^{-22s\Gamma t/2025} + \frac{33}{656}e^{-7s\Gamma t/225}.$$ + +The remaining $\lambda_{2k}$ ($k=2, \dots, 9$) values are given by + +$$ +\begin{aligned} +& (\lambda_6, \lambda_8, \lambda_{10}, \lambda_{12}, \lambda_{14}, \lambda_{16}, \lambda_{18}) \\ +& = \left( -\frac{13}{225}s, -\frac{7}{81}s, -\frac{s}{9}, -\frac{28}{225}s, -\frac{238}{2025}s, -\frac{2}{25}s, 0 \right), +\end{aligned} +$$ + +and the remaining populations are explicitly given by + +$$f_3 = \frac{413}{31160} e^{-22\tau/2025} + \frac{77}{2624} e^{-7\tau/225} + \frac{121}{6080} e^{-13\tau/225},$$ + +$$f_4 = \frac{2317}{264860} e^{-22\tau/2025} + \frac{693}{20992} e^{-7\tau/225} + \frac{1089}{44080} e^{-13\tau/225} - \frac{1001}{252416} e^{-7\tau/81},$$ + +$$f_5 = \frac{25577}{3072376} e^{-22\tau/2025} + \frac{4235}{125952} e^{-7\tau/225} + \frac{5203}{141056} e^{-13\tau/225} - \frac{5005}{504832} e^{-7\tau/81} - \frac{143}{22272} e^{-\tau/9},$$ + +$$f_6 = \frac{148693}{17666162} e^{-22\tau/2025} + \frac{1925}{47232} e^{-7\tau/225} + \frac{2057}{35264} e^{-13\tau/225} - \frac{1625}{63104} e^{-7\tau/81} - \frac{715}{16704} 
e^{-\tau/9} + \frac{13}{552} e^{-28\tau/225},$$ + +$$f_7 = \frac{921751}{8926068} e^{-22\tau/2025} + \frac{2519}{41984} e^{-7\tau/225} + \frac{891}{7424} e^{-13\tau/225} - \frac{49075}{504832} e^{-7\tau/81} - \frac{5555}{7424} e^{-\tau/9} - \frac{273}{736} e^{-28\tau/225} + \frac{209}{192} e^{-238\tau/2025},$$ + +$$f_8 = \frac{39041249}{2119939440} e^{-22\tau/2025} + \frac{1561}{10496} e^{-7\tau/225} + \frac{225071}{352640} e^{-13\tau/225} + \frac{219275}{126208} e^{-7\tau/81} + \frac{9955}{3712} e^{-\tau/9} + \frac{3367}{3680} e^{-28\tau/225} - \frac{77}{24} e^{-238\tau/2025} - \frac{459}{160} e^{-2\tau/25},$$ + +$$f_9 = \frac{9}{16} - \frac{1205666281}{8479757760} e^{-22\tau/2025} - \frac{74771}{188928} e^{-7\tau/225} - \frac{316701}{352640} e^{-13\tau/225} - \frac{404009}{252416} e^{-7\tau/81} - \frac{62953}{33408} e^{-\tau/9} - \frac{3133}{5520} e^{-28\tau/225} + \frac{407}{192} e^{-238\tau/2025} + \frac{459}{160} e^{-2\tau/25},$$ + +where we use a simplified notation: $\tau \equiv s\Gamma t$. Since the $F_g = 4 \rightarrow F_e = 5$ transition is cycling, the populations in the magnetic sublevels in the $F_g = 3$ ground state remain at their equilibrium value, 1/16. It should be also noted that the sum of the ground state populations is conserved, i.e., + +$$\sum_{i=1}^{9} f_i = \frac{9}{16}.$$ +---PAGE_BREAK--- + +From Equation (26), the populations of the sublevels in the excited state can be expressed in terms +of the populations in the ground state as follows: + +$$g_i = \frac{R_i^{\prime s}}{2} \left( f_i - \frac{1}{16} e^{-\Gamma t} \right).$$ + +The constants in $f_9$ and $g_9$ can be accurately calculated using Equation (10). In the steady-state regime, all the populations except $f_9$ and $g_9$ vanish, and these satisfy the following equations: + +$$\frac{\Gamma}{2}s[f_9(\infty) - g_9(\infty)] - \Gamma g_9(\infty) = 0, \quad f_9(\infty) + g_9(\infty) = \frac{9}{16},$$ + +with $R_9^9 = 1$. 
Then, we have + +$$f_9(\infty) = \frac{9(2+s)}{32(1+s)}, \quad g_9(\infty) = \frac{9s}{32(1+s)}.$$ + +which can be used in a more accurate calculation of the SAS spectrum. + +## 3.2. Results for the $F_g = 3 \rightarrow F_e = 3$ Transition + +Now we present the calculated results of the populations for the $F_g = 3 \rightarrow F_e = 3$ transition of the D2 line of Cs. The energy level diagram for the transition is shown in Figure 2c. The sublevel of the excited state with $m_e = -3$ is not optically excited, and thus the sublevel of the upper-ground state with $m_g = -4$ is not filled by spontaneous emission. We also obtain the solutions for the populations in the other ground state ($F_g = 4$). To prevent the duplication of the transition strengths in this transition, we introduce $\epsilon$ so that the transition strengths are given explicitly by + +$$\begin{aligned} & (R_1^1, R_2^2, R_3^3, R_4^4, R_5^5, R_6^6) \\ &= \left( \frac{3}{16} + \epsilon, \frac{5}{16} + 2\epsilon, \frac{3}{8} + 3\epsilon, \frac{3}{8} + 4\epsilon, \frac{5}{16} + 5\epsilon, \frac{3}{16} + 6\epsilon \right). \end{aligned}$$ + +We take $\epsilon \to 0$ at the final stage of the calculation. The $\lambda_{2k}$ ($k = 1, \dots, 6$) values at $\epsilon \to 0$ are given by + +$$\begin{aligned} & (\lambda_2, \lambda_4, \lambda_6, \lambda_8, \lambda_{10}, \lambda_{12}) \\ &= \left( -\frac{39}{512}s, -\frac{55}{512}s, -\frac{15}{128}s, -\frac{15}{128}s, -\frac{55}{512}s, -\frac{39}{512}s \right). \end{aligned}$$ + +We first find various $C_{ik}$ values using the recursion relations in Equations (19)–(22). 
For the transition for $i=1$, we obtain + +$$C_{1,1} = -\frac{3}{512}s, \quad C_{1,2} = \frac{3}{512}s;$$ + +thus, using Equation (23), we obtain + +$$f_1 = \frac{1}{16}e^{-39s\Gamma t/512}.$$ + +Using a similar method, we can obtain $f_2$ and $f_3$ as follows: + +$$f_2 = \frac{3}{64}e^{-39\tau/512} + \frac{1}{64}e^{-55\tau/512},$$ + +$$f_3 = \frac{25}{448}e^{-39\tau/512} + \frac{1}{64}e^{-55\tau/512} - \frac{1}{112}e^{-15\tau/128},$$ +---PAGE_BREAK--- + +where the simplified notation, $\tau \equiv s\Gamma t$, is used. In the calculation of $f_4$, because $\lambda_6$ and $\lambda_8$ are equal, $f_4$ may contain the term $\sim \tau e^{-15\tau/128}$. However, because the transition between $g_3$ and $f_4$ is prohibited, the particular solution for $f_4$ does not contain the term $\sim \tau e^{-15\tau/128}$. In contrast, $f_5$, $f_6$, and $f_7$ contain the terms proportional to $\tau$. The results for $f_4$, $f_5$, and $f_6$ are explicitly given by + +$$f_4 = \frac{15}{224}e^{-39\tau/512} + \frac{3}{32}e^{-55\tau/512} - \frac{11}{112}e^{-15\tau/128},$$ + +$$f_5 = \frac{135}{896}e^{-39\tau/512} + \left(-\frac{173}{640} + \frac{9\tau}{4096}\right)e^{-55\tau/512} + \frac{51}{280}e^{-15\tau/128},$$ + +$$f_6 = \left( \frac{269}{12544} + \frac{1125\tau}{114688} \right) e^{-39\tau/512} \\ + \left( \frac{19}{256} - \frac{45\tau}{16384} \right) e^{-55\tau/512} - \frac{13}{392}e^{-15\tau/128}.$$ + +Since $f_7$ is not excited by laser light, using Equation (27) yields, + +$$f_7 = \frac{68971}{327184} - \left( \frac{343323}{2119936} + \frac{10125\tau}{1490944} \right) e^{-39\tau/512} \\ + \left( \frac{1371}{30976} + \frac{135\tau}{180224} \right) e^{-55\tau/512} - \frac{3}{98}e^{-15\tau/128}.$$ + +The populations of the sublevels in the excited state, using Equation (26), can be expressed as follows: + +$$g_i = \frac{R_i^i s}{2} \left( f_i - \frac{1}{16} e^{-\Gamma i} \right).$$ + +The populations of the sublevels in the ground state $F_g = 4$ can be obtained using Equation (28), 
and are presented in the appendix. + +**4. Conclusions** + +We have presented a general method of solving homogeneous or inhomogeneous second-order DEs corresponding to the optical pumping phenomenon with $\sigma^+$ polarized laser light. Unlike the harmonic oscillator in mechanics or electrical circuits, this system only exhibits over-damped behavior. Although the method of solving inhomogeneous DEs with constant coefficients is straightforward in principle, obtaining accurate analytical solutions for the equations related to optically pumped atoms, in particular, those with complicated atomic structures, such as Cs, is cumbersome. Our method of solving the DEs provides an easy way to obtain analytical solutions at the weak intensity limit. This method is general and applicable to most atoms. As stated in Section 1, the obtained analytical form of the populations can be used in the calculation of spectroscopic lineshapes such as in saturated absorption spectroscopy (SAS) [16,17] and polarization spectroscopy (PS) [18]. Calculations of SAS and PS for Cs atoms are in progress. + +**Acknowledgments:** This research was supported by Basic Science Research Program through the National Research Foundation of Korea (NRF) funded by the Ministry of Science, ICT and future Planning (2014R1A2A2A01006654). + +**Conflicts of Interest:** The authors declare no conflict of interest. 
+ +**Appendix** + +When the laser frequency is tuned to the $F_g = 3 \rightarrow F_e = 3$ transition (Figure 2c), the populations of the sublevels in the ground state $F_g = 4$ are given by +---PAGE_BREAK--- + +$$h_1 = \frac{23}{312} - \frac{7}{624}e^{-39\tau/512},$$ + +$$h_2 = \frac{93}{1144} - \frac{41}{2496}e^{-39\tau/512} - \frac{5}{2112}e^{-55\tau/512},$$ + +$$h_3 = \frac{895}{10296} - \frac{1109}{52416}e^{-39\tau/512} - \frac{3}{704}e^{-55\tau/512} + \frac{1}{1008}e^{-15\tau/128},$$ + +$$h_4 = \frac{235}{2574} - \frac{685}{26208}e^{-39\tau/512} - \frac{19}{1760}e^{-55\tau/512} + \frac{41}{5040}e^{-15\tau/128},$$ + +$$h_5 = \frac{10727}{113256} - \frac{3475}{104832}e^{-39\tau/512} \\ +- \left( \frac{2641}{232320} + \frac{3\tau}{45056} \right) e^{-55\tau/512} + \frac{31}{2520}e^{-15\tau/128},$$ + +$$h_6 = \frac{143477}{1472328} - \left( \frac{843497}{19079424} + \frac{125\tau}{1490944} \right) e^{-39\tau/512} \\ ++ \left( \frac{401}{30976} - \frac{45\tau}{180224} \right) e^{-55\tau/512} - \frac{13}{3528}e^{-15\tau/128},$$ + +$$h_7 = \frac{293731}{2944656} - \left( \frac{147347}{2725632} + \frac{125\tau}{212992} \right) e^{-39\tau/512} \\ ++ \left( \frac{7889}{154880} - \frac{63\tau}{180224} \right) e^{-55\tau/512} - \frac{43}{1260}e^{-15\tau/128},$$ + +$$h_8 = \frac{299023}{2944656} - \left( \frac{24497}{681408} + \frac{125\tau}{53248} \right) e^{-39\tau/512} \\ ++ \left( -\frac{959}{116160} + \frac{21\tau}{45056} \right) e^{-55\tau/512} + \frac{13}{2520}e^{-15\tau/128}.$$ + +Finally, we note that the sum of the populations is conserved, i.e., + +$$\frac{1}{16} + \sum_{i=1}^{7} f_i + \sum_{i=1}^{8} h_i = 1,$$ + +where $1/16$ is the population at the sublevel $m_g = -4$ in the upper ground state. + +## References + +1. Happer, W. Optical pumping. *Rev. Mod. Phys.* **1972**, *44*, 169–249. + +2. McClelland, J.J. Optical State Preparation of Atoms. 
In *Atomic, Molecular, and Optical Physics: Atoms and Molecules*; Dunning, F.B., Hulet, R.G., Eds.; Academic Press: San Diego, CA, USA, 1995; pp. 145–170. + +3. Smith, D.A.; Hughes, I.G. The role of hyperfine pumping in multilevel systems exhibiting saturated absorption. *Am. J. Phys.* **2004**, *72*, 631–637. + +4. Magnus, F.; Boatwright, A.L.; Flodin, A.; Shiell, R.C. Optical pumping and electromagnetically induced transparency in a lithium vapour. *J. Opt. B: Quantum Semiclass. Opt.* **2005**, *7*, 109–118. + +5. Han, H.S.; Jeong, J.E.; Cho, D. Line shape of a transition between two levels in a three-level Λ configuration. *Phys. Rev. A* **2011**, *84*, doi:10.1103/PhysRevA.84.032502. + +6. Sydoryk, I.; Bezuglov, N.N.; Beterov, I.I.; Miculis, K.; Saks, E.; Janovs, A.; Spels, P.; Ekers, A. Broadening and intensity redistribution in the Na(3p) hyperfine excitation spectra due to optical pumping in the weak excitation limit. *Phys. Rev. A* **2008**, *77*, doi:10.1103/PhysRevA.77.042511. + +7. Porfido, N.; Bezuglov, N.N.; Bruvelis, M.; Shayeganrad, G.; Birindelli, S.; Tantussi, F.; Guerri, I.; Viteau, M.; Fioretti, A.; Ciampini, D.; et al. Nonlinear effects in optical pumping of a cold and slow atomic beam. *Phys. Rev. A* **2015**, *92*, doi:10.1103/PhysRevA.92.043408. + +8. McClelland, J.J.; Kelley, M.H. Detailed look at aspects of optical pumping in sodium. *Phys. Rev. A* **1985**, *31*, 3704–3710. +---PAGE_BREAK--- + +9. Farrell, P.M.; MacGillivary, W.R.; Standage, M.C. Quantum-electrodynamic calculation of hyperfine-state populations in atomic sodium. *Phys. Rev. A* **1988**, *37*, 4240–4251. + +10. Balykin, V.I. Cyclic interaction of Na atoms with circularly polarized laser radiation. *Opt. Commun.* **1980**, *33*, 31–36. + +11. Liu, S.; Zhang, Y.; Fan, D.; Wu, H.; Yuan, P. Selective optical pumping process in Doppler-broadened atoms. *Appl. Opt.* **2011**, *50*, 1620–1624. + +12. Moon, G.; Shin, S.R.; Noh, H.R. 
Analytic solutions for the populations of an optically-pumped multilevel atom. *J. Korean Phys. Soc.* **2008**, *53*, 552–557. + +13. Moon, G.; Heo, M.S.; Shin, S.R.; Noh, H.R.; Jhe, W. Calculation of analytic populations for a multilevel atom at low laser intensity. *Phys. Rev. A* **2008**, *78*, doi:10.1103/PhysRevA.78.015404. + +14. Won, J.Y.; Jeong, T.; Noh, H.R. Analytical solutions of the time-evolution of the populations for $D_1$ transition line of the optically-pumped alkali-metal atoms with $I = 3/2$. *Optik* **2013**, *124*, 451–455. + +15. Noh, H.R. Analytical Study of Optical Pumping for the $D_1$ Line of $^{85}$Rb Atoms. *J. Korean Phys. Soc.* **2014**, *64*, 1630–1635. + +16. Moon, G.; Noh, H.R. Analytic solutions for the saturated absorption spectra. *J. Opt. Soc. Am. B* **2008**, *25*, 701–711. + +17. Moon, G.; Noh, H.R. Analytic Solutions for the Saturated Absorption Spectrum of the $^{85}$Rb Atom with a Linearly Polarized Pump Beam. *J. Korean Phys. Soc.* **2009**, *54*, 13–22. + +18. Do, H.D.; Heo, M.S.; Moon, G.; Noh, H.R.; Jhe, W. Analytic calculation of the lineshapes in polarization spectroscopy of rubidium. *Opt. Commun.* **2008**, *281*, 4042–4047. + +19. Thornton, T.; Marion, J.B. *Classical Dynamics of Particles and Systems*, 5th ed.; Brooks/Cole: New York, NY, USA, 2004. + +20. Cohen-Tannoudji, C.; Dupont-Roc, J.; Grynberg, G. *Atom-Photon Interactions Basic Processes and Applications*; Wiley: New York, NY, USA, 1992. + +21. Meystre, P.; Sargent, M., III. *Elements of Quantum Optics*; Springer: New York, NY, USA, 2007. + +22. Metcalf, H.J.; van der Straten, P. *Laser Cooling and Trapping*; Springer: New York, NY, USA, 1999. + +© 2016 by the author. Licensee MDPI, Basel, Switzerland. This article is an open access article distributed under the terms and conditions of the Creative Commons Attribution (CC BY) license (http://creativecommons.org/licenses/by/4.0/). 
+---PAGE_BREAK--- + +Article + +Local Dynamics in an Infinite Harmonic Chain + +M. Howard Lee + +Department of Physics and Astronomy, University of Georgia, Athens, GA 30602, USA; mhlee@uga.edu + +Academic Editor: Young Suh Kim + +Received: 26 February 2016; Accepted: 6 April 2016; Published: 15 April 2016 + +**Abstract:** By the method of recurrence relations, the time evolution in a local variable in a harmonic chain is obtained. In particular, the autocorrelation function is obtained analytically. Using this result, a number of important dynamical quantities are obtained, including the memory function of the generalized Langevin equation. Also studied are the ergodicity and chaos in a local dynamical variable. + +**Keywords:** recurrence relations; harmonic chain; local dynamics; ergodicity; chaos + +# 1. Introduction + +A harmonic chain has been a useful model for a variety of dynamical phenomena, such as the lattice vibrations in solids, Brownian motion and diffusion. It has also been a useful model for testing theoretical concepts, such as the thermodynamic limit, irreversibility and ergodicity. One can study these properties in a harmonic chain. In this work, we shall touch on most of these issues analytically. + +The dynamics in a chain of nearest-neighbor (nn) coupled monatomic oscillators (defined in Section 3) has been studied in the past almost exclusively by means of normal modes [1]. If there are *N* oscillators in a chain, the single-particle or individual coordinates of the oscillators $q_i$, $i = 1, 2, .., N$, are replaced by the total or collective coordinates $Q_j$, $j = 1, 2, .., N$. In the space of the collective coordinates, the “collective” oscillators are no longer coupled. As a result, their motions are simply periodic. Each collective oscillator would have a unique frequency associated with it (if degeneracy due to symmetry could be ignored). 
+ +On the one hand, this collective picture is very helpful in understanding the dynamics of a harmonic chain by avoiding what might be a complicated picture due to a set of motions of coupled single particles. If only the collective behavior is required, this approach is certainly sufficient. + +On the other hand, if one wishes to know the dynamics of a single oscillator in a chain, the traditional approach becomes cumbersome. Why would one wish to know the dynamics of one oscillator in a chain? There may be a defect in a chain, for example. It may be a heavier or lighter mass than its neighbors'. Diffusivity is attributed to the motions of single oscillators. For these and other physical reasons that will become apparent, there is a need to study how a single oscillator embedded in a chain evolves in time. We shall term it local dynamics to be distinguished from total dynamics. + +In the 1980s, a new method of calculating the time evolution in a Hermitian system was developed, known as the method of recurrence relations [2]. It solves the Heisenberg equation of motion for a dynamical variable of physical interest, which may be the momentum of a single particle, the number or current density. Although it was intended to deal with dynamical variables of quantum origin, i.e., operators, it was found to be applicable to classical variables by replacing commutators with Poisson brackets. During the past three decades, this method has been widely applied to a variety of dynamical issues emanating from the electron gas, lattice spins, lattice vibrations and classical fluids. For reviews, see [3–7]. For a partial list of recent papers, see [8–21]. +---PAGE_BREAK--- + +Formally, this method shows what types of solutions are admissible [22]. It provides a deeper insight into the memory function and the Langevin equation. It has also provided a basis from which to develop the ergometric theory of the ergodic hypothesis. 
+ +In Section 2, we will briefly introduce the method of recurrence relations, mostly by assertion, referring the proofs to the original sources and review articles. In Section 3, the dynamics of a local variable (a single particle) in an infinite harmonic chain will be solved by the method of recurrence relations. Some useful physical applications will follow to complete this work. + +## 2. Method of Recurrence Relations + +Let $A$ be a dynamical variable, e.g., a spin operator, and $H(A)$ an N-body Hamiltonian. The number of particles $N$ is not restricted initially. The Hamiltonian $H$ must however be Hermitian, which means that there is to be no dissipation in the dynamics of $A$. The time evolution of $A$ is to be given by the Heisenberg equation of motion: + +$$ \dot{A}(t) = i[H, A(t)] \qquad (1) $$ + +with $\hbar = 1$ and $[H, A] = HA - AH$. If $A$ is a classical variable, the rhs of Equation (1) is to be replaced by the Poisson brackets. + +A formal solution for Equation (1) may be viewed in geometrical terms. Let $A(t)$ be a vector in an inner product space $S$ of $d$ dimensions. This space is spanned by $d$ basis vectors $f_k$, $k = 0, 1, .., d-1, d \ge 2$. These basis vectors are mutually orthogonal: + +$$ (f_k, f_{k'}) = 0 \text{ if } k \neq k' \qquad (2) $$ + +where $(\cdot, \cdot)$ denotes an inner product, which defines the space $S$. Observe that they are time independent. In terms of these, $A(t)$ may be expressed as: + +$$ A(t) = \sum_k a_k(t) f_k \qquad (3) $$ + +where $a_k$, $k = 0, 1, .., d-1$, is a set of functions or basis functions conjugate to the basis vectors. They carry time dependence. + +As $t$ evolves, this vector $A(t)$ evolves in this space $S$. Its motion in $S$ is governed by Equation (1), so that it is $H$ specific. Since $||A(t)|| = ||A||$, that is $(A(t), A(t)) = (A, A)$, the "length" of $A(t)$ in $S$ is an invariant of time. As $t$ evolves, $A(t)$ may only rotate in $S$. 
This means that there is a Bessel equality, which limits what kind of rotation is allowed. + +Since both the basis vectors and functions are only formally stated, Equation (3) is not yet useful. One does not know what is $d$, the dimensionality of $S$. To make it useful, we need to realize $S$, an abstract space by defining the inner product in a physically-useful way. + +### 2.1. Kubo Scalar Product + +We shall realize $S$ by the Kubo scalar product (KSP) as follows: let $X$ and $Y$ be two vectors in $S$. The inner product of $X$ and $Y$ is defined as: + +$$ (X,Y) = 1/\beta \int_0^\beta d\lambda < X(\lambda)Y^* > - < X > < Y^* > \qquad (4) $$ + +where $\beta = 1/k_B T$, $T$ temperature, $< .. >$ means an ensemble average, * means Hermitian conjugation and: + +$$ X(\lambda) = e^{\lambda H} X e^{-\lambda H} \qquad (5) $$ + +Equation (4) is known as KSP in many body theory [23]. There is a deep physical reason for using KSP to realize $S$ [24]. When realized by KSP, it shall be denoted $\tilde{S}$. +---PAGE_BREAK--- + +## 2.2. Basis Vectors + +We have proved that the basis vectors in $\bar{S}$ satisfy the following recurrence relation, known as RR I: + +$$f_{k+1} = \dot{f}_k + \Delta_k f_{k-1}, \quad k = 0, 1, 2, \dots, d-1 \qquad (6)$$ + +where $\dot{f}_k = i[H, f_k]$, $\Delta_k = ||f_k||/||f_{k-1}||$, with $f_{-1} = 0$ and $\Delta_0 = 1$. + +If $k=0$ in Equation (6), $f_1 = \dot{f}_0$. With $f_0 = A$ (by choice), $f_1$ is obtained and, therewith, $\Delta_1$. + +Given $\Delta_1$, by setting $k=1$ in Equation (6), one can calculate $f_2$, therewith $\Delta_2$. If proceeding in this manner, $f_d = 0$ for some finite value of $d$ giving a finite dimensional $\bar{S}$ or $f_d \neq 0$ as $d \to \infty$ giving an infinite dimensional $\bar{S}$. By RR I, we can determine $d$ and, thus, generate all of the basis vectors needed to span $A(t)$ in $\bar{S}$ for a particular $H$. 
In addition, we can construct the hypersurface $\sigma$: + +$$\sigma = (\Delta_1, \Delta_2, \dots, \Delta_{d-1}) \qquad (7)$$ + +As we shall see, the dynamics is governed by $\sigma$. The $\Delta$'s known as the recurrants are successive ratios of the norms of $f_k$. They are static quantities, so that they are in principle calculable as a function of parameters, such as temperature, wave vectors, etc., for a given $H$. They collectively define the shape of $\bar{S}$, constraining what kind of trajectory is possible for $A(t)$. + +## 2.3. Basis Functions + +If RR I is applied to Equation (1), it yields a recurrence relation for the basis functions: with $a_{-1} = 0$, + +$$\Delta_{k+1} a_{k+1} = -\dot{a}_k + a_{k-1}, \quad k = 0, 1, \dots, d-1 \qquad (8)$$ + +where $\dot{a}_k = d/dt a_k$. Equation (8) is known as RR II. It is actually composed of two recurrence relations, one for $k=0$ (because of $a_{-1}=0$) and another for the rest $k=1, 2, \dots, d-1$. + +There is an important boundary condition on $a_k$. By Equation (3), $A(t=0) = A = f_0$. Thus, $a_0(t=0) = 1$ and $a_k(t=0) = 0$, $k \neq 0$. These basis functions are autocorrelation functions. For example, $a_0 = (A(t), A)/(A, A)$, $a_1 = (A(t), f_1)/(f_1, f_1) = (A(t), \hat{A})/(\hat{A}, \hat{A})$, etc. Hence, the static and dynamic information is to be contained in them. + +## 2.4. Continued Fractions + +If $a_0$ is known, the rest of the basis functions can be obtained one by one by RR II. To obtain it, let $L_z a_k(t) = \tilde{a}_k(z)$, $k = 0, 1, \dots, d-1$, where $L_z$ is the Laplace transform operator. The RR II is transformed to: + +$$1 = z\tilde{a}_0 + \Delta_1 \tilde{a}_1 \qquad (9)$$ + +$$\tilde{a}_{k-1} = z\tilde{a}_k + \Delta_{k+1}\tilde{a}_{k+1}, \quad k = 1, 2, \dots, d-1 \qquad (10)$$ + +From Equation (9), $\tilde{a}_0$ is obtained in terms of $\tilde{b}_1 = \tilde{a}_1/\tilde{a}_0$. By setting $k=1$ in Equation (10), $\tilde{b}_1$ in terms of $\tilde{b}_2 = \tilde{a}_2/\tilde{a}_1$. 
Proceeding term by term, we obtain the continued fraction form for $\tilde{a}_0$: + +$$\tilde{a}_0(z) = 1/(z + \Delta_1/(z + \dots + \Delta_{d-1}/z)) \qquad (11)$$ + +If the hypersurface is determined, the continued fraction may be summable. By taking $L_z^{-1}$ on Equation (11), we can obtain $a_0(t)$: + +$$a_0(t) = 1/2\pi i \int_C \tilde{a}_0(z) e^{zt} dz, \quad \text{Re } z > 0 \qquad (12)$$ + +where by $C$, we mean that the contour is to be on the right of all singularities contained in the rhs of Equation (11). If $a_0(t)$ is thus determined, the rest of the basis functions can be obtained one by one by +---PAGE_BREAK--- + +RR II. Hence, $A(t)$ (see Equation (3)) is completely solved, if only formally. This recurrence relation analysis can be implemented for a harmonic chain, described in Section 3. + +**3. Local Dynamics in a Harmonic Chain** + +Consider a classical harmonic chain of *N* equal masses in periodic boundary conditions (*N* even number, *m* mass and $\kappa$ the coupling constant) defined by the Hamiltonian: + +$$H = \sum_{i=-N/2}^{N/2-1} \left[ \frac{p_i^2}{2m} + \frac{\kappa}{2} (q_i - q_{i+1})^2 \right] \quad (13)$$ + +where $p_i$ and $q_i$ are the momentum and the coordinate of mass *m* at site *i*, and sites $-N/2$ and $N/2 - 1$ are nns. Let $A = p_0$ the momentum of mass *m* at Site 0. The time evolution of $p_0$ follows from the method of recurrence relations: in units $m = \kappa = 1$, + +$$p_0(t) = a_0(t) p_0 + a_1(t) ((q_{-1} + q_1)/2 - q_0) + a_2(t) (p_{-1} + p_1) + \dots \quad (14)$$ + +Let HC denote a harmonic chain of *N* masses defined by Equation (13). It has been shown that for HC, $d = N + 1$ and that there are *N* recurrants in the hypersurface [25]. If the recurrants are expressed in our dimensionless units, the hypersurface has a symmetric structure in the form: $\sigma(N = 2) = (2, 2)$, $\sigma(N = 4) = (2, 1, 1, 2)$, $\sigma(N = 6) = (2, 1, 1, 1, 1, 2)$, etc. 
We can conclude that for *N* oscillators (*N* even number), $\Delta_1$ and $\Delta_N = 2$ and $\Delta_k = 1$, $k = 2, 3, .., N - 1$, giving a general form: + +$$\sigma(N) = (2, 1, 1, \dots, 1, 1, 2) \quad (15)$$ + +If these recurrants are substituted in Equation (11), they will realize Equation (11). If $N \to \infty$ ($d \to \infty$), + +$$\sigma = (2, 1, 1, \dots) \quad (16)$$ + +Taking this limit breaks the front-end symmetry. Equation (11) is summable: + +$$\tilde{a}_0(z) = \frac{1}{\sqrt{4+z^2}} \quad (17)$$ + +By taking the inverse transform, see Equation (12), we obtain: + +$$a_0(t) = J_0(2t) \quad (18)$$ + +where *J* is the Bessel function. This is a known result [26,27]. By RR II, we obtain: + +$$a_k(t) = J_k(2t), \quad k = 1, 2, \dots \quad (19)$$ + +Therewith, we have obtained the complete time evolution of $p_0$ in an infinite HC. + +Observe that $a_0(t \to \infty) = 0$. The vanishing of the autocorrelation function at $t = \infty$ is an indication of irreversibility. It is possible in a Hermitian system only by the thermodynamic limit being taken. This property is an important consideration for the ergodicity of the dynamical variable $A = p_0$, to be considered later [28]. + +*Langevin Dynamics* + +The equation of motion for A may also be expressed by the generalized Langevin equation [29]: + +$$\frac{d}{dt} A(t) + \int_{0}^{t} M(t-t')A(t')dt' = F(t) \quad (20)$$ +---PAGE_BREAK--- + +where *M* and *F* are the memory function and the random force, resp. They are important quantities in many dynamical issues, most often given phenomenologically or approximately [23]. For an infinite HC, we can provide exact expressions for them. + +In obtaining a continued fraction for $\tilde{a}_0(z)$, we have introduced $\tilde{b}_k = \tilde{a}_k / \tilde{a}_{k-1}$, $k = 1, 2, ..d - 1$. By convolution, we can determine $b_k$. They are the basis functions for $\tilde{S}_1$, a subspace of $\tilde{\mathcal{S}}$, spanned by $f_k$, $k = 1, 2, ..d - 1$. 
They satisfy RR II with the boundary condition that $b_1(t = 0) = 1$ and $b_k(t = 0) = 0$ if $k \neq 1$, with $b_0 = 0$. The hypersurface for this subspace is the same as Equation (7) with $\Delta_1$ removed. One can also express $\tilde{b}_1(z)$ in a continued fraction: + +$$ \tilde{b}_1(z) = 1/(z + \Delta_2/(z + \Delta_3/(z + \dots + \Delta_{d-1}/z))) \quad (21) $$ + +The random force is a vector in $\tilde{\mathcal{S}}_1$; thus, + +$$ F(t) = \sum b_k(t) f_k \quad (22) $$ + +and: + +$$ M(t) = \Delta_1 b_1(t) \quad (23) $$ + +For the infinite HC, $\sigma_1 = (1, 1, 1, \dots)$, summable to: + +$$ \tilde{b}_1(z) = 1/2 (\sqrt{z^2+4} - z) \quad (24) $$ + +By the inverse Laplace transform, we obtain: + +$$ b_1(t) = J_1(2t)/t \quad (25) $$ + +and the rest by RR II. Therewith, we have obtained exact expressions for the two Langevin quantities. + +## 4. Dispersion Relation for Harmonic Chain + +Equation (11) for $\tilde{a}_0$ shows that if $d$ the dimensionality of $\tilde{\mathcal{S}}$ is finite, the continued fraction may be expressed as a ratio of two polynomials in $z$. 
For HC, let us denote the rhs of Equation (11) by $\tilde{\Psi}_N(z)$ and express the continued fraction as a ratio of two polynomials: + +$$ \tilde{\Psi}_N(z) = P_N(z)/Q_N(z) \quad (26) $$ + +Since every $Q_N$ is found to contain $z(z^2+4)$ as a common factor, we express it as: + +$$ Q_N = z(z^2+4)q_N, \quad N = 2, 4, 6, \dots \quad (27) $$ + +Below, we list $P'$s and $q'$s for several values of $N$, sufficient to draw a general conclusion therefrom: + +(a) $N=2$, $\sigma = (2, 2)$ +$P_2 = z^2 + 2$, +$q_2 = 1$ + +(b) $N=4$, $\sigma = (2, 1, 1, 2)$ +$P_4 = z^4 + 4z^2 + 2$ +$q_4 = z^2 + 2$ + +(c) $N=6$; $\sigma = (2, 1, 1, 1, 1, 2)$ +$P_6 = z^6 + 6z^4 + 9z^2 + 2$ +$q_6 = z^4 + 4z^2 + 3$ + +(d) $N=8$; $\sigma = (2, 1, 1, 1, 1, 1, 1, 2)$ +$P_8 = z^8 + 8z^6 + 20z^4 + 16z^2 + 2$ +---PAGE_BREAK--- + +$$q_8 = z^6 + 6z^4 + 10z^2 + 4$$ + +(e) $N=10$; $\sigma = (2, 1, 1, 1, 1, 1, 1, 1, 1, 2)$ +$P_{10} = z^{10} + 10z^8 + 35z^6 + 50z^4 + 25z^2 + 2$ +$q_{10} = z^8 + 8z^6 + 21z^4 + 20z^2 + 5$ + +(f) $N = 12$; $\sigma = (2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2)$ +$P_{12} = z^{12} + 12z^{10} + 54z^8 + 112z^6 + 105z^4 + 36z^2 + 2$ +$q_{12} = z^{10} + 10z^8 + 36z^6 + 56z^4 + 35z^2 + 6$ + +If $z = 2i\sin\alpha$, $\alpha \neq 0$, the above polynomials have simple expressions for all orders of N: + +$$P_N = 2\cos N\alpha \quad (28)$$ + +$$q_N = \sin N\alpha / \sin 2\alpha, \sin 2\alpha \neq 0 \quad (29)$$ + +## 4.1. Zeros of $q_N$ + +The dispersion relation can be deduced from $z_k$ the zeros of $q_N$: + +$$q_N(z) = \Pi(z - z_k) \quad (30)$$ + +From Equation (29), + +$$\sin N\alpha_k = 0 \quad (31)$$ + +with $\sin 2\alpha_k \neq 0$ and $\alpha_k \neq 0$. 
Hence, + +$$\alpha_k = (\pi/N)k, k = \pm 1, \pm 2, \dots, \pm(N/2-1) \quad (32)$$ + +Hence, with $k$ given above, + +$$z_k = 2i\sin\alpha_k \quad (33)$$ + +One may also write: + +$$\Pi(z - z_k)|_{z=2i\sin\alpha} = \sin N\alpha / \sin 2\alpha \quad (34)$$ + +Since $Q_N = z(z^2 + 4)q_N$ (see Equation (27)), the prefactor contributes to the zeros of $Q_N$. They may be included in Equation (32) if the range of $k$ is made to include zero and $N/2$. + +## 4.2. $a_0(t)$ for Finite N + +Given the zeros of $Q_N$, it is now straightforward to obtain $a_0(t)$ by Equation (12). For example, if $N=6$, + +$$a_0(t) = 1/6[1 + 2\cos t + 2\cos\sqrt{3}t + \cos 2t] \quad (35)$$ + +A general expression would be: + +$$a_0(t) = \frac{1}{N} \sum_k \cos \omega_k t \quad (36)$$ + +where: + +$$\omega_k = 2|\sin(\pi v_k)|, v_k = k/N \quad (37)$$ + +$k = -N/2, \dots, -1, 0, 1, \dots, N/2$. Since Equation (36) is a dispersion relation, $v$'s will be termed "wave vectors". +---PAGE_BREAK--- + +4.3. $a_0(t)$ When $N \to \infty$ + +If $N \to \infty$, the sum in Equation (36) may be converted to an integral: + +$$ \text{rhs of Equation (36)} = 1/2\pi \int_{-\pi}^{\pi} e^{2it\sin\theta} d\theta \quad (38) $$ + +The rhs of Equation (38) is an integral representation of $J_0(2t)$. Hence, $a_0(t) = J_0(2t)$, the same as Equation (18). + +It is worth noting here that the zeros of $J_0(2t)$ can thus be obtained from Equation (36) by taking $N \to \infty$ by the condition: + +$$ \omega_k t = \pi/2(2n+1), \quad n=0,1,2,\dots \qquad (39) $$ + +If we write $J_0(2t) = \Pi(2t - 2t_k)$, by Equation (37): + +$$ 2t_k = \pi(2n+1)/|2\sin\pi k/N|, \quad k/N = (-1/2, 1/2) \qquad (40) $$ + +Evidently, there are infinitely many zeros in $J_0$ [30]. This result will be significant in Section 6. + +4.4. $\tilde{a}_0(z) = \Psi_N(z)$ When $N \to \infty$ + +By Equations (26)–(29), + +$$ \tilde{\Psi}_N(z) = V \frac{\cos N\alpha}{\sin N\alpha} \qquad (41) $$ + +where $V = 2\sin2\alpha/(z(z^2+4)) = d\alpha/dz$ (by $z = 2i\sin\alpha$). 
Furthermore: + +$$ \begin{aligned} \frac{\cos N\alpha}{\sin N\alpha} &= 1/N \frac{d}{d\alpha}(\log\sin N\alpha) \\ &= 1/N \frac{d}{d\alpha}\left[\log(\sin N\alpha/\sin 2\alpha) + \log\sin 2\alpha\right] \end{aligned} \qquad (42) $$ + +The second term on the rhs of Equation (42) may be dropped if $N \to \infty$. For the first term, by Equations (28) and (29), + +$$ \text{rhs of Equation (42)} = dz/d\alpha \frac{d}{dz} \log\Pi(z-z_k) = dz/d\alpha \sum \frac{1}{z-z_k} \qquad (43) $$ + +The prefactor $dz/d\alpha = 1/V$. Since $N \to \infty$, we can convert the above sum into an integral: writing $\tilde{\Psi} = \tilde{\Psi}_N$, $N \to \infty$, + +$$ \Psi(z) = \frac{1}{\pi} \int_{-\pi/2}^{\pi/2} \frac{d\theta}{z - 2i\sin\theta} = \frac{1}{\sqrt{4+z^2}} \qquad (44) $$ + +The above result is the same as Equation (17). + +The asymptotic results Equations (16) and (17) were obtained by taking the $N \to \infty$ limit first on the hypersurface. What is shown in Section 4 is that the same results are also obtained from finite N solutions for $a_0(t)$. + +5. Ergodicity of Dynamical Variable $A = p_0$ + +If A is a variable of a Hermitian system of N particles, $N \to \infty$, it is possible to determine whether it is ergodic. According to the ergometric theory of the ergodic hypothesis [31], A is ergodic if $W_A \neq 0$ or $\infty$, where: + +$$ W_A = \int_0^\infty r_A(t) dt \qquad (45) $$ +---PAGE_BREAK--- + +where $r_A(t) = (A(t), A)/(A, A) = a_0(t)$, the autocorrelation function of A. 
By Equation (12), + +$$W_A = \tilde{r}_A(z=0) \quad (46)$$ + +If $d \to \infty$ as $N \to \infty$, which is the case of HC, $z \to 0$ on Equation (11) yields an infinite product of the following form: + +$$W_A = \frac{\Delta_2 \times \Delta_4 \times \dots \times \Delta_{2n}}{\Delta_1 \times \Delta_3 \times \dots \Delta_{2n+1}}, \quad n \to \infty \quad (47)$$ + +Ordinarily, infinite products are difficult to evaluate, as they seem to require product rules that differ from those for finite products. However, they can be determined by Equation (45) or Equation (46) as illustrated below. + +## 5.1. Infinite Harmonic Chain + +If $A = p_0$ of HC, we can determine whether $A$ is ergodic by evaluating Equations (45)–(47). If $N \to \infty$, $\sigma = (2, 1, 1, ...)$ (see (16)), and $\Psi(t) = J_0(2t)$ (see Equation (18)). Hence, by Equation (45), $W_A = 1/2$. + +It was shown that $\Psi(z) = 1/\sqrt{z^2+4}$; see Equation (17). Hence, by Equation (46), $W_A = 1/2$. Finally, by $\sigma$, we can write down the infinite product: + +$$W_A = \frac{1 \times 1 \times 1 \times \dots}{2 \times 1 \times 1 \times \dots} = \frac{1}{2} \quad (48)$$ + +in agreement with the previous results. As noted above, computing infinite products is a delicate matter. The order of terms in an infinite product may not be altered, nor the terms themselves. In Equation (48), such a nicety did not enter since all elements are one but one. Compare with another example in Section 5.2 below. + +## 5.2. Infinite Harmonic Chain with One End Attached to a Wall + +We shall now change HC defined by Equation (13) slightly. Let the coupling between the oscillators at $q_{-2}$ and $q_{-1}$ be cut. Furthermore, let the mass of the oscillator at $q_{-1}$ be infinitely heavy, so that the oscillator at $q_0$ is attached as if to a wall. The rest of the chain is unchanged. The oscillators in this new configuration are labeled 0, 1, 2, ..., $N-1$, with one end attached to a wall and the other end free. 
Finally, let $N \to \infty$. + +If $A = p_0$, the recurrants are found to have the following form [27,32]: + +$$\Delta_1 = 2/1, \Delta_3 = 3/2, \Delta_5 = 4/3, \dots, \Delta_{2} = 1/2, \Delta_4 = 2/3, \Delta_6 = 3/4, \dots$$ + +Evidently, they may be put in the form: $\Delta_{2n-1} = (n+1)/n$ and $\Delta_{2n} = n/(n+1)$, $n = 1, 2, 3, ...$ These recurrants imply that for $A = p_0$ [27,32], + +$$a_0(t) = J_0(2t) - J_4(2t) \quad (49)$$ + +$$\tilde{a}_0(z) = \frac{1}{\sqrt{(z^2 + 4)}} [1 - \frac{1}{16} (\sqrt{z^2 + 4} - z)^4] \quad (50)$$ + +By Equation (47), + +$$W_A = \frac{1/2 \times 2/3 \times 3/4 \times \dots \times n/(n+1)}{2/1 \times 3/2 \times 4/3 \times \dots \times (n+1)/n}, \quad n \to \infty \quad (51)$$ + +Each term in the numerator is less than one, while each term in the denominator greater than one. If the terms and the order are preserved, $W_A \to 0$. By Equations (45) and (46), it may be tested using Equations (49) and (50). In both cases, we obtain $W_A = 0$ verifying the infinite product. +---PAGE_BREAK--- + +Since $W_A = 0$, $A = p_0$ is not ergodic in this chain. For this variable, the phase space is not transitive. If mass at Site 0 is slightly perturbed, the perturbed energy is not delocalized everywhere [33]. + +**6. Harmonic Chain and Logistic Map** + +The logistic map (LM) is sometimes called the Ising model of chaos for being possibly the simplest model exhibiting chaos [34]. If $x$ is a real number in an interval $(0,1)$, the map is defined by: + +$$f(x) = ax(1-x), \quad x = (0,1) \tag{52}$$ + +where $a$ is a control parameter, a real number limited to $1 < a \le 4$. Thus, the map is real and bounded as $x$. If there exists $x = x^*$, such that $f(x^*) = x^*$, it is termed a fixed point of $f(x)$. If $f^n$ is an $n$-fold nested function of $f$, i.e., $f^n(x) = f(f^{n-1}(x)) = f(...f(x)...)$, with $f^1 \equiv f$, there may be fixed points for $f^n : f^n(x^*) = x^*$. 
The values of the fixed points and the number of the fixed points will depend on the size of the control parameter $a$. + +If $a < 3$, there is only one fixed point for any $n$. There is a remarkable theorem due to Sharkovskii [35] on 1d continuous maps on the interval, such as LM. As applied to this map, this theorem says that if $a \ge 1 + \sqrt{8}$, there are infinitely many fixed points as $n \to \infty$. This implies that a trajectory starting from almost any point in $(0,1)$ is chaotic. At $a = 4$ (the largest possible value), the fixed points fill the interval $x = (0,1)$ densely with a unique distribution $\rho_x, \int \rho_x dx = 1$. This distribution is known as the invariant density of fixed points, first deduced by Ulam [36,37]: + +$$\rho_x = \frac{1}{\pi \sqrt{x(1-x)}}, \quad 0 < x < 1 \tag{53}$$ + +The invariant density refers to the spectrum of fixed points in $(0,1)$. The square-root singularity in Equation (53), a branch cut from 0–1, indicates that the spectrum is dense. If $\mu$ is a Lebesgue measure, $d\mu(x) = \rho_x dx$. Hence, $\mu = 1$. + +We wish to see whether $\rho_x$, a distribution of fixed points, bears a relationship to $\rho_\omega$, the power spectrum of frequencies in HC. For this purpose, consider the following transformations of variables: + +$$x = 1/2 + 1/4 \omega \tag{54}$$ + +and: + +$$\rho_x dx = \rho_\omega d\omega \tag{55}$$ + +By substituting Equation (54) in (53), we obtain by Equation (55): + +$$\begin{align} +\rho_{\omega} &= \frac{1}{\pi\sqrt{4-\omega^2}}, & -2 < \omega < 2 \tag{56} \\ +&= 0 \text{ if otherwise.} +\end{align}$$ + +For an infinite HC, $\tilde{a}_0(z = i\omega) = \pi\rho_\omega$. By Equation (17), or Equation (44), the rhs of Equation (56) is precisely the power spectrum for $A = p_0$. Equation (56) shows that the fixed points of LM at $a = 4$ ($LM_4$) correspond to the frequencies of HC. 
+ +Since the frequencies in the power spectrum are positive quantities, let us express Equation (54) as: + +$$\omega = 2|1 - 2x|, \quad 0 < x < 1 \tag{57}$$ + +For $LM_4$, + +$$x = \sin^2 \pi y / 2 \tag{58}$$ + +$$y/2 = l/(2N+1), \quad l=1,2,\dots,N \tag{59}$$ +---PAGE_BREAK--- + +$y$ being the pre-fixed points of $x$ the fixed points. If Equation (59) is substituted in Equation (57) and $y$ replaced by $v + 1/2$: + +$$ \omega = 2|\sin\pi v| \quad (60) $$ + +The above is identical to Equation (37), the dispersion relation for HC. In the limit $N \to \infty$, both $v$ and $y$ lie in the same interval $(-1/2, 1/2)$. This property shows that the pre-fixed points of $LM_4$ also correspond to the wave vectors of HC. + +The correspondence between $x$ and $\omega$ and also between $y$ and $v$ indicate that the iteration dynamics of $LM_4$ and the time evolution in HC are isomorphic in their local variables. This implies that if a variable in HC is ergodic, a corresponding variable in $LM_4$ is also ergodic. If the trajectory of an initial value in $LM_4$ is chaotic, we must also conclude that the trajectory of a local variable in HC must also be chaotic. + +Chaos in HC? Let us first examine chaos in $LM_4$. According to Sharkovskii, chaos is implied where there are infinitely many periods. By our work, they form a set of uncountable pre-fixed points of Lebesgue measure 1. This results in an aleph cycle, which can never return to the initial point [34]. In an infinite HC, there are also infinitely many periods. See Equation (40). Thus, the HC has the necessary and possibly sufficient property for chaos. + +In an infinite HC attached to a wall (see Section 5.2), there is chaos also, as there are infinitely many periods. However, as was already shown, its variables are not ergodic. This indicates that ergodicity is a subtler property than chaos. In a continuous map, there may be chaos, but not ergodicity. + +## 7. 
Concluding Remarks + +In this work, we have dwelt with the dynamics of a monatomic chain with which to illustrate some of the finer points of the dynamics contained in it. This simplest of harmonic chains can be made richer in a variety of ways. One can make one oscillator to have a different mass than its neighbors [25]. It would be a model for an impurity or a defect. One could make it a periodic diatomic chain [8] or even an aperiodic diatomic chain [8]. We are providing a list of recent advances made by the method of recurrence relations on others [38–44]. For related studies on HC by Fokker-Planck dynamics and non-exponential decay, see [7,45,46]. + +**Acknowledgments:** I thank Joao Florencio for having kindled my interest in the dynamics of harmonic chains through our collaboration in the 1980s. I thank the University of Georgia Franklin College for supporting my research through the regents professorship. This work is dedicated to the memory of Bambi Hu. + +**Conflicts of Interest:** The author declares no conflict of interest. + +## References + +1. Mazur, P.; Montroll, E. Poincaré cycles, ergodicity, and irreversibility in assemblies of coupled harmonic oscillators. *J. Math. Phys.* **1960**, *1*, 70–84. + +2. Lee, M.H. Solutions of the generalized Langevin equation by a method of recurrence relations. *Phys. Rev. B* **1982**, *26*, 2547–2551. + +3. Pires, A.S.T. The memory function formalism in the study of the dynamics of a many body system. *Helv. Phys. Acta* **1988**, *61*, 988. + +4. Viswanath, V.S.; Mueller, G. *Recursion Method*; Springer-Verlag: Berlin, Germany, 1994. + +5. Balucani, U.; Lee, M.H.; Tognetti, V. Dynamical correlations. *Phys. Rep.* **2003**, *373*, 409–492. + +6. Mokshin, A.V. Self-consistent approach to the description of relaxation processes in classical multiparticle systems. *Theory Math. Phys.* **2015**, *183*, 449–477. + +7. Sen, S. 
Solving the Liouville equation for conservative systems: Continued fraction formalism and a simple application. *Phys. A* **2006**, *360*, 304–324. + +8. Kim, J.; Sawada, I. Dynamics of a harmonic oscillator on the Bethe lattice. *Phys. Rev. E* **2000**, *61*, R2172–R2175. + +9. Sawada, I. Dynamics of the S = 1/2 alternating chains at T = ∞. *Phys. Rev. Lett.* **1999**, *83*, 1668–1671. +---PAGE_BREAK--- + +10. Sen, S. Exact solution of the Heisenberg equation of motion for the surface spin in a semi-infinite S=1/2 XY chain at infinite temperatures. Phys. Rev. B **1991**, *44*, 7444-7450. + +11. Florencio, J.; Sá Barreto, F.C.S. Dynamics of the random one-dimensional transverse Ising model. Phys. Rev. B **1999**, *60*, 9555-9560. + +12. Silva Nunez, M.E.; Florencio, J. Effects of disorder on the dynamics of the XY chain. Phys. Rev. B **2003**, *68*, 144061-114065. + +13. Daligault, J.; Murillo, M.S. Continued fraction matrix representation of response functions in multicomponent systems. Phys. Rev. E **2003**, *68*, 154011-154014. + +14. Mokshin, A.V.; Yulmatyev, R.M.; Hanggi, P. Simple measure of memory for dynamical processes described by a generalized Langevin equation. Phys. Rev. Lett. **2005**, *95*, 200601. + +15. Hong, J., Kee, H.Y. Analytic treatment of Mott-Hubbard transition in the half-filled Hubbard model and its thermodynamics. Phys. Rev. B **1995**, *52*, 2415-2421. + +16. Liu, Z.-Q.; Kong, X.-M.; Chen, X.-S. Effects of Gaussian disorder on the dynamics of the random transverse Ising model. Phys. Rev. B **2006**, *73*, 224412. + +17. Chen, X.-S.; Shen, Y.-Y.; Kong, X.-M. Crossover of the dynamical behavior in two-dimensional random transverse Ising model. Phys. Rev. B **2010**, *82*, 174404. + +18. De Mello Silva, E. Time evolution in a two-dimensional ultrarelativistic-like electron gas by recurrence relations method. Acta Phys. Pol. B **2015**, *46*, 1135-1141. + +19. De Mello Silva, E. Dynamical class of a two-dimensional plasmonic Dirac system. 
Phys. Rev. E **2015**, *92*, 042146. + +20. Guimaraes, P.R.C.; Plascak, J.A.; de Alcantara Bonfim, O.F.; Florencio, J. Dynamics of the transverse Ising model with next-nearest-neighbor interactions. Phys. Rev. E **2015**, *92*, 042115. + +21. Sharma, N.L. Response and relaxation of a dense electron gas in D dimensions at long wavelengths. Phys. Rev. B **1992**, *45*, 3552-3556. + +22. Lee, M.H. Can the velocity autocorrelation function decay exponentially? Phys. Rev. Lett. **1983**, *51*, 1227-1230. + +23. Kubo, R. The fluctuation-dissipation theorem. Rep. Prog. Phys. **1966**, *29*, 255-284. + +24. Lee, M.H. Orthogonalization process by recurrence relations. Phys. Rev. Lett. **1982**, *49*, 1072-1075. + +25. Lee, M.H.; Florencio, J., Jr.; Hong, J. Dynamic equivalence of a two-dimensional quantum electron gas and a classical harmonic oscillator chain with an impurity mass. J. Phys. A **1989**, *22*, L331-L335. + +26. Fox, R.F. Long-time tails and diffusion. Phys. Rev. A **1983**, *27*, 3216-3233. + +27. Florencio, J., Jr.; Lee, M.H. Exact time evolution of a classical harmonic-oscillator chain. Phys. Rev. A **1985**, *31*, 3231-3236. + +28. Lee, M.H. Why Irreversibility is not a sufficient condition for ergodicity. Phys. Rev. Lett. **2007**, *98*, 190601. + +29. Lee, M.H. Derivation of the generalized Langevin equation by a method of recurrence relations. J. Math. Phys. **1983**, *24*, 2512-2514. + +30. Watson, G.N. *A Treatise on the Theory of Bessel Functions*; Cambridge U.P.: London, UK, 1980; Chapter 15. + +31. Lee, M.H. Ergodic theory, infinite products, and long time behavior in Hermitian models. Phys. Rev. Lett. **2001**, *87*, 250601/1-250601/4. + +32. Pestana Marino, E. Ph.D. Thesis, University of Georgia, Athens, GA, USA, 2011, unpublished. + +33. Lee, M.H. Birkhoff's theorem, many-body response functions, and the ergodic condition. Phys. Rev. Lett. **2007**, *98*, 110403. + +34. Lee, M.H. 
Solving for the fixed points of 3-cycle in the logistic map and toward realizing chaos by the theorems of Sharkovskii and Li-Yorke. Commu. Theor. Phys. **2014**, *62*, 485-496. + +35. Sharkovskii, A.N. Coexistence of cycles of a continuous transformation of a line into itself. Ukrainian Math. J. **1964**, *16*, 61-71 (in Russian); English transl.: Int. J. Bifurc. Chaos **1995**, *5*, 1363-1273. + +36. Ulam, S.M. *A Collection of Mathematical Problems*; Interscience: New York, NY, USA, 1960; pp. 73-74. + +37. Lee, M.H. Cyclic solutions in chaos and the Sharkovskii theorem. Acta Phys. Pol. B **2012**, *43*, 1053-1063. + +38. Yu, M.B. Momentum autocorrelation function of Fibonacci chains with finite number oscillators. Eur. J. Phys. B **2012**, *85*, 379. + +39. Yu, M.B. Momentum autocorrelation function of a classical oscillator chain with alternating masses. Eur. J. Phys. B **2013**, *86*, 57. + +40. Yu, M.B. Momentum autocorrelation function of an impurity in a classical oscillator chain with alternating masses - I. General theory. Phys. A **2014**, *398*, 252-263. + + +---PAGE_BREAK--- + +41. Yu, M.B. Momentum autocorrelation function of an impurity in a classical oscillator chain with alternating masses II. Illustrations. *Phys. A* **2015**, *438*, 469–486. + +42. Yu, M.B. Momentum autocorrelation function of an impurity in a classical oscillator chain with alternating masses III. Some limiting cases. *Phys. A* **2016**, *447*, 411–421. + +43. Wierling, A.; Sawada, I. Wave-number dependent current correlation for a harmonic oscillator. *Phys. Rev. E* **2010**, *82*, 051107. + +44. Wierling, A. Dynamic structure factor of linear harmonic chain - A recurrence relation approach. *Eur. J. Phys. B* **2012**, *85*, 20. + +45. Vitali, D.; Grigolini, P. Subdynamics, Fokker-Planck equation, and exponential decay of relaxation processes. *Phys. Rev. A* **1989**, *39*, 1486–1499. + +46. Grigolini, P. 
*Quantum Mechanical Irreversibility and Measurement*; World Scientific: Singapore, Singapore, 1993. + +© 2016 by the author. Licensee MDPI, Basel, Switzerland. This article is an open access +article distributed under the terms and conditions of the Creative Commons Attribution +(CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +# Old Game, New Rules: Rethinking the Form of Physics + +**Christian Baumgarten** + +5244 Birrhard, Switzerland; christian-baumgarten@gmx.net + +Academic Editor: Young Suh Kim + +Received: 26 February 2016; Accepted: 28 April 2016; Published: 6 May 2016 + +**Abstract:** We investigate the modeling capabilities of sets of coupled *classical harmonic oscillators* (CHO) in the form of a modeling game. The application of the simple but restrictive rules of the game lead to conditions for an isomorphism between Lie-algebras and real Clifford algebras. We show that the correlations between two coupled classical oscillators find their natural description in the Dirac algebra and allow to model aspects of special relativity, inertial motion, electromagnetism and quantum phenomena including spin in one go. The algebraic properties of Hamiltonian motion of low-dimensional systems can generally be related to certain types of interactions and hence to the dimensionality of emergent space-times. We describe the intrinsic connection between phase space volumes of a 2-dimensional oscillator and the Dirac algebra. In this version of a phase space interpretation of quantum mechanics the (components of the) spinor wavefunction in momentum space are abstract canonical coordinates, and the integrals over the squared wave function represents second moments in phase space. The wave function in ordinary space-time can be obtained via Fourier transformation. Within this modeling game, 3+1-dimensional space-time is interpreted as a structural property of electromagnetic interaction. 
A generalization selects a series of Clifford algebras of specific dimensions with similar properties, specifically also 10- and 26-dimensional real Clifford algebras. + +**Keywords:** Hamiltonian mechanics; coupled oscillators; Lorentz transformation; Dirac equation + +**PACS:** 45.20.Jj, 47.10.Df, 41.75, 41.85, 03.65.Pm, 05.45.Xt, 03.30.+p, 03.65.-w,29.27.-a + +## 1. Introduction + +D. Hestenes had the joyful idea to describe physics as a modeling game [1]. We intend to play a modeling game with (ensembles of) classical harmonic oscillators (CHO). The CHO is certainly one of the most discussed and analyzed systems in physics and one of the few exactly solveable problems. One would not expect any substantially new discoveries related to this subject. Nevertheless there are aspects that are less well-known than others. One of these aspects concerns the transformation group of the symplectic transformations of $n$ coupled oscillators, $Sp(2n)$. We invite the reader to join us playing "a modeling game" and to discover some fascinating features related to possible reinterpretations of systems of two (or more) coupled oscillators. We will show that special relativity can be reinterpreted as a transformation theory of the second moments of the abstract canonical variables of coupled oscillator systems (The connection of the Dirac matrices to the symplectic group has been mentioned by Dirac in Reference [2]. For the connection of oscillators and Lorentz transformations (LTs) see also the papers of Kim and Noz [3–5] and references therein. The use of CHOs to model quantum systems has been recently described-for instance-by Briggs and Eisfeld [6–8]). We extend the application beyond pure LTs and show that the Lorentz force can be reinterpreted by the second moments of two coupled oscillators in proper time. Lorentz transformations can be modeled as symplectic transformations [4]. We shall show how Maxwell's equations find their place within the game. 
+---PAGE_BREAK--- + +The motivation for this game is to show that many aspects of modern physics can be understood on the basis of the classical notions of harmonic oscillation if these notions are appropriately reinterpreted. + +In Section 2 we introduce the rules of our game, in Section 3 we introduce the algebraic notions of the Hamilton formalism. In Section 4 we describe how geometry emerges from coupled oscillator systems, in Section 5 we describe the use of symplectic transformations and introduce the Pauli- and Dirac algebra. In Section 6 we introduce a physical interpretation of oscillator moments and in Section 7 we relate the phase space of coupled oscillators to the real Dirac algebra. Section 8 contains a short summary. + +## 2. The Rules Of The Game + +The first rule of our game is the principle of reason (POR): *No distinction without reason*—we should not add or remove something *specific* (an asymmetry, a concept, a distinction) from our model without having a clear and explicit reason. If there is no reason for a specific asymmetry or choice, then all possibilities are considered equivalently. + +The second rule is the principle of variation (POV): We postulate that change is immanent to all fundamental quantities in our game. From these two rules, we take that the mathematical object of our theory is a list (n-tuple) of quantities (variables) $\psi$, each of which varies at all times. + +The third rule is the principle of *objectivity* (POO): Any law within this game refers to measurements, defined as comparison of quantities (object properties) in relation to other object properties of the same type (i.e., unit). Measurements require reference standards (rulers). A measurement is objective if it is based on (properties of) the objects of the game. This apparent self-reference is unavoidable, as it models the *real* situation of physics as experimental science. 
Since all fundamental objects (quantities) in our model *vary at all times*, the only option to construct a constant quantity that might serve as a ruler, is given by *constants of motion* (COM). Hence the principle of objectivity requires that measurement standards are derived from constants of motion. + +This third rule implies that the fundamental variables can not be directly measured, but only functions of the fundamental variables of the same dimension (unit) of a COM. Thus the model has two levels: The level of the fundamental variable list $\psi$, which is experimentally not directly accessible and a level of *observables* which are (as we shall argue) even moments of the fundamental variables $\psi$. + +### 2.1. Discussion of the Rules + +E.T. Jaynes wrote that “Because of their empirical origins, QM and QED are not physical theories at all. In contrast, Newtonian celestial mechanics, Relativity, and Mendelian genetics are physical theories, because their mathematics was developed by reasoning out the consequences of clearly stated physical principles from which constraint the possibilities”. And he continues “To this day we have no constraining principle from which one can deduce the mathematics of QM and QED; [...] In other words, the mathematical system of the present quantum theory is [...] unconstrained by any physical principle” [9]. This remarkably harsh criticism of quantum mechanics raises the question of what we consider to be a physical principle. Are the rules of our game physical principles? We believe that they are no substantial physical principles but *formal* first principles, they are *preconditions* of a sensible theory. They contain no immediate physical content, but they define the *form* or the *idea* of physics. + +It is to a large degree immanent to science and specifically to physics to presuppose the existence of *reason*: Apples do not fall down by chance—there is a reason for this tendency. 
Usually this belief in reason implies the belief in causality, i.e., that we can also (at least in principle) explain why a specific apple falls at a specific time, but practically this latter belief can rarely be confirmed experimentally and therefore remains to some degree metaphysical. Thus, if, as scientists, we postulate that things have reason, then this is not a *physical* principle but a precondition, a first principle. + +The second rules (POV), is specific to the form (or idea) of physics, e.g., that it is the sense of physics to *recognize the pattern* of motion and to *predict future*. Therefore the notion of time in the form of change is indeed immanent to the physical description of reality. +---PAGE_BREAK--- + +The principle of objectivity (POO) is immanent to the very idea of physics: A measurement is the comparison of properties of objects with compatible properties of reference objects, e.g., requires “constant” rulers. Hence the rules of the game are to a large degree unavoidable: They follow from the very form of physics and therefore certain laws of physics are not substantial results of a physical theory. For instance a consistent “explanation” of the stability of matter is impossible as we presumed it already within the idea of measurement. More precisely: if this presumption does not follow within the framework of a physical theory, then the theory is flawed, since it can not reproduce its own presumptions. + +Einstein wrote with respect to relativity that “It is striking that the theory (except for the four-dimensional space) introduces two kinds of things, i.e., (1) measuring rods and clocks; (2) all other things, e.g., the electromagnetic field, the material point, etc. This, in a certain sense, is inconsistent; strictly speaking, measuring rods and clocks should emerge as solutions of the basic equations [...], not, as it were, as theoretically self-sufficient entities”. [10]. 
The more it may surprise that the stability of matter can not be obtained from classical physics as remarked by Elliott H. Lieb: “A fundamental paradox of classical physics is why matter, which is held together by Coulomb forces, does not collapse” [11]. This single sentence seems to rule out the possibility of a fundamental classical theory and uncovers the uncomfortable situation of theoretical physics today: Despite the overwhelming experimental and technological success, there is a deep-seated confusion concerning the theoretical foundations. Our game is therefore a meta-experiment. It is not the primary goal to find “new” laws of nature or new experimental predictions, but it is a conceptual “experiment” that aims to further develop our understanding of the consequences of principles: which ones are really required to derive central “results” of contemporary physics. In this short essay final answers can not be given, but maybe some new insights are possible. + +## 2.2. What about Space-Time? + +A theory has to make the choice between postulate and proof. If a 3+1 dimensional space-time is presumed, then it cannot be proven within the same theoretical framework. More precisely, the value of such a proof remains questionable. This is a sufficient reason to avoid postulates concerning the dimensionality of space-time. Another, even stronger, reason to avoid a direct postulate of space-time and its geometry has been given above: The fundamental variables that we postulated, can not be directly measured. This excludes space-time coordinates as primary variables (which can be directly measured), but with it almost all other apriori assumed concepts like velocity, acceleration, momentum, energy and so on. At some point these concepts certainly have to be introduced, but we suggest an approach to the formation of concepts that differs from the Newtonian axiomatic method. 
The POR does not allow us to introduce, without reason, a distinction of the fundamental variables into coordinates and momenta. Therefore we are forced to use an interpretational method, which one might summarize as *function follows form*. We shall first derive equations and then we shall interpret the equations according to some formal criteria. This implies that we have to refer to already existing notions if we want to identify quantities according to their appearance within a certain formalism. The consequence for the game is that we have to show how *geometrical* notions can arise: If we do not postulate space-time, then we have to suggest a method to construct it.

A consequence of our conception is that both objects and fields have to be identified with dynamical structures, as there is simply nothing else available. This fits the framework of structure preserving (symplectic) dynamics that we shall derive from the described principles.
---PAGE_BREAK---

### 3. Theory of Small Oscillations

In this section we shall derive the theory of coupled oscillators from the rules of our game. According to the POO there exists a function (COM) $\mathcal{H}(\psi)$ such that (Let us first (for simplicity) assume that $\frac{\partial \mathcal{H}}{\partial t} = 0$):

$$ \frac{d\mathcal{H}}{dt} = \sum_k \frac{\partial \mathcal{H}}{\partial \psi_k} \dot{\psi}_k = 0 \quad (1) $$

or in vector notation

$$ \frac{d\mathcal{H}}{dt} = (\nabla_{\psi} \mathcal{H}) \cdot \dot{\psi} = 0 \quad (2) $$

The simplest solution is given by an arbitrary skew-symmetric matrix $\mathcal{X}$:

$$ \dot{\psi} = \mathcal{X} \nabla_{\psi} \mathcal{H} \quad (3) $$

Note that it is only the skew-symmetry of $\mathcal{X}$ which ensures that Equation (3) is always a solution to Equation (2) and which ensures that $\mathcal{H}$ is constant. 
If we now consider a state vector $\psi$ of dimension $k$, then there is a theorem in linear algebra which states that for any skew-symmetric matrix $\mathcal{X}$ there exists a non-singular matrix $\mathcal{Q}$ such that we can write [12]:

$$ \mathcal{Q}^T \mathcal{X} \mathcal{Q} = \operatorname{diag}(\eta_0, \eta_1, \eta_2, \ldots, 0, 0, 0) \quad (4) $$

where $\eta_0$ is the matrix

$$ \eta_0 = \begin{pmatrix} 0 & 1 \\ -1 & 0 \end{pmatrix} \quad (5) $$

If we restrict ourselves to orthogonal matrices $\mathcal{Q}$, then we may still write

$$ \mathcal{Q}^T \mathcal{X} \mathcal{Q} = \operatorname{diag}(\lambda_0 \eta_0, \lambda_1 \eta_1, \lambda_2 \eta_2, \ldots, 0, 0, 0) \quad (6) $$

In both cases we may omit the zeros, since they correspond to non-varying variables, which would be in conflict with the second rule of our modeling game. Hence $k=2n$ must be even and the square matrix $\mathcal{X}$ has the dimension $2n \times 2n$. As we have no specific reason to assume asymmetries between the different degrees of freedom (DOF), we have to choose all $\lambda_k = 1$ in Equation (6) and return to Equation (4) without zeros and define the block-diagonal so-called *symplectic unit matrix* (SUM) $\gamma_0$:

$$ \mathcal{Q}^T \mathcal{X} \mathcal{Q} = \operatorname{diag}(\eta_0, \eta_1, \eta_2, \ldots, \eta_n) \equiv \gamma_0 \quad (7) $$

These few basic rules thus lead us directly to Hamiltonian mechanics: Since the state vector has even dimension and due to the form of $\gamma_0$, we can interpret $\psi$ as an ensemble of $n$ classical DOF—each DOF represented by a canonical pair of coordinate and momentum: $\psi = (q_1, p_1, q_2, p_2, \ldots, q_n, p_n)^T$. 
In this notation and after the application of the transformation $\mathcal{Q}$, Equation (3) can be written in the form of the Hamiltonian equations of motion (HEQOM):

$$ \begin{aligned} \dot{q}_i &= \frac{\partial \mathcal{H}}{\partial p_i} \\ \dot{p}_i &= -\frac{\partial \mathcal{H}}{\partial q_i} \end{aligned} \quad (8) $$

The validity of the HEQOM is of fundamental importance as it allows for the use of the results of Hamiltonian mechanics, of statistical mechanics and thermodynamics—but without the intrinsic presupposition that the $q_i$ have to be understood as positions in real space and the $p_i$ as the corresponding canonical momenta. This is legitimate as the theory of canonical transformations is independent of any specific physical interpretation of what the coordinates and momenta represent physically. As no other interpretation is at hand, we say that these canonical pairs are coordinates $q_i, p_i$ in an
---PAGE_BREAK---

abstract phase space and they are canonical coordinates and momenta only due to the form of the HEQOM. The choice of the specific form of $\gamma_0$ is for $n > 1$ DOF not unique. It could for instance be written as

$$ \gamma_0 = \eta_0 \otimes \mathbf{1}_{n \times n} \qquad (9) $$

which corresponds to a state vector of the form

$$ \psi = (q_1, \dots, q_n, p_1, \dots, p_n)^T $$

or by

$$ \gamma_0 = \mathbf{1}_{n \times n} \otimes \eta_0 \qquad (10) $$

as in Equation (7). Therefore we are forced to make an arbitrary choice (But we should keep in mind that other "systems" with a different choice are possible. If we can not exclude their existence, then they should exist as well. With respect to the form of the SUM, we suggest that different "particle" types (different types of fermions for instance) have a different SUM). 
But in all cases the SUM $\gamma_0$ must be skew-symmetric and have the following properties: + +$$ \begin{aligned} \gamma_0^T &= -\gamma_0 \\ \gamma_0^2 &= -\mathbf{1} \end{aligned} \qquad (11) $$ + +which also implies that $\gamma_0$ is orthogonal and has unit determinant. Note also that all eigenvalues of $\gamma_0$ are purely imaginary. However, once we have chosen a specific form of $\gamma_0$, we have specified a set of canonical pairs $(q_i, p_i)$ within the state vector. This choice fixes the set of possible canonical (structure preserving) transformations. + +Now we write the Hamiltonian $\mathcal{H}(\psi)$ as a Taylor series, we remove the rule-violating constant term and cut it after the second term. We do not claim that higher terms may not appear, but we delay the discussion of higher orders to a later stage. All this is well-known in the theory of small oscillations. There is only one difference to the conventional treatment: We have no direct macroscopic interpretation for $\psi$ and following our first rule we have to write the second-order Hamiltonian $\mathcal{H}(\psi)$ in the most general form: + +$$ \mathcal{H}(\psi) = \frac{1}{2} \psi^T A \psi \qquad (12) $$ + +where $\mathcal{A}$ is only restricted to be *symmetric* as all non-symmetric terms *do not contribute* to $\mathcal{H}$. Since it is not unlikely to find more than a single constant of motion in systems with multiple DOFs, we distinguish systems with singular matrix $\mathcal{A}$ from those with a positive or negative definite matrix $\mathcal{A}$. Positive definite matrices are favoured in the sense that they allow to identify $\mathcal{H}$ with the amount of a substance or an amount of energy (It is immanent to the concept of substance that it is understood as something positive semidefinite). + +Before we try to interpret the elements in $\mathcal{A}$, we will explore some general algebraic properties of the Hamiltonian formalism. 
If we plug Equations (12) into (3), then the equations of motion can be written in the general form:

$$ \dot{\psi} = \gamma_0 A \psi = F \psi \qquad (13) $$

The matrix $F = \gamma_0 A$ is the product of the symmetric (positive semi-definite) matrix $A$ and the skew-symmetric matrix $\gamma_0$. As known from linear algebra, the trace of such products is zero:

$$ \mathrm{Tr}(F) = 0 \qquad (14) $$

Pure harmonic oscillation of $\psi$ is described by matrices $F$ with purely imaginary eigenvalues and those are the only stable solutions [12]. Note that Equation (13) may represent a tremendous variety of different types of systems—all linearly coupled systems in any dimension, chains or $d$-dimensional
---PAGE_BREAK---

lattices of linearly coupled oscillators and wave propagation (However, the linear approximation does
not allow for the description of the transport of heat).

One quickly derives from the properties of $\gamma_0$ and $\mathcal{A}$ that

$$
\mathbf{F}^T = \mathcal{A}^T \gamma_0^T = -\mathcal{A} \gamma_0 = \gamma_0^2 \mathcal{A} \gamma_0 = \gamma_0 \mathbf{F} \gamma_0 \quad (15)
$$

Since any square matrix can be written as the sum of a symmetric and a skew-symmetric matrix, it is
natural to also consider the properties of products of $\gamma_0$ with a skew-symmetric real square matrix $\mathcal{B}$. 
If $\mathbf{C} = \gamma_0 \mathcal{B}$, then

$$
\mathbf{C}^T = \mathcal{B}^T \gamma_0^T = \mathcal{B} \gamma_0 = -\gamma_0^2 \mathcal{B} \gamma_0 = -\gamma_0 \mathbf{C} \gamma_0 \quad (16)
$$

Symmetric $2n \times 2n$-matrices contain $2n(2n+1)/2$ different matrix elements and skew-symmetric
ones $2n(2n-1)/2$ elements, so that there are $v_s$ linearly independent matrix elements in $\mathcal{A}$

$$
v_s = n(2n + 1) \tag{17}
$$

and $v_c$ matrix elements in $\mathcal{B}$ with

$$
v_c = n(2n - 1) \qquad (18)
$$

In the theory of linear Hamiltonian dynamics, matrices of the form of F are known as “Hamiltonian” or
“infinitesimal symplectic” and those of the form of C as “skew-Hamiltonian” matrices. This convention
is a bit odd, as F does not appear in the Hamiltonian and it is in general not symplectic. Furthermore
the term “Hamiltonian matrix” has a different meaning in quantum mechanics—in close analogy to A.
But it is known that this type of matrix is closely connected to symplectic matrices, as every symplectic
matrix is a matrix exponential of a matrix F [12]. We consider the matrices as defined by Equations (15)
and (16) as too important and fundamental to have no meaningful and unique names: Therefore we
speak of a **symplex** (plural *symplices*) if a matrix satisfies Equation (15), and of a **cosymplex** if it satisfies
Equation (16).

# Symplectic Motion and Second Moments

So what is a symplectic matrix anyway? The concept of symplectic transformations is a specific formulation of the theory of canonical transformations. Suppose we define a new state vector (or new coordinates) $\phi(\psi)$—with the additional requirement that the transformation is reversible. 
Then the Jacobian matrix of the transformation is given by

$$
J_{ij} = \left( \frac{\partial \phi_i}{\partial \psi_j} \right) \tag{19}
$$

and the transformation is said to be symplectic, if the Jacobian matrix satisfies [12]

$$
\mathbf{J}\gamma_0\mathbf{J}^T = \gamma_0
\quad (20)
$$

Let us see what this implies in the linear case:

$$
\begin{align*}
\mathbf{J} \dot{\psi} &= \mathbf{J} \mathbf{F} \mathbf{J}^{-1} \mathbf{J} \psi \\
\tilde{\psi} &= \mathbf{J} \psi \\
\dot{\tilde{\psi}} &= \mathbf{J} \mathbf{F} \mathbf{J}^{-1} \tilde{\psi} \\
\dot{\tilde{\psi}} &= \tilde{\mathbf{F}} \tilde{\psi}
\end{align*}
\tag{21}
$$
---PAGE_BREAK---

and—by the use of Equation (20)—one finds that $\tilde{\mathbf{F}}$ is still a symplex:

$$
\begin{align*}
\tilde{\mathbf{F}}^T &= (\mathbf{J}^{-1})^T \mathbf{F}^T \mathbf{J}^T \\
\tilde{\mathbf{F}}^T &= (\mathbf{J}^{-1})^T \gamma_0 \mathbf{F} \gamma_0 \mathbf{J}^T \\
\tilde{\mathbf{F}}^T &= -\gamma_0^2 (\mathbf{J}^{-1})^T \gamma_0 \mathbf{F} \mathbf{J}^{-1} \gamma_0 \tag{22} \\
\tilde{\mathbf{F}}^T &= -\gamma_0 \mathbf{J} \gamma_0^2 \mathbf{F} \mathbf{J}^{-1} \gamma_0 \\
\tilde{\mathbf{F}}^T &= \gamma_0 \mathbf{J} \mathbf{F} \mathbf{J}^{-1} \gamma_0 \\
\tilde{\mathbf{F}}^T &= \gamma_0 \tilde{\mathbf{F}} \gamma_0
\end{align*}
$$

Hence a symplectic transformation is first of all a similarity transformation, but secondly, it preserves the structure of all involved equations. Therefore the transformation is said to be *canonical* or *structure preserving*. The distinction between canonical and non-canonical transformations can therefore be traced back to the skew-symmetry of $\gamma_0$ and the symmetry of $\mathcal{A}$—both of them consequences of the rules of our physics modeling game.

Recall that we argued that the matrix $\mathcal{A}$ should be symmetric *because* skew-symmetric terms do not contribute to the Hamiltonian. Let us have a closer look at what this means. 
Consider the matrix of second moments $\Sigma$ that can be built from the variables $\psi$:

$$ \Sigma = \langle \psi \psi^T \rangle \qquad (23) $$

in which the angle brackets indicate some (yet unspecified) sort of average. The equation of motion of this
matrix is given by

$$
\begin{align}
\dot{\Sigma} &= \langle \dot{\psi} \psi^T \rangle + \langle \psi \dot{\psi}^T \rangle \\
\dot{\Sigma} &= \langle \mathbf{F} \psi \psi^T \rangle + \langle \psi \psi^T \mathbf{F}^T \rangle \tag{24}
\end{align}
$$

Now, as long as $\mathbf{F}$ does not depend on $\psi$, we obtain

$$
\begin{align}
\dot{\Sigma} &= F\Sigma + \Sigma F^T \\
\dot{\Sigma} &= F\Sigma + \Sigma \gamma_0 F \gamma_0 \\
(\dot{\Sigma}\gamma_0) &= F(\Sigma\gamma_0) - (\Sigma\gamma_0)F \tag{25} \\
\dot{\mathbf{S}} &= F\mathbf{S} - \mathbf{S}F
\end{align}
$$

where we defined the new matrix **S** ≡ Σ γ₀. For completeness we introduce the “adjoint” spinor
$\bar{\psi} = \psi^\dagger \gamma_0$ so that we may write

$$
\mathbf{S} = \langle \psi \bar{\psi} \rangle
\quad (26)
$$

Note that **S** is also a symplex. The matrix **S** (i.e., all second moments) is constant, iff **S** and **F** commute.
Now we define an *observable* to be an operator **O** with a (potentially) non-vanishing expectation
value, defined by:

$$
\langle \mathbf{O} \rangle = \langle \bar{\psi} \mathbf{O} \psi \rangle = \langle \psi^T \gamma_0 \mathbf{O} \psi \rangle
\quad (27)
$$

Thus, if the product $\gamma_0 \mathbf{O}$ is not skew-symmetric, i.e., contains a product of $\gamma_0$ with a symmetric matrix $\mathcal{B}$, then the expectation value is potentially non-zero:

$$
\langle O \rangle = \langle \psi^T \gamma_0 (\gamma_0 B) \psi \rangle = -\langle \psi^T B \psi \rangle
\quad (28)
$$

This means that only the symplex-part of an operator is "observable", while cosymplices yield a vanishing expectation value. Hence Equation (25) delivers the blueprint for the general definition of observables. 
Furthermore we find in the last line the constituting equation for Lax pairs [13]. Peter Lax has shown that for such pairs of operators **S** and **F** that obey Equation (25) there are the following constants of motion + +$$ +\mathrm{Tr}(\mathbf{S}^k) = \mathrm{const} \tag{29} +$$ +---PAGE_BREAK--- + +for arbitrary integer $k > 0$. Since $\mathbf{S}$ is a symplex and therefore by definition the product of a symmetric matrix and the skew-symmetric $\gamma_0$, Equation (29) is always zero and hence trivially true for $k = 1$. The same is true for any odd power of $\mathbf{S}$, as it can be easily shown that any odd power of a symplex is again a symplex (see Equation (35)), so that the only non-trivial general constants of motion correspond to even powers of $\mathbf{S}$, which implies that all observables are functions of even powers of the fundamental variables. + +To see the validity for $k > 1$ we have to consider the general algebraic properties of the trace operator. Let $\lambda$ be an arbitrary real constant and $\tau$ be a real parameter, then + +$$ +\begin{aligned} +\mathrm{Tr}(\mathbf{A}) &= \mathrm{Tr}(\mathbf{A}^T) \\ +\mathrm{Tr}(\lambda \mathbf{A}) &= \lambda \mathrm{Tr}(\mathbf{A}) \\ +\frac{d}{d\tau} \mathrm{Tr}(\mathbf{A}(\tau)) &= \mathrm{Tr}(\frac{d\mathbf{A}}{d\tau}) && (30) \\ +\mathrm{Tr}(\mathbf{A} + \mathbf{B}) &= \mathrm{Tr}(\mathbf{A}) + \mathrm{Tr}(\mathbf{B}) \\ +\mathrm{Tr}(\mathbf{A}\mathbf{B}) &= \mathrm{Tr}(\mathbf{B}\mathbf{A}) +\end{aligned} + $$ + +It follows that + +$$ +\begin{aligned} +0 &= \mathrm{Tr}(\mathbf{A}\mathbf{B} - \mathbf{B}\mathbf{A}) \\ +0 &= \mathrm{Tr}(\mathbf{A}^n \mathbf{B} - \mathbf{A}^{n-1} \mathbf{B}\mathbf{A}) \\ +0 &= \mathrm{Tr}[\mathbf{A}^{n-1} (\mathbf{A}\mathbf{B} - \mathbf{B}\mathbf{A})] +\end{aligned} + \quad (31) $$ + +From the last line of Equation (31) it follows with $\frac{d\mathbf{A}}{d\tau} = \lambda (\mathbf{A}\mathbf{B} - \mathbf{B}\mathbf{A})$ + +$$ \frac{d}{d\tau} \mathrm{Tr}(\mathbf{A}^n) = 
0 \qquad (32) $$

Remark: This conclusion is not limited to symplices.

However for single spinors $\psi$ and the corresponding second moments $\mathbf{S} = \Sigma \gamma_0 = \psi\psi^\dagger\gamma_0$ we find:

$$
\begin{aligned}
\mathrm{Tr}(\mathbf{S}^k) &= \mathrm{Tr}[\psi \psi^\dagger \gamma_0 \cdots \psi \psi^\dagger \gamma_0] \\
&= \mathrm{Tr}[(\psi^\dagger \gamma_0 \cdots \psi \psi^\dagger \gamma_0)] \\
&= \mathrm{Tr}[(\psi^\dagger \gamma_0 \cdots \psi \psi^\dagger \gamma_0) \psi] \\
&= \mathrm{Tr}[(\psi^\dagger \gamma_0 \psi) \cdots (\psi^\dagger \gamma_0 \psi)] = 0
\end{aligned}
 \quad (33) $$

since each single factor $(\psi^\dagger \gamma_0 \psi)$ vanishes due to the skew-symmetry of $\gamma_0$. Therefore the constants of motion as derived from Equation (29) are non-zero only for even $k$ and *after averaging over some kind of distribution* such that $\mathbf{S} = \langle \psi \psi^\dagger \gamma_0 \rangle$ has non-zero eigenvalues as in Equation (34) below.

The symmetric $2n \times 2n$-matrix $\Sigma$ (and also $\mathcal{A}$) is positive definite, if it can be written as a product $\Sigma = \Psi\Psi^\dagger$, where $\Psi$ is a non-singular matrix of size $2n \times m$ with $m \ge 2n$.

For $n = m/2 = 1$, the form of $\Psi$ may be chosen as

$$
\begin{aligned}
\Psi &= \frac{1}{\sqrt{q^2+p^2}} \begin{pmatrix} q & -p \\ p & q \end{pmatrix} = \frac{1}{\sqrt{q^2+p^2}} (\mathbf{1}\psi, \eta_0 \psi) \\
\Rightarrow \quad & \Sigma = \Psi\Psi^\dagger = \Psi^\dagger\Psi = \mathbf{1} \\
\mathbf{S} &= \gamma_0
\end{aligned}
 \quad (34) $$

so that for $k=2$ the average of two "orthogonal" column-vectors $\psi$ and $\eta_0\psi$ gives a non-zero constant of motion via Lax pairs as $\gamma_0^2 = -1$.

These findings have some consequences for the modeling game. 
The first is that we have found constants of motion—though some of them are physically meaningful only for a non-vanishing volume in phase space, i.e., by the combination of several spinors $\psi$. Secondly, a stable state $\dot{\mathbf{S}} = 0$ implies that the matrix operators forming the Lax pair have the same eigenvectors: a density distribution in phase space (as described by the matrix of second moments) is stable if it is adapted or *matched* to the
---PAGE_BREAK---

symplex F. The phase space distribution as represented by **S** and the driving terms (the components of F) must fit to each other in order to obtain a stable “eigenstate”. But we also found a clear reason why generators (of symplectic transformations) are always observables and vice versa: both the generators and the observables are symplices of the same type. There is a one-to-one correspondence between them, not only as *generators of infinitesimal transformations*, but also algebraically.

Furthermore, we may conclude that (anti-) commutators are an essential part of “classical”
Hamiltonian mechanics and secondly that the matrix **S** has the desired properties of observables:
Though **S** is based on continuously varying fundamental variables, it is constant, if it commutes with
F, and it varies otherwise (In accelerator physics, Equation (25) describes the envelope of a beam in
linear optics. The matrix of second moments Σ is a covariance matrix—and therefore our modeling
game is connected to probability theory exactly when observables are introduced).

Hence it appears sensible to take a closer look at the (anti-) commutation relations of (co-)
symplices, and though the definitions of (co-) symplices are quite plain, the (anti-) commutator algebra
that emerges from them has a surprisingly rich structure. If we denote symplices by $S_k$ and cosymplices
by $C_k$, then the following rules can quickly be derived:

$$
\left. 
\begin{array}{l}
S_1 S_2 - S_2 S_1 \\
C_1 C_2 - C_2 C_1 \\
C S + S C \\
S^{2n+1}
\end{array}
\right\} \Rightarrow \text{symplex}
$$

$$
\left.
\begin{array}{l}
S_1 S_2 + S_2 S_1 \\
C_1 C_2 + C_2 C_1 \\
C S - S C \\
S^{2n} \\
C^n
\end{array}
\right\} \Rightarrow \text{cosymplex} \qquad (35)
$$

This *Hamiltonian* algebra of (anti-)commutators is of fundamental importance insofar as we derived it in a few steps from first principles (i.e., the rules of the game) and it defines the structure of Hamiltonian dynamics in phase space. The distinction between symplices and cosymplices is also the distinction between observables and non-observables. It is the basis of essential parts of the following considerations.

## 4. Geometry from Hamiltonian Motion

In the following we will demonstrate the geometrical content of the algebra of (co-)symplices
(Equation (35)) which emerges for specific numbers of DOF $n$. As shown above, pairs of canonical
variables (DOFs) are a direct consequence of the abstract rules of our game. Though single DOFs
are poor "objects", it is remarkable to find physical structures emerging from our abstract rules at all.
This suggests that there might be more structure to discover when $n$ DOF are combined, for instance
geometrical structures. The following considerations obey the rules of our game, since they are
based purely on symmetry considerations like those that guided us towards Hamiltonian dynamics.
The objects of interest in our algebraic interpretation of Hamiltonian dynamics are matrices. The first
matrix (besides $\mathcal{A}$) with a specific form that we found, is $\gamma_0$. It is a symplex:

$$
\gamma_0^T = -\gamma_0 = \gamma_0 \gamma_0 \gamma_0 \tag{36}
$$

According to Equation (17) there are $v_s = n(2n+1)$ (i.e., $v_s \ge 3$) symplices. Hence it is natural to ask whether other symplices with properties similar to $\gamma_0$ exist—and if so, what the relations between these matrices are. 
According to Equation (35) the commutator of two symplices is again a symplex, while the anti-commutator is a cosymplex. As we are primarily interested in *observables* and components of the
---PAGE_BREAK---

Hamiltonian (i.e., symplices), respectively, we look for further symplices that anti-commute with $\gamma_0$ and with each other. In this case, the product of two such matrices is also a symplex, i.e., another potential contribution to the general Hamiltonian matrix F.

Suppose we had a set of $N$ mutually anti-commuting orthogonal symplices $\gamma_0$ and $\gamma_k$ with $k \in [1...N-1]$; then a Hamiltonian matrix F might look like

$$F = \sum_{k=0}^{N-1} f_k \gamma_k + \dots \quad (37)$$

The $\gamma_k$ are symplices and anti-commute with $\gamma_0$:

$$\gamma_0 \gamma_k + \gamma_k \gamma_0 = 0 \quad (38)$$

Multiplication from the left with $\gamma_0$ gives:

$$-\gamma_k + \gamma_0 \gamma_k \gamma_0 = -\gamma_k + \gamma_k^T = 0 \quad (39)$$

so that all other possible symplices $\gamma_k$, which anticommute with $\gamma_0$, are symmetric and square to 1. This is an important finding for what follows, as it can (within our game) be interpreted as a classical proof of the uniqueness of the (observable) time-dimension: Time is one-dimensional as there is no other skew-symmetric symplex that anti-commutes with $\gamma_0$. We can choose different forms for $\gamma_0$, but the emerging algebra allows for no second “direction of time”. 
The second order derivative of $\psi$ is (for constant $F$) given by $\ddot{\psi} = F^2 \psi$, which yields:

$$F^2 = \sum_{i=0}^{N-1} f_i^2 \gamma_i^2 + \sum_{i \neq j} f_i f_j (\gamma_i \gamma_j + \gamma_j \gamma_i) \quad (40)$$

Since the anti-commutator on the right vanishes by definition, we are left with:

$$F^2 = \left( \sum_{k=1}^{N-1} f_k^2 - f_0^2 \right) 1 \quad (41)$$

Thus we find a set of (coupled) oscillators, if

$$f_0^2 > \sum_{k=1}^{N-1} f_k^2 \quad (42)$$

such that

$$\ddot{\psi} = -\omega^2 \psi \quad (43)$$

Given that such matrix systems exist, they generate a Minkowski type "metric" as in Equation (41) (Indeed it appears that Dirac derived his system of matrices from this requirement [14]). The appearance of this metric shows how a Minkowski type geometry emerges from the driving terms of oscillatory motion. This is indeed possible—at least for symplices of certain dimensions, as we will show below. The first thing needed is some kind of measure to define the length of a "vector". Since the length is a measure that is invariant under certain transformations, specifically under rotations, we prefer to use a quantity with certain invariance properties to define a length. The only one we have at hand is given by Equation (29). Accordingly we define the (squared) length of a matrix representing a "vector" by

$$\|\mathbf{A}\|^2 = \frac{1}{2n} \mathrm{Tr}(\mathbf{A}^2) \quad (44)$$
---PAGE_BREAK---

The division by $2n$ is required to make the unit matrix have unit norm. Besides the norm we need a scalar product, i.e., a definition of orthogonality. Consider the Pythagorean theorem, which says that two vectors $\vec{a}$ and $\vec{b}$ are orthogonal iff

$$ (\vec{a} + \vec{b})^2 = \vec{a}^2 + \vec{b}^2 \quad (45) $$

The general expression is

$$ (\vec{a} + \vec{b})^2 = \vec{a}^2 + \vec{b}^2 + 2 \vec{a} \cdot \vec{b} \quad (46) $$

The equations are equal, iff $\vec{a} \cdot \vec{b} = 0$. 
Hence the Pythagorean theorem yields a reasonable definition of orthogonality. However, so far we had no method to define vectors within our game. Using matrices **A** and **B** we may then write

$$
\begin{aligned}
\|\mathbf{A} + \mathbf{B}\|^2 &= \frac{1}{2n} \mathrm{Tr}[(\mathbf{A} + \mathbf{B})^2] \\
&= \|\mathbf{A}\|^2 + \|\mathbf{B}\|^2 + \frac{1}{2n} \mathrm{Tr}(\mathbf{A}\mathbf{B} + \mathbf{B}\mathbf{A})
\end{aligned}
\quad (47) $$

If we compare this to Equations (45) and (46), respectively, then the obvious definition of the inner product is given by:

$$ \mathbf{A} \cdot \mathbf{B} = \frac{\mathbf{A}\mathbf{B} + \mathbf{B}\mathbf{A}}{2} \quad (48) $$

Since the anticommutator in general does not yield a scalar, we have to distinguish between inner product and scalar product:

$$ (\mathbf{A} \cdot \mathbf{B})_S = \frac{1}{4n} \mathrm{Tr}(\mathbf{A}\mathbf{B} + \mathbf{B}\mathbf{A}) \quad (49) $$

where we indicate the scalar part by the subscript “S”. Accordingly we define the exterior product by the commutator

$$ \mathbf{A} \wedge \mathbf{B} = \frac{\mathbf{A}\mathbf{B} - \mathbf{B}\mathbf{A}}{2} \quad (50) $$

Now that we defined the products, we should come back to the unit vectors. The only “unit vector” that we explicitly defined so far is the symplectic unit matrix $\gamma_0$. If it represents anything at all then it must be “the direction” of change, the direction of evolution in time, as it was derived in this context and is the only “dimension” found so far. As we have already shown, all other unit vectors $\gamma_k$ must be symmetric, if they are symplices. And vice versa: If $\gamma_k$ is symmetric and anti-commutes with $\gamma_0$, then it is a symplex. 
As only symplices represent observables and are generators of symplectic transformations, we can have only a single “time” direction $\gamma_0$ and a yet unknown number of *symmetric* unit vectors (Thus we found a simple answer to the question why only a single time direction is possible, a question also debated in Reference [15]). However, for $n > 1$, there might be different equivalent choices of $\gamma_0$. Whatever the specific form of $\gamma_0$ is, we will show that in combination with some general requirements like completeness, normalizability and observability it determines the structure of the complete algebra. Though we don't yet know how many symmetric and pairwise anti-commuting unit vectors $\gamma_k$ exist—we have to interpret them as unit vectors in “spatial directions” (The meaning of what a spatial direction is, especially in contrast to the direction of time $\gamma_0$, has to be derived from the form of the emerging equations, of course. As meaning follows form, we do not define space-time, but we identify structures that fit to the known concept of space-time). Of course unit vectors must have unit length, so that we have to demand that

$$ \|\gamma_k\|^2 = \frac{1}{2n} \mathrm{Tr}(\gamma_k^2) = \pm 1 \quad (51) $$

Note that (since our norm is not positive definite), we explicitly allow for unit vectors with negative “length” as we find it for $\gamma_0$. Note furthermore that all skew-symmetric unit vectors square to $-1$ while the symmetric ones square to **1** [16].

Indeed systems of $N = p+q$ anti-commuting real matrices are known as real representations of Clifford algebras $Cl_{p,q}$. The index $p$ is the number of unit elements (“vectors”) that square to +1
---PAGE_BREAK---

and $q$ is the number of unit vectors that square to $-1$. Clifford algebras are not necessarily connected to Hamiltonian motion, rather they can be regarded as purely mathematical "objects". 
They can be defined without reference to matrices whatsoever. Hence in mathematics, sets of matrices are merely "representations" of Clifford algebras. But our game is about physics, and due to the proven one-dimensionality of time we concentrate on Clifford algebras $Cl_{N-1,1}$, which link CHOs in the described way with the generators of a Minkowski type metric. Further below it will turn out that the representation by matrices is—within the game—indeed helpful, since it leads to an overlap of certain symmetry structures. The unit elements (or unit "vectors") of a Clifford algebra, $\mathbf{e}_k$, are called the *generators* of the Clifford algebra. They pairwise anticommute and they square to $\pm 1$ (The role as *generator* of the Clifford algebra should not be confused with the role as generators of symplectic transformations (i.e., symplices). Though we are especially interested in Clifford algebras in which all generators are symplices, not all symplices are generators of the Clifford algebra. Bi-vectors for instance are symplices, but not generators of the Clifford algebra). Since the inverse of the unit elements $\mathbf{e}_k$ of a Clifford algebra must be unique, the products of different unit vectors form new elements and all possible products including the unit matrix form a group. There are $\binom{N}{k}$ possible combinations (products without repetition) of $k$ elements from a set of $N$ generators. We therefore find $\binom{N}{2}$ bi-vectors, which are products of two generators, $\binom{N}{3}$ tri-vectors, and so on. The product of all $N$ basic matrices is called the pseudoscalar. 
The total number of all k-vectors then is (We identify $k=0$ with the unit matrix 1.):

$$ \sum_{k=0}^{N} \binom{N}{k} = 2^N \quad (52) $$

If we desire to construct a complete system, then the number of variables of the Clifford algebra has to match the number of variables of the used matrix system:

$$ 2^N = (2n)^2 \quad (53) $$

Note that the root of this equation gives an even integer $2^{N/2} = 2n$ so that $N$ must be even. Hence all Hamiltonian Clifford algebras have an even dimension. Of course not all elements of the Clifford algebra may be symplices. The unit matrix (for instance) is a cosymplex. Consider the Clifford algebra $Cl_{1,1}$ with $N=2$, which has two generators, say $\gamma_0$ with $\gamma_0^2 = -1$ and $\gamma_1$ with $\gamma_1^2 = 1$. These two anticommute (by definition of the Clifford algebra), so that we find (besides the unit matrix) a fourth matrix formed by the product $\gamma_0\gamma_1$:

$$ \begin{aligned} \gamma_0 \gamma_1 &= -\gamma_1 \gamma_0 \\ (\gamma_0 \gamma_1)^2 &= \gamma_0 \gamma_1 \gamma_0 \gamma_1 \\ &= -\gamma_0 \gamma_0 \gamma_1 \gamma_1 = \mathbf{1} \end{aligned} \quad (54) $$

The completeness of the Clifford algebras as we use them here implies that any $2n \times 2n$-matrix $\mathbf{M}$ with $(2n)^2 = 2^N$ can be written as a linear combination of all elements of the Clifford algebra:

$$ \mathbf{M} = \sum_{k=0}^{4n^2-1} m_k \gamma_k \quad (55) $$

The coefficients can be computed from the scalar product of the unit vectors with the matrix **M**:

$$ m_k = (\gamma_k \cdot \mathbf{M})_S = \frac{s_k}{4n} \operatorname{Tr}(\gamma_k \mathbf{M} + \mathbf{M} \gamma_k) \quad (56) $$

Recall that skew-symmetric $\gamma_k$ have a negative length and therefore we included a factor $s_k$ which represents the “signature” of $\gamma_k$, in order to get the correct sign of the coefficients $m_k$.
---PAGE_BREAK---

Can we derive more properties of the constructable space-times? 
One restriction results from representation theory: A theorem from the theory of Clifford algebras states that $Cl_{p,q}$ has a representation by real matrices if (and only if) [17] + +$$p-q=0 \text{ or } 2 \operatorname{mod} 8 \qquad (57)$$ + +The additional requirement that all generators must be simplices so that $p = N-1$ and $q = 1$ then restricts $N$ to + +$$N-2=0 \text{ or } 2 \operatorname{mod} 8 \qquad (58)$$ + +Hence the only matrix systems that have the required symmetry properties within our modeling game are those that represent Clifford algebras with the dimensions $1+1, 3+1, 9+1, 11+1, 17+1, 19+1, 25+1, 27+1$ and so on. These correspond to matrix representations of size $2 \times 2, 4 \times 4, 32 \times 32, 64 \times 64, 512 \times 512$ and so on. The first of them is called *Pauli algebra*, the second one is the *Dirac algebra*. Do these two have special properties that the higher-dimensional algebras do not have? Yes, indeed. + +Firstly, since dynamics is based on canonical pairs, the real Pauli algebra describes the motion of a single DOF and the Dirac algebra describes the simplest system with interaction between two DOF. This suggests the interpretation that within our game, objects (Dirac-particles) are not located “within space-time”, since we did not define space at all up to this point, but that space-time can be modeled as an emergent phenomenon. Space-time is in between particles. + +Secondly, if we equate the number of fundamental variables ($2n$) of the oscillator phase space with the dimension of the Clifford space $N$, then Equation (53) leads to + +$$2^N = N^2 \qquad (59)$$ + +which allows for $N=2$ and $N=4$ only. But why should it be meaningful to assume $N=2n$? The reason is quite simple: If $2n > N$ as for all higher-dimensional state vectors, there are less generators of the algebra than independent variables. This discrepancy increases with $n$. 
Hence the described objects can not be pure vectors anymore, but must contain tensor-type components ($k$-vectors) (For a deeper discussion of the dimensionality of space-time, see Reference [16] and references therein). + +But before we describe a formal way to interpret Equation (59), let us first investigate the physical and geometrical implications of the game as described so far. + +## Matrix Exponentials + +We said that the unit vectors $\gamma_0$ and $\gamma_k$ are simplices and therefore generators of symplectic transformations. All symplectic matrices are matrix exponentials of simplices [12]. The computation of matrix exponentials is in the general case non-trivial. However, in the special case of matrices that square to $\pm 1$ (e.g., along the “axis” $\gamma_k$ of the coordinate system), the exponentials are readily evaluated: + +$$\exp(\gamma_a \tau) = \sum_{k=0}^{\infty} \frac{(\gamma_a \tau)^k}{k!} \qquad (60)$$ + +$$\exp(\gamma_a \tau) = \sum_{k=0}^{\infty} s^k \frac{\tau^{2k}}{(2k)!} + \gamma_a \sum_{k=0}^{\infty} s^k \frac{\tau^{2k+1}}{(2k+1)!}$$ + +where $s = \pm 1$ is the sign of the matrix square of $\gamma_a$. For $s = -1$ ($\gamma_a^2 = -1$), it follows that + +$$\mathbf{R}_a(\tau) = \exp(\gamma_a \tau) = \cos(\tau) + \gamma_a \sin(\tau) \qquad (61)$$ + +and for $s = 1$ ($\gamma_a^2 = 1$): + +$$\mathbf{B}_a(\tau) = \exp(\gamma_a \tau) = \cosh(\tau) + \gamma_a \sinh(\tau) \qquad (62)$$ + +We can identify skew-symmetric generators with rotations and (as we will show in more detail below) symmetric generators with boosts. 
+---PAGE_BREAK--- + +The (hyperbolic) sine/cosine structure of symplectic matrices are not limited to the generators but are a general property of the matrix exponentials of the symplex F (These properties are the main motivation to choose the nomenclature of "symplex" and "cosymplex".): + +$$ +\mathbf{M}(t) = \exp(\mathbf{F} t) = \mathbf{C} + \mathbf{S} \tag{63} +$$ + +where the (co-) symplex S ( C ) is given by: + +$$ +\begin{align*} +\mathbf{S} &= \sinh(\mathbf{F} t) \\ +\mathbf{C} &= \cosh(\mathbf{F} t) +\end{align*} +\tag{64} +$$ + +since (the linear combination of) all odd powers of a symplex is again a symplex and the sum of all even powers is a cosymplex. The inverse transfer matrix $\mathbf{M}^{-1}(t)$ is given by: + +$$ +\mathbf{M}^{-1}(t) = \mathbf{M}(-t) = \mathbf{C} - \mathbf{S} \quad (65) +$$ + +The physical meaning of the matrix exponential results from Equation (13), which states that (for constant simplices F) the solutions are given by the matrix exponential of F: + +$$ +\psi(t) = \mathbf{M}(t) \psi(0) \tag{66} +$$ + +A symplectic transformation can be regarded as the result of a possible evolution in time. There is no proof that non-symplectic processes are forbidden by nature, but that only symplectic transformations are *structure preserving*. Non-symplectic transformations are then *structure defining*. Both play a fundamental role in the physics of our model reality, because fundamental particles are according to our model-represented by dynamical structures. Therefore symplectic transformations describe those processes and interactions, in which structure is preserved, i.e., in which the type of the particle is not changed. The fundamental variables are just “carriers” of the dynamical structures. Non-symplectic transformations can be used to transform the structure. This could also be described by a rotation of the direction of time. Another interpretation is that of a gauge-transformation [18]. + +**5. 
The Significance of (De-)Coupling** + +In physics it is a standard technique to reduce complexity of problems by a suitable change of variables. In case of linear systems, the change of variables is a linear canonical transformation. The goal of such transformations is usually to substitute the solution of a complicated problem by the solution of multiple simpler problems. This technique is known under various names, one of these names is decoupling, but it is also known as principal component analysis or (as we will later show) transformation into the “rest frame”. In other branches of science one might refer to it as pattern recognition. + +In the following we investigate, how to transform a general oscillatory $2n \times 2n$-dimensional symplex to normal form. Certainly it would be preferable to find a "physical method", i.e., a method that matches to the concepts that we introduced so far and that has inherently physical significance. Or at least significance and explanatory power with respect to our modeling game. Let us start from the simplest systems, i.e., with the Pauli and Dirac algebras which correspond to matrices of size 2 × 2 and 4 × 4, respectively. +---PAGE_BREAK--- + +5.1. *The Pauli Algebra* + +The fundamental significance of the Pauli algebra is based on the even dimensionality of (classical) +phase space. The algebra of 2 × 2 matrices describes the motion of a single (isolated) DOF. Besides η₀, +the real Pauli algebra includes the following three matrices: + +$$ +\begin{align*} +\eta_1 &= \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix} \\ +\eta_2 &= \eta_0 \eta_1 = \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} \\ +\eta_3 &= \mathbf{1} = \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix} +\end{align*} +$$ + +(67) + +All except the unit matrix $\eta_3$ are simplices. If $\eta_0$ and $\eta_1$ are chosen to represent the generators of the corresponding Clifford algebra $Cl_{1,1}$, then $\eta_2$ is the only possible bi-vector. 
A general symplex has the form: + +$$ +\begin{align} +\mathbf{F} &= a\eta_0 + b\eta_1 + c\eta_2 \nonumber \\ +&= \begin{pmatrix} c & a+b \\ -a+b & -c \end{pmatrix} \tag{68} +\end{align} +$$ + +The characteristic equation is given by $\det(\mathbf{F} - \lambda \mathbf{1}) = 0$ + +$$ +\begin{align} +0 &= (c - \lambda)(-c - \lambda) - (a + b)(-a + b) \notag \\ +\lambda &= \pm \sqrt{c^2 + b^2 - a^2} \tag{69} +\end{align} +$$ + +The eigenvalues $\lambda_{\pm}$ are both either real for $a^2 < c^2 + b^2$ or both imaginary for $a^2 > c^2 + b^2$ (or both zero). +Systems in stable oscillation have purely imaginary eigenvalues. This case is most interesting for our +modeling game. + +Decoupling is usually understood in the more general sense to treat the interplay of several +(at least two) DOF, but here we ask, whether all possible oscillating systems of $n = 1$ are isomorphic to +normal form oscillators. Since there are 3 parameters in F and only one COM, namely the frequency +$\omega$, we need at least two parameters in the transformation matrix. Let us see, if we can choose these +two transformations along the axis of the Clifford algebra. In this case we apply sequentially two +symplectic transformations along the axis $\eta_0$ and $\eta_2$. Applying the symplectic transformation matrix +$\exp(\eta_0 \tau/2)$ we obtain: + +$$ +\begin{align} +\mathbf{F}_1 &= \exp(\eta_0 \tau / 2) \mathbf{F} \exp(-\eta_0 \tau / 2) \notag \\ +&= a' \eta_0 + b' \eta_1 + c' \eta_2 \tag{70} +\end{align} +$$ + +(The "half-angle" argument is for convenience). The transformed coefficients $a'$, $b'$ and $c'$ are given by + +$$ +\begin{align*} +a' &= a \\ +b' &= b \cos \tau - c \sin \tau \\ +c' &= c \cos \tau + b \sin \tau +\end{align*} +$$ + +(71) + +so that depending on the "duration of the pulse", we can choose to transform into a coordinate system +in which either $b' = 0$ or $c' = 0$. 
If we choose $\tau = \arctan(-c/b)$, then $c' = 0$, so that + +$$ +\mathbf{F}' = a\eta_0 + \sqrt{b^2 + c^2}\,\eta_1 = a'\eta_0 + b'\eta_1 \quad (72) +$$ + +If we choose the next generator to be $\eta_2$, then: + +$$ +\begin{align*} +a'' &= a' \cosh \tau - b' \sinh \tau \\ +b'' &= b' \cosh \tau - a' \sinh \tau +\end{align*} +$$ + +(73) +---PAGE_BREAK--- + +In this case we have to distinguish between the case, where $a' > b'$ and $a' < b'$. The former is the oscillatory system and in this case the transformation with $\tau = \operatorname{artanh}(b'/a')$ leads to the normal form of a 1-dim. oscillator: + +$$ +\begin{aligned} +a'' &= \sqrt{a^2 - b^2 - c^2} \\ +b'' &= 0 \\ +c'' &= 0 +\end{aligned} +\qquad (74) $$ + +and the matrix $F''$ has the form + +$$ F'' = \sqrt{a^2 - b^2 - c^2} \eta_0 \qquad (75) $$ + +If the eigenvalues are imaginary, then $\lambda = \pm i\omega$ and hence + +$$ F'' = \omega \eta_0 \qquad (76) $$ + +so that the solution is—for constant frequency—given by the matrix exponential: + +$$ +\begin{aligned} +\psi(t) &= \exp(\omega \eta_0 t) \psi(0) \\ +&= (\mathbf{1} \cos(\omega t) + \eta_0 \sin(\omega t)) \psi(0) +\end{aligned} +\qquad (77) $$ + +This shows that in the context of stable oscillator algebras the real Pauli algebra can be reduced to the complex number system: This becomes evident, if we consider possible representations of the complex numbers. Clearly we need two basic elements- the unit matrix and $\eta_0$, i.e., a matrix that commutes with the unit matrix and squares to -1. 
If we write "i" instead of $\eta_0$, then it is easily verified that (See also References [17,19] and Equation (34) in combination with Reference [20].): + +$$ +\begin{aligned} +z &= x + iy = Z = \begin{pmatrix} x & y \\ -y & x \end{pmatrix} \\ +\bar{z} &= x - iy = Z^T = x\mathbf{1} + \eta_0^T y \\ +\exp(i\phi) &= \cos(\phi) + i\sin(\phi) \\ +\|z\|^2 &= ZZ^T = z\bar{z} = x^2 + y^2 +\end{aligned} +\qquad (78) $$ + +The theory of holomorphic functions is based on series expansions and can be equally well formulated with matrices. Viewed from our perspective the complex numbers are a special case of the real Pauli algebra- since we have shown above that any one-dimensional oscillator can be canonically transformed into a system of the form of Equation (76). Nevertheless we emphasize that the complex numbers interpreted this way can only represent the normal form of an oscillator. The normal form excludes a different scaling of coordinates and momenta as used in classical mechanics, i.e., it avoids intrinsically the appearance of different "spring constants" and masses (There have been several attempts to explain the appearance of the complex numbers in quantum mechanics [21–27]. A general discussion of the use of complex numbers in physics is beyond the scope of this essay, therefore we add just a remark. Gary W. Gibbons wrote that "In particular there can be no evolution if $\psi$ is real" [24]. We agree with Gibbons that the unit imaginary can be related to evolution in time as it implies oscillation, but we do not agree with his conclusion. Physics was able to describe evolution in time without imaginaries before quantum mechanics and it still is. The unconscious use of the unit imaginary did not prevent quantum mechanics from being experimentally successful. But it prevents physicists from understanding its structure). + +## 5.2. The Dirac Algebra + +In this subsection we consider the oscillator algebra for two coupled DOF, the algebra of 4 × 4 matrices. 
In contrast to the real Pauli algebra, where the parameters *a*, *b* and *c* did not suggest a specific physical meaning, the structure of the Dirac algebra bears geometrical significance +---PAGE_BREAK--- + +as has been pointed out by David Hestenes and others [28–30]. The (real) Dirac algebra is the +simplest real algebra that enables for a description of two DOF and the interaction between them. +Furthermore the eigenfrequencies of a Dirac symplex F may be complex, while the spectrum of the +Pauli matrices does not include complex numbers off the real and imaginary axis. The spectrum of +general 2n × 2n-symplices has a certain structure - since the coefficients of the characteristic polynomial +are real: If λ is an eigenvalue of F, then its complex conjugate $\bar{\lambda}$ as well as λ and $-\bar{\lambda}$ are also eigenvalues. +As we will show, this is the spectrum of the Dirac algebra and therefore any 2n × 2n-system can, at +least in principle, be block-diagonalized using 4 × 4-blocks. The Dirac algebra is therefore the simplest +algebra that covers the general case. + +The structure of Clifford algebras follows Pascal’s triangle. The Pauli algebra has the structure 1 − 2 − 1 (scalar-vector-bivector), the Dirac algebra has the structure 1 − 4 − 6 − 4 − 1, standing for unit element (scalar), vectors, bi-vectors, tri-vectors and pseudoscalar. The vector elements are by convention indexed with γμ with μ = 0 ... 
3, i.e., the generators of the algebra (According to Pauli’s fundamental theorem of the Dirac algebra, all possible choices of the Dirac matrices are, as long as the “metric tensor” gμν remains unchanged, equivalent [31].): + +$$ +\begin{align} +\gamma_0 &= \begin{pmatrix} +0 & 1 & 0 & 0 \\ +-1 & 0 & 0 & 0 \\ +0 & 0 & 0 & 1 \\ +0 & 0 & -1 & 0 +\end{pmatrix} & +\gamma_1 &= \begin{pmatrix} +0 & -1 & 0 & 0 \\ +-1 & 0 & 0 & 0 \\ +0 & 0 & 0 & 1 \\ +0 & 0 & 1 & 0 +\end{pmatrix} \notag \\ +\gamma_2 &= \begin{pmatrix} +0 & 0 & 0 & 1 \\ +0 & 0 & 1 & 0 \\ +0 & 1 & 0 & 0 \\ +1 & 0 & 0 & 0 +\end{pmatrix} & +\gamma_3 &= \begin{pmatrix} +-1 & 0 & 0 & 0 \\ +0 & 1 & 0 & 0 \\ +0 & 0 & -1 & 0 \\ +0 & 0 & 0 & 1 +\end{pmatrix} \tag{79} +\end{align} +$$ + +We define the following numbering scheme for the remaining matrices (The specific choice of the +matrices is not unique. A table of the different systems can be found in Reference ([32]).): + +$$ +\begin{align*} +\gamma_{14} &= \gamma_0 \gamma_1 \gamma_2 \gamma_3; & \gamma_{15} &= \mathbf{1} \\ +\gamma_4 &= \gamma_0 \gamma_1; & \gamma_7 &= \gamma_{14} \gamma_0 \gamma_1 = \gamma_2 \gamma_3 \\ +\gamma_5 &= \gamma_0 \gamma_2; & \gamma_8 &= \gamma_{14} \gamma_0 \gamma_2 = \gamma_3 \gamma_1 \\ +\gamma_6 &= \gamma_0 \gamma_3; & \gamma_9 &= \gamma_{14} \gamma_0 \gamma_3 = \gamma_1 \gamma_2 \\ +\gamma_{10} &= \gamma_{14} \gamma_0 = \gamma_1 \gamma_2 \gamma_3 \\ +\gamma_{11} &= \gamma_{14} \gamma_1 = \gamma_0 \gamma_2 \gamma_3 \\ +\gamma_{12} &= \gamma_{14} \gamma_2 = \gamma_0 \gamma_3 \gamma_1 \\ +\gamma_{13} &= \gamma_{14} \gamma_3 = \gamma_0 \gamma_1 \gamma_2 +\end{align*} +\tag{80} +$$ + +According to Equation (17) we expect 10 simplices and since the 4 vectors and 6 bi-vectors are +simplices, all other elements are cosymplices. 
With this ordering, the general 4 × 4-symplex F can be +written as (instead of Equation (55)): + +$$ +F = \sum_{k=0}^{9} f_k \gamma_k +\quad (81) +$$ + +In Reference [32] we presented a detailed survey of the Dirac algebra with respect to symplectic Hamiltonian motion. The essence of this survey is the insight that the real Dirac algebra describes Hamiltonian motion of an ensemble of two-dimensional oscillators, but as well the motion of a “point particle” in 3-dimensional space, *i.e.*, that Equation (25) is, when expressed by the real Dirac algebra, *isomorphic to the Lorentz force equation* as we are going to show in Section 6.3. Or, in other words, the Dirac algebra allows to model a point particle and its interaction with the electromagnetic field in terms of the classical statistical ensemble of abstract oscillators. +---PAGE_BREAK--- + +## 6. Electromechanical Equivalence (EMEQ) + +The number and type of simplices within the Dirac algebra (80) suggests to use the following vector notation for the coefficients [32,33] of the observables: + +$$ +\begin{aligned} +\mathcal{E} & \equiv f_0 \\ +\vec{\mathcal{P}} & \equiv (f_1, f_2, f_3)^T \\ +\vec{\mathcal{E}} & \equiv (f_4, f_5, f_6)^T \\ +\vec{\mathcal{B}} & \equiv (f_7, f_8, f_9)^T +\end{aligned} +\qquad (82) $$ + +where the “clustering” of the coefficients into 3-dimensional vectors will be explained in the following. The first four elements $\mathcal{E}$ and $\vec{\mathcal{P}}$ are the coefficients of the generators of the Clifford algebra and the remaining simplices are 3 symmetric bi-vectors $\vec{\mathcal{E}}$ and 3 skew-symmetric bi-vectors $\vec{\mathcal{B}}$. As explained above, the matrix exponentials of pure Clifford elements are readily evaluated (Equations (61) and (62)). 
The effect of a symplectic similarity transformation on a symplex + +$$ +\begin{aligned} +\tilde{\psi} &= \mathbf{R}(\tau/2) \psi \\ +\tilde{\mathbf{F}} &= \mathbf{R}(\tau/2) \mathbf{F} \mathbf{R}^{-1}(\tau/2) \\ +&= \mathbf{R}(\tau/2) \mathbf{F} \mathbf{R}(-\tau/2) +\end{aligned} +\qquad (83) $$ + +can then be computed component-wise as in the following case of a rotation (using Equation (81)): + +$$ +\begin{aligned} +\tilde{\mathbf{F}} &= \sum_{k=0}^{9} f_k \mathbf{R}_a \gamma_k \mathbf{R}_a^{-1} \\ +\mathbf{R}_a \gamma_k \mathbf{R}_a^{-1} &= (\cos(\tau/2) + \gamma_a \sin(\tau/2)) \gamma_k (\cos(\tau/2) - \gamma_a \sin(\tau/2)) \\ +&= \gamma_k \cos^2(\tau/2) - \gamma_a \gamma_k \gamma_a \sin^2(\tau/2) + (\gamma_a \gamma_k - \gamma_k \gamma_a) \cos(\tau/2) \sin(\tau/2) +\end{aligned} +\qquad (84) $$ + +Since all Clifford elements either commute or anti-commute with each other, we have two possible solutions. The first ($\gamma_k$ and $\gamma_a$ commute) yields with $\gamma_a^2 = -1$: + +$$ \mathbf{R}_a \gamma_k \mathbf{R}_a^{-1} = \gamma_k \cos^2(\tau/2) - \gamma_a^2 \gamma_k \sin^2(\tau/2) = \gamma_k \qquad (85) $$ + +but if ($\gamma_k$ and $\gamma_a$ anti-commute) we obtain a rotation: + +$$ +\begin{aligned} +\mathbf{R}_a \gamma_k \mathbf{R}_a^{-1} &= \gamma_k (\cos^2(\tau/2) - \sin^2(\tau/2)) + \gamma_a \gamma_k 2 \cos(\tau/2) \sin(\tau/2) \\ +&= \gamma_k \cos(\tau) + \gamma_a \gamma_k \sin(\tau) +\end{aligned} +\qquad (86) $$ + +For $a=9$ ($\gamma_a = \gamma_1 \gamma_2$) for instance we find: + +$$ +\begin{aligned} +\tilde{\gamma}_1 &= \gamma_1 \cos(\tau) + \gamma_1 \gamma_2 \gamma_1 \sin(\tau) = \gamma_1 \cos(\tau) - \gamma_2 \sin(\tau) \\ +\tilde{\gamma}_2 &= \gamma_2 \cos(\tau) + \gamma_1 \gamma_2 \gamma_2 \sin(\tau) = \gamma_2 \cos(\tau) + \gamma_1 \sin(\tau) \\ +\tilde{\gamma}_3 &= \gamma_3, +\end{aligned} +\qquad (87) $$ + +which is formally equivalent to a rotation of $\vec{\mathcal{P}}$ about the “z-axis”. 
If the generator $\gamma_a$ of the transformation is symmetric, we obtain: + +$$ +\begin{aligned} +\mathbf{R}_a \gamma_k \mathbf{R}_a^{-1} &= (\cosh(\tau/2) + \gamma_a \sinh(\tau/2)) \gamma_k (\cosh(\tau/2) - \gamma_a \sinh(\tau/2)) \\ +&= \gamma_k \cosh^2(\tau/2) - \gamma_a \gamma_k \gamma_a \sinh^2(\tau/2) + (\gamma_a \gamma_k - \gamma_k \gamma_a) \cosh(\tau/2) \sinh(\tau/2) +\end{aligned} +\qquad (88) $$ + +so that (if $\gamma_a$ and $\gamma_k$ commute, with $\gamma_a^2 = \mathbf{1}$): + +$$ +\begin{aligned} +\tilde{\gamma}_k &= \gamma_k \cosh^2(\tau/2) - \gamma_a^2 \gamma_k \sinh^2(\tau/2) \\ +&= \gamma_k (\cosh^2(\tau/2) - \sinh^2(\tau/2)) = \gamma_k +\end{aligned} +\qquad (89) $$ +---PAGE_BREAK--- + +and if $\gamma_a$ and $\gamma_k$ anticommute: + +$$ +\begin{aligned} +\tilde{\gamma}_k &= \gamma_k (\cosh^2(\tau/2) + \sinh^2(\tau/2)) + 2\gamma_a \gamma_k \cosh(\tau/2) \sinh(\tau/2) \\ +&= \gamma_k \cosh(\tau) + \gamma_a \gamma_k \sinh(\tau), +\end{aligned} +\quad (90) $$ + +which is equivalent to a boost when the following parametrization of “rapidity” $\tau$ is used: + +$$ +\begin{aligned} +\tanh(\tau) &= \beta \\ +\sinh(\tau) &= \beta\gamma \\ +\cosh(\tau) &= \gamma \\ +\gamma &= \frac{1}{\sqrt{1-\beta^2}} +\end{aligned} +\quad (91) $$ + +A complete survey of these transformations and the (anti-) commutator tables can be found in Reference [32] (This formalism corresponds exactly to the relativistic invariance of a Dirac spinor in QED as described for instance in Reference [34], although the Dirac theory uses complex numbers and a different sign-convention for the metric tensor). The “spatial” rotations are generated by the bi-vectors associated with $\vec{B}$ and Lorentz boosts by the components associated with $\vec{E}$. The remaining 4 generators of symplectic transformations correspond to $\mathcal{E}$ and $\vec{P}$. 
They were named *phase-rotation* (generated by $\gamma_0$) and *phase-boosts* (generated by $\vec{\gamma} = (\gamma_1, \gamma_2, \gamma_3)$) and have been used for instance for symplectic decoupling as described in Reference [33]. + +It is natural (and already suggested by our notation) to consider the possibility that the EMEQ (Equation (82)) allows to model a relativistic particle as represented by energy $\mathcal{E}$ and momentum **P** either in an external electromagnetic field given by $\vec{E}$ and $\vec{B}$ or-alternatively-in an accelerating and/or rotating reference frame, where the elements $\vec{E}$ and $\vec{B}$ correspond to the axis of acceleration and rotation, respectively. We assumed, that all components of the state vector $\psi$ are equivalent in meaning and unit. Though we found that the state vector is formally composed of canonical pairs, the units are unchanged and identical for all elements of $\psi$. From Equation (13) we take, that the simplex **F** (and also **A**) have the unit of a frequency. If the Hamiltonian $\mathcal{H}$ is supposed to represent energy, then the components of $\psi$ have the unit of the square root of action. + +If the coefficients are supposed to represent the electromagnetic field, then we need to express these fields in the unit of frequency. This can be done, but it requires to involve natural conversion factors like $\hbar$, charge $e$, velocity $c$ and a mass, for instance the electron mass $m_e$. The magnetic field (for instance) is related to a “cyclotron frequency” $\omega_c$ by $\omega_c \propto \frac{e}{m_e}\,B$. + +However, according to the rules of the game, the distinction between particle properties and “external” fields requires a reason, an explanation. Especially as it is physically meaningless for macroscopic coupled oscillators. 
In References [32,33] this nomenclature was used in a merely *formal* way, namely to find a descriptive scheme to order the symplectic generators, so to speak an *equivalent circuit* to describe the general possible coupling terms for two-dimensional coupled linear optics as required for the description of charged particle beams. + +Here we play the reversed modeling game: Instead of using the EMEQ as an equivalent circuit to describe ensembles of oscillators, we now use ensembles of oscillators as an equivalent circuit to describe point particles. The motivation for Equation (82) is nevertheless similar, i.e., it follows from the formal structure of the Dirac Clifford algebra. The grouping of the coefficients comes along with the number of vector- and bi-vector-elements, 4 and 6, respectively. The second criterion is to distinguish between generators of rotations and boosts, i.e., between symmetric and skew-symmetric simplices, which separates energy from momentum and electric from magnetic elements. Third of all, we note that even (Even k-vectors are those with even $k = 2m$, where m is a natural number) elements (scalar, bi-vectors, 4-vectors etc.) of even-dimensional Clifford algebras form a sub-algebra. This means that we can generate the complete Clifford algebra from the vector-elements by matrix multiplication (this is why we call them generators), but we can not generate vectors from bi-vectors by multiplication. And therefore the vectors are the particles (which are understood as the sources of fields) and the +---PAGE_BREAK--- + +bi-vectors are the fields, which are generated by the objects and influence their motion. The full Dirac symplex-algebra includes the description of a particle (vector) in a field (bi-vector). But why would the field be *external*? 
Simply, because it is impossible to generate bi-vectors from a single vector-type object, since any single vector-type object written as $\mathcal{E}\gamma_0 + \vec{P} \cdot \hat{\gamma}$ squares to a scalar. Therefore, the fields must be the result of interaction with other particles and hence we call them “external”. This is in some way a “first-order” approach, since there might be higher order processes that we did not consider yet. But in the linear approach (i.e., for second-order Hamiltonians), this distinction is reasonable and hence a legitimate move in the game. + +Besides the Hamiltonian structure (symplices vs. co-symplices) and the Clifford algebraic structure (distinguishing vectors, bi-vectors, tri-vectors etc.) there is a third essential symmetry, which is connected to the real matrix representation of the Dirac algebra and to the fact that it describes the general Hamiltonian motion of coupled oscillators: To distinguish the even from the odd elements with respect to the block-diagonal matrix structure. We used this property in Reference [33] to develop a general geometrical decoupling algorithm (see also Section 6.2). + +Now it may appear that we are cheating somehow, as relativity is usually "derived" from the constancy of the speed of light, while in our modeling game, we did neither introduce spatial notions nor light at all. Instead we directly arrive at notions of quantum electrodynamics (QED). How can this be? The definition of "velocity" within wave mechanics usually involves the dispersion relation of waves, i.e., the velocity of a wave packet is given by the group velocity $\vec{v}_{gr}$ defined by + +$$ \vec{v}_{gr} = \vec{\nabla}_{\vec{k}} \omega(\vec{k}) \quad (92) $$ + +and the so-called phase velocity $v_{ph}$ defined by + +$$ v_{ph} = \frac{\omega}{k} \quad (93) $$ + +It is then typically mentioned that the product of these two velocities is a constant $v_{gr} v_{ph} = c^2$. 
By the use of the EMEQ and Equation (29), the eigenvalues of $\mathbf{F}$ can be written as: + +$$ K_1 = -\mathrm{Tr}(\mathbf{F}^2)/4 $$ + +$$ K_2 = \mathrm{Tr}(\mathbf{F}^4)/16 - K_1^2/4 $$ + +$$ \omega_1 = \sqrt{K_1 + 2\sqrt{K_2}} $$ + +$$ \omega_2 = \sqrt{K_1 - 2\sqrt{K_2}} \quad (94) $$ + +$$ \omega_1^2 \omega_2^2 = K_1^2 - 4K_2 = \mathrm{Det}(\mathbf{F}) $$ + +$$ K_1 = \epsilon^2 + \vec{B}^2 - \vec{E}^2 - \vec{P}^2 $$ + +$$ K_2 = (\epsilon \vec{B} + \vec{E} \times \vec{P})^2 - (\vec{E} \cdot \vec{B})^2 - (\vec{P} \cdot \vec{B})^2 $$ + +Since symplectic transformations are similarity transformations, they do not alter the eigenvalues of the matrix $\mathbf{F}$ and since all possible evolutions in time (which can be described by the Hamiltonian) are symplectic transformations, the eigenvalues (of closed systems) are conserved. If we consider a “free particle”, we obtain from Equation (94): + +$$ \omega_{1,2} = \pm \sqrt{\epsilon^2 - \vec{p}^2} \quad (95) $$ + +As we mentioned before both, energy and momentum, have (within this game) the unit of frequencies. If we take into account that $\omega_{1,2} \equiv m$ is fixed, then the dispersion relation for “the energy” $\epsilon = \omega$ is + +$$ \epsilon = \omega = \sqrt{m^2 + \vec{p}^2} \quad (96) $$ +---PAGE_BREAK--- + +which is indeed the correct relativistic dispersion. But how do we make the step from pure oscillations to *waves*? (The question if Quantum theory requires Planck's constant $\hbar$, has been answered negative by John P. Ralston [35]). + +## 6.1. Moments and The Fourier Transform + +In case of "classical" probability distribution functions (PDFs) $\phi(x)$ we may use the Taylor terms of the characteristic function $\tilde{\phi}_x(t) = \langle \exp itx \rangle_x$, which is the Fourier transform of $\phi(x)$, at the origin. The $k$-th moment is then given by + +$$ \langle x^k \rangle = i^k \tilde{\phi}^{(k)}(0) \quad (97) $$ + +where $\phi^{(k)}$ is the $k$-th derivative of $\tilde{\phi}_x(t)$. 
+ +A similar method would be of interest for our modeling game. Since a (phase space-) density is positive definite, we can always take the square root of the density instead of the density itself: $\phi = \sqrt{\rho}$. The square root can also be defined to be a complex function, so that the density is $\rho = \phi\phi^* = \|\phi\|^2$ and, if mathematically well-defined (convergent), we can also define the Fourier transform of the complex root, i.e., + +$$ \tilde{\phi}(\omega, \vec{k}) = N \int \phi(t, \vec{x}) \exp(i\omega t - i\vec{k}\cdot\vec{x}) dt d^3x \quad (98) $$ + +and vice versa: + +$$ \phi(t, \vec{x}) = \tilde{N} \int \tilde{\phi}(\omega, \vec{k}) \exp(-i\omega t + i\vec{k}\cdot\vec{x}) d\omega d^3k \quad (99) $$ + +In principle, we may *define* the density not only by real and imaginary part, but by an arbitrary number of components. Thus, if we consider a four-component spinor, we may of course mathematically define its Fourier transform. But in order to see, why this might be more than a mathematical “trick”, but *physically meaningful*, we need to go back to the notions of classical statistical mechanics. Consider that we replace the single state vector by an “ensemble”, where we leave the question open, if the ensemble should be understood as a single phase space trajectory, averaged over time, or as some (presumably large) number of different trajectories. It is well-known, that the phase space density $\rho(\psi)$ is stationary, if it depends only on constants of motion, for instance if it depends only on the Hamiltonian itself. With the Hamiltonian of Equation (12), the density could for example have the form + +$$ \rho(H) \propto \exp(-\beta H) = \exp(-\beta\, \psi^T \mathbf{A}\, \psi / 2) \quad (100) $$ + +which corresponds to a multivariate Gaussian. 
But more important is the insight, that the density exclusively depends on the second moments of the phase space variables as given by the Hamiltonian, i.e., in case of a "free particle" it depends on $\mathcal{E}$ and $\vec{P}$. And therefore we should be able to use energy and momentum as frequency $\omega$ and wave-vector $\vec{k}$. + +But there are more indications in our modeling game that suggest the use of a Fourier transform as we will show in the next section. + +## 6.2. The Geometry of (De-)Coupling + +In the following we give a (very) brief summary of Reference [33]. As already mentioned, decoupling is meant-despite the use of the EMEQ-first of all purely technical-mathematical. Let us delay the question, if the notions that we define in the following have any physical relevance. Here we +---PAGE_BREAK--- + +refer first of all to block-diagonalization, i.e., we treat the symplex F just as a "Hamiltonian" matrix. +From the definition of the real Dirac matrices we obtain F in explicit 4 × 4 matrix form: + +$$ +\mathbf{F} = +\begin{pmatrix} +-E_x - P_z & \varepsilon - P_x + E_z + B_y & E_y - B_z & B_x + P_y \\ +-\varepsilon - P_x + E_z - B_y & E_x + P_z & -B_x + P_y & -E_y - B_z \\ +E_y + B_z & B_x + P_y & E_x - P_z & \varepsilon + P_x + E_z - B_y \\ +-B_x + P_y & -E_y + B_z & -\varepsilon + P_x + E_z + B_y & -E_x + P_z +\end{pmatrix} +\tag{101} +$$ + +If we find a (sequence of) symplectic similarity transformations that would allow to reduce the +4 × 4-form to a block-diagonal form, then we would obtain two separate systems of size 2 × 2 and we +could continue with the transformations of Section 5.1. + +Inspection of Equation (101) unveils that $\mathbf{F}$ is block-diagonal, if the coefficients $E_y, P_y, B_x$ and $B_z$ +vanish. Obviously this implies that $\vec{E} \cdot \vec{B} = 0$ and $\vec{P} \cdot \vec{B} = 0$. 
Or vice versa, if we find a symplectic +method that transforms into a system in which $\vec{E} \cdot \vec{B} = 0$ and $\vec{P} \cdot \vec{B} = 0$, then we only need to apply +appropriate rotations to achieve block-diagonal form. As shown in Reference [33] this can be done +in different ways, but in general it requires the use of the “phase rotation” $\gamma_0$ and “phase boosts” +$\tilde{\gamma}$. Within the conceptual framework of our game, the application of these transformations +equals the use of “matter fields”. But furthermore, this shows that block-diagonalization has also +geometric significance within the Dirac algebra and, with respect to the Fourier transformation, +the requirement $\vec{P} \cdot \vec{B} = 0$ indicates a divergence free magnetic field, as the replacement of $\vec{P}$ by +$\vec{\nabla}$ yields $\vec{\nabla} \cdot \vec{B} = 0$. The additional requirement $\vec{E} \cdot \vec{B} = 0$ also fits well to our physical picture of +e.m. waves. Note furthermore, that there is no analogous requirement to make $\vec{P} \cdot \vec{E}$ equal to zero. +Thus (within this analogy) we can accept $\vec{\nabla} \cdot \vec{E} \neq 0$. + +But this is not everything to be taken from this method. If we analyze in more detail, which expressions are required to vanish and which may remain, then it appears that $\vec{P} \cdot \vec{B}$ is explicitly given by + +$$ +\begin{align*} +P_x B_x \gamma_1 \gamma_2 \gamma_3 + P_y B_y \gamma_2 \gamma_3 \gamma_1 + P_z B_z \gamma_3 \gamma_1 \gamma_2 &= (\vec{P} \cdot \vec{B}) \gamma_{10} \\ +E_x B_x \gamma_4 \gamma_2 \gamma_3 + E_y B_y \gamma_5 \gamma_3 \gamma_1 + E_z B_z \gamma_6 \gamma_1 \gamma_2 &= (\vec{E} \cdot \vec{B}) \gamma_{14} \\ +P_x E_x \gamma_1 \gamma_4 \gamma_3 + P_y E_y \gamma_2 \gamma_5 \gamma_1 + P_z E_z \gamma_3 \gamma_6 \gamma_2 &= -(\vec{P} \cdot \vec{E}) \gamma_0 +\end{align*} +\tag{102} +$$ + +That means that exactly those products have to vanish which yield *cosymplices*. 
This can be interpreted
via the structure preserving properties of symplectic motion. Since within our game the particle *type*
can only be represented by the structure of the dynamics, and since electromagnetic processes do not
change the type of a particle, they are quite obviously *structure preserving*, which then implies
the non-appearance of co-symplices. Or in other words—electromagnetism is of Hamiltonian nature.
We will come back to this point in Section 6.4.

6.3. *The Lorentz Force*

In the previous section we established the distinction between the “mechanical” elements
**P** = **ε** γ₀ + **γ̃** ⋅ **P** of the general matrix **F** and the electrodynamical elements **F** = γ₀ **γ̃** ⋅ **E** + γ₁₄ γ₀ **γ̃** ⋅ **B**.
Since the matrix **S** = Σ γ₀ is a symplex, let us assume it to be equal to **P** and apply Equation (25). We then
find (with the appropriate relative scaling between **P** and **F** as explained above):

$$
\frac{d\mathbf{P}}{d\tau} = \dot{\mathbf{P}} = \frac{q}{2m} (\mathbf{F}\mathbf{P} - \mathbf{P}\mathbf{F}) \quad (103)
$$

which yields, written with the coefficients of the real Dirac matrices:
---PAGE_BREAK---

$$
\begin{align}
\frac{d\mathcal{E}}{d\tau} &= \frac{q}{m} \vec{P} \cdot \vec{E} \\
\frac{d\vec{P}}{d\tau} &= \frac{q}{m} (\varepsilon \vec{E} + \vec{P} \times \vec{B})
\end{align}
\tag{104}
$$

where $\tau$ is the proper time. If we convert to the lab frame time $t$ using $d\tau = \frac{dt}{\gamma}$, Equation (103) yields
(setting $c = 1$):

$$
\begin{align*}
\gamma \frac{d\mathcal{E}}{dt} &= q \gamma \vec{v} \cdot \vec{E} \\
\gamma \frac{d\vec{P}}{dt} &= \frac{q}{m} (m \gamma \vec{E} + m \gamma \vec{v} \times \vec{B}) \tag{105} \\
\frac{d\mathcal{E}}{dt} &= q \vec{v} \cdot \vec{E} \\
\frac{d\vec{P}}{dt} &= q (\vec{E} + \vec{v} \times \vec{B})
\end{align*}
$$

which is the Lorentz force. 
Therefore the Lorentz force acting on a charged particle in 3 spatial dimensions can be modeled by an ensemble of 2-dimensional CHOs. The isomorphism between the observables of the perceived 3-dimensional world and the second moments of density distributions in the phase space of 2-dimensional oscillators is remarkable.

In any case, Equation (103) clarifies three things within the game. Firstly, both energy $\mathcal{E}$ and momentum $\vec{p}$ have to be interpreted as mechanical energy and momentum (and not canonical); secondly, the relative normalization between fields and mechanical momentum is fixed; and last, but not least, it clarifies the relation between the time related to mass (proper time) and the time related to $\gamma_0$ and energy, which appears to be the laboratory time.

6.4. *The Maxwell Equations*

As we already pointed out, waves are (within this game) the result of a Fourier transformation
(FT). But there are different ways to argue this. In Reference [16] we argued that Maxwell’s equations
can be derived within our framework by (a) the postulate that space-time emerges from interaction,
i.e., that the fields $\vec{E}$ and $\vec{B}$ have to be constructed from the 4-vectors $\mathbf{X} = t\,\gamma_0 + \vec{x}\cdot\vec{\gamma}$, $\mathbf{J} = \rho\gamma_0 + \vec{j}\cdot\vec{\gamma}$
and $\mathbf{A} = \Phi\gamma_0 + \vec{A}\cdot\vec{\gamma}$ with (b) the requirement that no co-symplices emerge. But we can also argue
with the FT of the density (see Section 6.1). 
We introduce the 4-derivative

$$
\partial = -\partial_t \gamma_0 + \partial_x \gamma_1 + \partial_y \gamma_2 + \partial_z \gamma_3
\quad (106)
$$

The non-abelian nature of matrix multiplication requires to distinguish differential operators acting to
the right and to the left, i.e., we have $\overrightarrow{\partial}$ as defined in Equation (106) and $\overleftarrow{\partial}$, which is written to the
right of the operand (thus indicating the order of the matrix multiplication) so that

$$
\begin{equation}
\begin{aligned}
\mathbf{H} \overleftarrow{\partial} &\equiv -\partial_t \mathbf{H} \gamma_0 + \partial_x \mathbf{H} \gamma_1 + \partial_y \mathbf{H} \gamma_2 + \partial_z \mathbf{H} \gamma_3 \\
\overrightarrow{\partial} \mathbf{H} &\equiv -\gamma_0 \partial_t \mathbf{H} + \gamma_1 \partial_x \mathbf{H} + \gamma_2 \partial_y \mathbf{H} + \gamma_3 \partial_z \mathbf{H}
\end{aligned}
\tag{107}
\end{equation}
$$

Then we find the following general rules (see Equation (35)) that prevent non-zero cosymplices:

$$
\begin{align*}
& \frac{1}{2} \left( \overrightarrow{\partial} \text{ vector} - \text{vector} \overleftarrow{\partial} \right) &&\Rightarrow && \text{bi-vector} \\
& \frac{1}{2} \left( \overrightarrow{\partial} \text{ bi-vector} - \text{bi-vector} \overleftarrow{\partial} \right) &&\Rightarrow && \text{vector} \\
& \frac{1}{2} \left( \overrightarrow{\partial} \text{ bi-vector} + \text{bi-vector} \overleftarrow{\partial} \right) &&\Rightarrow && \text{axial vector } = 0 \\
& \frac{1}{2} \left( \overrightarrow{\partial} \text{ vector} + \text{vector} \overleftarrow{\partial} \right) &&\Rightarrow && \text{scalar } = 0
\end{align*}
\tag{108}
$$
---PAGE_BREAK---

Application of these derivatives yields:

$$
\begin{align*}
\mathbf{F} &= \frac{1}{2} \left( \vec{\partial} \mathbf{A} - \mathbf{A} \vec{\partial} \right) \\
4\pi \mathbf{J} &= \frac{1}{2} \left( \vec{\partial} \mathbf{F} - \mathbf{F} \vec{\partial} 
\right) \\
0 &= \vec{\partial} \mathbf{F} + \mathbf{F} \vec{\partial} \\
0 &= \frac{1}{2} \left( \vec{\partial} \mathbf{A} + \mathbf{A} \vec{\partial} \right) \\
0 &= \frac{1}{2} \left( \vec{\partial} \mathbf{J} + \mathbf{J} \vec{\partial} \right)
\end{align*}
\tag{109}
$$

The first row of Equation (109) corresponds to the usual definition of the bi-vector fields from a vector potential $\mathbf{A}$ and is (written by components) given by

$$
\begin{align}
\vec{E} &= -\vec{\nabla}\Phi - \partial_t \vec{A} \\
\vec{B} &= \vec{\nabla} \times \vec{A}
\end{align}
\tag{110}
$$

The second row of Equation (109) corresponds to the usual definition of the 4-current J as sources of the fields and the last three rows just express the impossibility of the appearance of cosymplices. They explicitly represent the homogeneous Maxwell equations

$$
\begin{align}
\vec{\nabla} \cdot \vec{B} &= 0 \\
\vec{\nabla} \times \vec{E} + \partial_t \vec{B} &= 0
\end{align}
\tag{111}
$$

the continuity equation

$$
\partial_t \rho + \vec{\nabla} \cdot \vec{j} = 0
\quad
(112)
$$

and the so-called “Lorentz gauge”

$$
\partial_t \Phi + \vec{\nabla} \cdot \vec{A} = 0
\qquad
(113)
$$

The simplest idea about the 4-current within QED is to assume that it is proportional to the “probability current”, which is within our game given by the vector components of $\mathbf{S} = \Sigma \gamma_0$.

7. 
The Phase Space + +Up to now, our modeling game referred to the second moments and the elements of S are second +moments such that the observables are given by (averages over) the following quadratic forms: + +$$ +\begin{align*} +\mathcal{E} &\propto \psi^T \psi = q_1^2 + p_1^2 + q_2^2 + p_2^2 \\ +p_x &\propto -q_1^2 + p_1^2 + q_2^2 - p_2^2 \\ +p_y &\propto 2(q_1 q_2 - p_1 p_2) \\ +p_z &\propto 2(q_1 p_1 + q_2 p_2) \\ +E_x &\propto 2(q_1 p_1 - q_2 p_2) \\ +E_y &\propto -2(q_1 p_2 + q_2 p_1) \\ +E_z &\propto q_1^2 - p_1^2 + q_2^2 - p_2^2 \\ +B_x &\propto 2(q_1 q_2 + p_1 p_2) \\ +B_y &\propto q_1^2 + p_1^2 - q_2^2 - p_2^2 \\ +B_z &\propto 2(q_1 p_2 - p_1 q_2) +\end{align*} +\tag{114} +$$ + +If we analyze the real Dirac matrix coefficients of $\mathbf{S} = \psi \psi^T \gamma_0$ in terms of the EMEQ and evaluate the +quadratic relations between those coefficients, then we obtain: +---PAGE_BREAK--- + +$$ +\begin{align*} +\vec{P}^2 &= \vec{E}^2 = \vec{B}^2 = \varepsilon^2 \\ +0 &= \vec{E}^2 - \vec{B}^2 \\ +\varepsilon^2 &= \frac{1}{2}(\vec{E}^2 + \vec{B}^2) \\ +\varepsilon \vec{P} &= \vec{E} \times \vec{B} \\ +\varepsilon^3 &= \vec{P} \cdot (\vec{E} \times \vec{B}) \\ +m^2 &\propto \varepsilon^2 - \vec{P}^2 = 0 \\ +\vec{P} \cdot \vec{E} &= \vec{E} \cdot \vec{B} = \vec{P} \cdot \vec{B} = 0 +\end{align*} +$$ + +Besides a missing renormalization these equations describe an object without mass but with the geometric properties of light as described by electrodynamics, e.g., by the electrodynamic description of electromagnetic waves, which are $\vec{E} \cdot \vec{B} = 0$, $\vec{P} \propto \vec{E} \times \vec{B}$, $\vec{E}^2 = \vec{B}^2$ and so on. Hence single spinors are light-like and can not represent massive particles. + +Consider the spinor as a vector in a four-dimensional Euclidean space. 
We write the symmetric matrix $\mathcal{A}$ (or $\Sigma$, respectively) as a product in the form of a Gramian: + +$$ +\mathcal{A} = \mathcal{B}^T \mathcal{B} \tag{116} +$$ + +or-componentwise: + +$$ +\begin{align} +\mathcal{A}_{ij} &= \sum_k (\mathcal{B}^T)_{ik} \mathcal{B}_{kj} \nonumber \\ +&= \sum_k \mathcal{B}_{ki} \mathcal{B}_{kj} \tag{117} +\end{align} +$$ + +The last line can be read such that matrix element $\mathcal{A}_{ij}$ is the conventional 4-dimensional scalar product of column vector $\mathcal{B}_i$ with column vector $\mathcal{B}_j$. + +From linear algebra we know that Equation (116) yields a non-singular matrix $\mathcal{A}$, iff the column-vectors of the matrix $\mathcal{B}$ are linearly independent. In the orthonormal case, the matrix $\mathcal{A}$ simply is the pure form of a non-singular matrix, i.e., the unit matrix. Hence, if we want to construct a massive object from spinors, we need several spinors to fill the columns of $\mathcal{B}$. The simplest case is the orthogonal case: the combination of four mutual orthogonal vectors. Given a general 4-component Hamiltonian spinor $\psi = (q_1, p_1, q_2, p_2)$, how do we find a spinor that is orthogonal to this one? In 3 (i.e., odd) space dimensions, we know that there are two vectors that are perpendicular to any vector $(x, y, z)^T$, but without fixing the first vector, we can't define the others. In even dimensions this is different: it suffices to find a non-singular skew-symmetric matrix like $\gamma_0$ to generate a vector that is orthogonal to $\psi$, namely $\gamma_0 \psi$. As in Equation (3), it is the skew-symmetry of the matrix that ensures the orthogonality. A third vector $\gamma_k \psi$ must then be orthogonal to $\psi$ and to $\gamma_0 \psi$. It must be skew-symmetric and it must hold $\psi^T \gamma_k^T \gamma_0 \psi = 0$. 
This means that the product $\gamma_k^T \gamma_0$ must also be skew-symmetric and hence that $\gamma_k$ must anti-commute with $\gamma_0$:

$$
\begin{align}
(\gamma_k^T \gamma_0)^T &= \gamma_0^T \gamma_k = -\gamma_k^T \gamma_0 \\
\Rightarrow \quad 0 &= \gamma_0^T \gamma_k + \gamma_k^T \gamma_0 \tag{118} \\
0 &= \gamma_0 \gamma_k + \gamma_k \gamma_0
\end{align}
$$

Now let us for a moment return to the question of dimensionality. There are in general $2n(2n-1)/2$ non-zero independent elements in a skew-symmetric square $2n \times 2n$ matrix. But how many matrices are there in the considered phase space dimensions, i.e., in $1+1$, $3+1$ and $9+1$ (etc.) dimensions which anti-commute with $\gamma_0$? We need at least $2n-1$ skew-symmetric anti-commuting elements to obtain a diagonal $\mathcal{A}$. However, this implies at least $N-1$ anticommuting elements of the Clifford algebra that square to $-1$. Hence the ideal case is $2n=N$, which is only true for the Pauli and Dirac algebra. For the Pauli algebra, there is one skew-symmetric element, namely $\eta_0$. In the Dirac algebra there are 6 skew-symmetric generators that contain two sets of mutually anti-commuting skew-symmetric
---PAGE_BREAK---

matrices: $\gamma_0, \gamma_{10}$ and $\gamma_{14}$ on the one hand and $\gamma_7, \gamma_8$ and $\gamma_9$ on the other hand. The next considered Clifford algebra with $N = 9+1$ dimensions requires a representation by $2n = 32 = 2^5$-dimensional real matrices. Hence this algebra may not represent a Clifford algebra with more than 10 unit elements—certainly not $2n$. Hence, we can not use the algebra to generate purely massive objects (e.g., diagonal matrices) without further restrictions (i.e., projections) of the spinor $\psi$.

But what exactly does this mean? Of course we can easily find 32 linearly independent spinors to generate an orthogonal matrix $B$. So what exactly is special in the Pauli- and Dirac algebra? 
To see this, we need to understand, what it means that we can use the matrix $B$ of mutually orthogonal column-spinors + +$$ B = (\psi, \gamma_0 \psi, \gamma_{10} \psi, \gamma_{14} \psi) \tag{119} $$ + +This form implies that we can define the *mass* of the “particle” algebraically, and since we have $N-1=3$ anticommuting skew-symmetric matrices in the Dirac algebra, we can find a multispinor $B$ for any arbitrary point in phase space. This does not seem to be sensational at first sight, since this appears to be a property of any Euclidean space. The importance comes from the fact that $\psi$ is a “point” in a very special space-a point in phase space. In fact, we will argue in the following that this possibility to factorize $\psi$ and the density $\rho$ is everything but self-evident. + +If we want to simulate a phase space distribution, we can either define a phase space density $\rho(\psi)$ or we use the technique of Monte-Carlo simulations and represent the phase space by (a huge number of random) samples. If we generate a random sample and we like to implement a certain exact symmetry of the density in phase space, then we would (for instance) form a symmetric sample by appending not only a column-vector to $B$, but also its negative $-\psi$. In this way we obtain a sample with an exact symmetry. In a more general sense: If a phase space symmetry can be represented by a matrix $\gamma_s$ that allows to associate to an arbitrary phase space point $\psi$ a second point $\gamma_s \psi$ where $\gamma_s$ is skew-symmetric, then we have a certain continuous linear rotational symmetry in this phase space. As we have shown, phase-spaces are intrinsically structured by $\gamma_0$ and insofar much more restricted than Euclidean spaces. This is due to the distinction of symplectic from non-symplectic transformations and due to the intrinsic relation to Clifford algebras: Phase spaces are spaces structured by time. 
Within our game, the phase space is the only possible fundamental space. + +We may imprint the mentioned symmetry to an arbitrary phase space density $\rho$ by taking all phase space samples that we have so far and adding the same number of samples, each column multiplied by $\gamma_s$. Thus, we have a single rotation in the Pauli algebra and two of them in the Dirac algebra: + +$$ +\begin{aligned} +B_0 &= \psi \\ +\gamma_0 &\rightarrow B_1 = (\psi, \gamma_0 \psi) \\ +\gamma_{14} &\rightarrow B_2 = (\psi, \gamma_0 \psi, \gamma_{14} \psi, \gamma_{14} \gamma_0 \psi) \\ +&= (\psi, \gamma_0 \psi, \gamma_{14} \psi, \gamma_{10} \psi) +\end{aligned} +\tag{120} $$ + +or: + +$$ +\begin{aligned} +B_0 &= \psi \\ +\gamma_7 &\rightarrow B_1 = (\psi, \gamma_7 \psi) \\ +\gamma_8 &\rightarrow B_2 = (\psi, \gamma_7 \psi, \gamma_8 \psi, \gamma_8 \gamma_7 \psi) \\ +&= (\psi, \gamma_7 \psi, \gamma_8 \psi, -\gamma_9 \psi) +\end{aligned} +\tag{121} $$ + +Note that order and sign of the column-vectors in $B$ are irrelevant—at least with respect to the autocorrelation matrix $BB^T$. Thus we find that there are two fundamental ways to represent a positive mass in the Dirac algebra and one in the Pauli-algebra. The 4-dimensional phase space of the Dirac algebra is in two independent ways self-matched. +---PAGE_BREAK--- + +Our starting point was the statement that 2 $n$ linear independent vectors are needed to generate mass. If we can't find 2 $n$ vectors in the way described above for the Pauli and Dirac algebra, then this does (of course) not automatically imply that there are not 2 $n$ linear independent vectors. + +But what does it mean that the dimension of the Clifford algebra of observables (N) does not match the dimension of the phase space (2 $n$) in higher dimensions? There are different physical descriptions given. Classically we would say that a positive definite 2 $n$-component spinor describes a system of $n$ (potentially) coupled oscillators with $n$ frequencies. 
If $B$ is orthogonal, then all oscillators have the same frequency, i.e., the system is degenerate. But for $n > 2$ we find that not all eigenmodes can involve the complete 2 $n$-dimensional phase space. This phenomenon is already known in 3 dimensions: The trajectory of the isotropic three-dimensional oscillator always happens in a 2-dimensional plane, i.e., in a subspace. If it did not, then the angular momentum would not be conserved. In this case the isotropy of space would be broken. Hence one may say in some sense that the *isotropy of space* is the reason for a 4-dimensional phase-space and hence the reason for the 3 + 1-dimensional observable space-time of objects. Or in other words: higher-dimensional spaces are incompatible with isotropy, i.e., with the conservation of angular momentum. There is an intimate connection of these findings to the impossibility of Clifford algebras $Cl_{p,1}$ with $p > 3$ to create a homogeneous "Euclidean" space: Let $\gamma_0$ represent time and $\gamma_k$ with $k \in [1, ..., N-1]$ the spatial coordinates. The spatial rotators are products of two spatial basis vectors. The generator of rotations in the (1,2)-plane is $\gamma_1 \gamma_2$. Then we have 6 rotators in 4 "spatial" dimensions: + +$$ \gamma_1 \gamma_2, \ \gamma_1 \gamma_3, \ \gamma_1 \gamma_4, \ \gamma_2 \gamma_3, \ \gamma_2 \gamma_4, \ \gamma_3 \gamma_4 \qquad (122) $$ + +However, we find that some generators commute and while others anticommute and it can be taken from combinatorics that only sets of 3 mutual anti-commuting rotators can be formed from a set of symmetric anti-commuting $\gamma_k$. The 3 rotators + +$$ \gamma_1 \gamma_2, \ \gamma_2 \gamma_3, \ \gamma_1 \gamma_3 \qquad (123) $$ + +mutually anticommute, but $\gamma_1 \gamma_2$ and $\gamma_3 \gamma_4$ commute. 
Furthermore, in 9 + 1 dimensions, the spinors are either projections into 4-dimensional subspaces or there are non-zero off-diagonal terms in $\mathcal{A}$, i.e., there is "internal interaction". + +Another way to express the above considerations is the following: Only in 4 phase space dimensions we may construct a massive object from a matrix $B$ that represents a multispinor $\Psi$ of exactly $N = 2n$ single spinors and construct a wave-function according to + +$$ \Psi = \phi B \qquad (124) $$ + +where $\rho = \phi^2$ is the phase space density. + +It is easy to prove and has been shown in Reference [16] that the elements $\gamma_0, \gamma_{10}$ and $\gamma_{14}$ represent parity, time reversal and charge conjugation. The combination of these operators to form a multispinor, may lead (with normalization) to the construction of symplectic matrices $M$. Some examples are: + +$$ M = (\mathbf{1}\psi, \gamma_0\psi, -\gamma_{14}\psi, -\gamma_{10}\psi)/\sqrt{\psi^T\psi} $$ + +$$ M \gamma_0 M^T = \gamma_0 $$ + +$$ M = (\mathbf{1}\psi, -\gamma_{14}\psi, -\gamma_{10}\psi, \gamma_0\psi)/\sqrt{\psi^T\psi} $$ + +$$ M \gamma_{10} M^T = \gamma_{10} \qquad (125) $$ + +$$ M = (\gamma_{10}\psi, -\mathbf{1}\psi, -\gamma_{14}\psi, \gamma_0\psi)/\sqrt{\psi^T\psi} $$ + +$$ M \gamma_{14} M^T = \gamma_{14} $$ +---PAGE_BREAK--- + +Hence the combination of the identity and CPT-operators can be arranged such that the multispinor **M** is symplectic with respect to the directions of time γ₀, γ₁₀ and γ₁₄, but not with respect to γ₇, γ₈ or γ₉. As we tried to explain, the specific choice of the skew-symmetric matrix γ₀ is determined by a structure defining transformation. Since particles are nothing but dynamical structures in this game, the 6 possible SUMs should stand for 6 different particle types. However, for each direction of time, there are also two choices of the spatial axes. For γ₀ we have chosen γ₁, γ₂ and γ₃, but we could have used γ₄ = γ₀γ₁, γ₅ = γ₀γ₂ and γ₆ = γ₀γ₃ as well. 
+ +Thus, there should be either 6 or 12 different types of structures (types of fermions) that can +be constructed within the Dirac algebra. The above construction allows for three different types +corresponding to three different forms of the symplectic unit matrix, further three types are expected +to be related to γ7, γ8 and γ9: + +$$ +\begin{align*} +\mathbf{M} &= (\mathbf{1}\psi, -\gamma_9\psi, -\gamma_8\psi, -\gamma_7\psi) / \sqrt{\psi^T\psi} \\ +\mathbf{M}\gamma_7 \mathbf{M}^T &= \gamma_7 +\end{align*} +$$ + +$$ +\begin{equation} +\begin{aligned} +\mathbf{M} &= (\mathbf{1}\psi, -\gamma_8\psi, -\gamma_7\psi, -\gamma_9\psi) / \sqrt{\psi^T\psi} \\ +\mathbf{M}\gamma_8 \mathbf{M}^T &= \gamma_8 +\end{aligned} +\tag{126} +\end{equation} +$$ + +$$ +\begin{equation} +\begin{aligned} +\mathbf{M} &= (\gamma_7 \psi, -\mathbf{1} \psi, -\gamma_8 \psi, -\gamma_9 \psi) / \sqrt{\psi^T \psi} \\ +\mathbf{M} \gamma_9 \mathbf{M}^T &= \gamma_9 +\end{aligned} +\tag{127} +\end{equation} +$$ + +These matrices describe specific symmetries of the 4-dimensional phase space, i.e., geometric objects in phase space. Therefore massive multispinors can be described as volumes in phase space. 
If we deform the figure by stretching parameters *a*, *b*, *c*, *d* such that + +$$ +\tilde{\mathbf{M}} = (a \mathbf{1} \psi, -b \gamma_0 \psi, -c \gamma_{14} \psi, -d \gamma_{10} \psi) / \sqrt{\psi^T \psi} \quad (127) +$$ + +then one obtains with $f_k$ taken from Equation (114): + +$$ +\begin{align*} +\tilde{\mathbf{M}} \tilde{\mathbf{M}}^T \gamma_0 &= \sum_{k=0}^{9} g_k f_k \gamma_k / \sqrt{\psi^T \psi} \\ +g_0 &= a^2 + b^2 + c^2 + d^2 \\ +g_1 &= -g_2 = g_3 = a^2 - b^2 + c^2 - d^2 \\ +g_4 &= -g_5 = g_6 = a^2 - b^2 - c^2 + d^2 \\ +g_7 &= g_8 = g_9 = a^2 + b^2 - c^2 - d^2 +\end{align*} +\tag{128} +$$ + +This result reproduces the quadratic forms $f_k$ of Equation (114), but furthermore the phase space radii $a, b, c$ and $d$ reproduce the structure of the Clifford algebra, i.e., the classification into the 4 types of observables $\mathcal{E}, \vec{\mathcal{P}}, \vec{\mathcal{E}}$ and $\vec{\mathcal{B}}$. This means that a deformation of the phase space “unit cell” represents momenta and fields, i.e., the dimensions of the phase space unit cell are related to the appearance of certain simplices: + +$$ +(a = b) \text{ AND } (c = d) \Rightarrow \vec{P} = \vec{E} = 0 \\ +(a = c) \text{ AND } (b = d) \Rightarrow \vec{E} = \vec{B} = 0 \\ +(a = d) \text{ AND } (b = c) \Rightarrow \vec{P} = \vec{B} = 0 +$$ + +(129) + +while for $a = b = c = d$ all vectors but $\mathcal{E}$ vanish. Only in this latter case, the matrix **M** is symplectic for $a=b=c=d=1$. These relations confirm the intrinsic connection between a classical 4-dimensional Hamiltonian phase space and Clifford algebras in dimension 3+1. +---PAGE_BREAK--- + +## 8. 
Summary and Discussion + +Based on three fundamental principles, which describe the form of physics, we have shown that the algebraic structure of coupled classical degrees of freedom is (depending on the number of the DOFs) isomorph to certain Clifford algebras that allow to explain the dimensionality of space-time, to model Lorentz-transformations, the relativistic energy-momentum relation and even Maxwell's equations. + +It is usually assumed that we have to define the properties of space-time in the first place: "In Einstein's theory of gravitation matter and its dynamical interaction are based on the notion of an intrinsic geometric structure of the space-time continuum" [36]. However, as we have shown within this "game", it has far more explanatory power to derive and explain space-time from the principles of interaction. Hence we propose to reverse the above statement: The intrinsic geometric structure of the space-time continuum is based on the dynamical interaction of matter. A rigorous consequence of this reversal of perspective is that "space-time" does not need to have a fixed and unique dimensionality at all. It appears that the dimensionality is a property of the type of interaction. However, supposed higher-dimensional space-times (see Reference [16]) would emerge in analogy to the method presented here, for instance in nuclear interaction, then these space-times would not simply be Euclidean spaces of higher dimension. Clifford algebras, especially if they are restricted by symplectic conditions by a Hamiltonian function, have a surprisingly complicated intrinsic structure. As we pointed out, if all generators of a Clifford algebra are simplices, then in 9 + 1 dimensions, we find k-vectors with $k \in [0,10]$ but k-vectors generated from simplices are themselves simplices only for $k \in [1,2,5,6,9,10,...]$. 
However, if space-time is constrained by Hamiltonian motion, then ensembles of oscillators may also clump together to form "objects" with 9 + 1 or 25 + 1-dimensional interactions, despite the fact that we gave strong arguments for the fundamentality of the 3 + 1-dimensional Hamiltonian algebra.

There is no a priori reason to exclude higher order terms—whenever they include constants of motion. However, as the Hamiltonian then involves terms of higher order, we might then need to consider higher order moments of the phase space distribution. In this case we would have to invent an action constant in order to scale $\psi$.

Our game is based on a few general rules and symmetry considerations. The math used in our derivation—taking the results of representation theory for granted—is simple and can be understood on an undergraduate level. And though we never intended to find a connection to string theory, we found—besides the 3 + 1-dimensional interactions—a list of possible higher-dimensional candidates, two of which are also in the focus of string theories, namely $9+1=10$-dimensional and $25+1=26$-dimensional theories [37].

We understand this modeling game as a contribution to the demystification (and unification) of our understanding of space-time, relativity, electrodynamics and quantum mechanics. Despite the fact that it has become tradition to write all equations of motion of QED and QM in a way that requires the use of the unit imaginary, our model seems to indicate that it does not have to be that way. Though it is frequently postulated that evolution in time has to be unitary within QM, it appears that symplectic motion does not only suffice, but is superior as it yields the correct number of relevant operators. While in the unitary case one should expect 16 (15) unitary (traceless) operators for a 4-component spinor, the natural number of generators in the corresponding symplectic treatment is 10, as found by Dirac himself in QED [2,38]. 
If a theory contains things which are *not required*, then we have added something arbitrary and artificial. The theory as we described it indicates that in momentum space, which is used here, there is no immediate need for the use of the unit imaginary and no need for more than 10 fundamental generators. The use of the unit imaginary however appears unavoidable when we switch via Fourier transform to the "real space". + +There is a dichotomy in physics. On the one hand all *causes* are considered to inhabit space-time (*local causality*), but on the other hand the *physical reasoning* mostly happens in energy-momentum space: There are no Feyman-graphs, no scattering amplitudes, no fundamental physical relations, that +---PAGE_BREAK--- + +do not refer in some way to energy or momentum (-conservation). We treat problems in solid state physics as well as in high energy physics mostly in Fourier space (reciprocal lattice). + +We are aware that the rules of the game are, due to their rigour, difficult to accept. However, maybe it does not suffice to speculate that the world might be a hologram (As t'Hooft suggested [39] and Leonard Susskind sketched in his celebrated paper, Reference [40])-we really should play modeling games that might help to decide, if and how it could be like that. + +**Conflicts of Interest:** "The author declares no conflict of interest." + +## Appendix Microcanonical Ensemble + +Einstein once wrote that "A theory is the more impressive the greater the simplicity of its premises, the more different kinds of things it relates, and the more extended its area of applicability. Hence the deep impression that classical thermodynamics made upon me. It is the only physical theory of universal content concerning which I am convinced that, within the framework of the applicability of its basic concepts, it will never be overthrown [...]". 
We agree with him and we will try to show in the following that this holds also for the branch of thermodynamics that is called statistical mechanics. By the use of the EMEQ it has been shown, that the expectation values + +$$f_k = \frac{\operatorname{Tr}(\gamma_k^2)}{16} \bar{\psi} \gamma_k \psi \qquad (\text{A1})$$ + +can be associated with energy $\mathcal{E}$ and momentum $\vec{p}$ of and with the electric (magnetic) field $\vec{E}$ and $\vec{B}$ as seen by a relativistic charged particle. It has also been shown that stable systems can always be transformed in such a way as to bring $\mathcal{H}$ into a diagonal form: + +$$\mathbf{F} = \begin{pmatrix} 0 & \omega_1 & 0 & 0 \\ -\omega_1 & 0 & 0 & 0 \\ 0 & 0 & 0 & \omega_2 \\ 0 & 0 & -\omega_2 & 0 \end{pmatrix} \qquad (\text{A2})$$ + +In the following we will use the classical model of the microcanonical ensemble to compute some phase space averages. Let the constant value of the Hamiltonian be $\mathcal{H} = U$ where $U$ is some energy, the volume in phase space $\Phi^*$ that is limited by the surface of constant energy $U$ is given by [41]: + +$$\Phi^* = \int_{\mathcal{H} 0$ [11] (resp. $b(t) < 0$, see [13]) in the low-intensity limit, the graded-index waveguide acts as a linear defocusing (focusing) lens. + +Depending on the selections of the coefficients in Equation (1), its applications vary in very specific problems (see [16] and references therein): + +* Bose-Einstein condensates: $b(\cdot) \neq 0$, $a, h$ constants and other coefficients are zero. + +* Dispersion-managed optical fibers and soliton lasers [9,14,15]: $a(\cdot), h(\cdot), d(\cdot) \neq 0$ are respectively dispersion, nonlinearity and amplification, and the other coefficients are zero. $a(\cdot)$ and $h(\cdot)$ can be periodic as well, see [29]. + +* Pulse dynamics in the dispersion-managed fibers [10]: $h(\cdot) \neq 0$, $a$ is a constant and other coefficients are zero. 
In this paper, to obtain the main results, we use a fundamental approach consisting of the use of similarity transformations and the solutions of Riccati systems with several parameters inspired by the work in [30]. Similarity transformations have been a very popular strategy in nonlinear optics since the lens transform presented by Talanov [27]. Extensions of this approach have been presented in [26,28]. Applications include nonlinear optics, Bose-Einstein condensates, integrability of NLS and quantum mechanics, see for example [3,31-33], and references therein. E. Marhic in 1978 introduced (probably for the first time) a one-parameter {$a(0)$} family of solutions for the linear Schrödinger equation of the one-dimensional harmonic oscillator, where the use of an explicit formulation (classical Mehler's formula [34]) for the propagator was fundamental. The solutions presented by E. Marhic constituted a generalization of the original Schrödinger wave packet with oscillating width.

In addition, in [35], a generalized Mehler's formula for a general linear Schrödinger equation of the one-dimensional generalized harmonic oscillator of the form Equation (1) with $h(t) = 0$ was presented. For the latter case, in [36-38], multiparameter solutions in the spirit of Marhic in [30] have been presented. The parameters for the Riccati system arose originally in the process of proving convergence to the initial data for the Cauchy initial value problem Equation (1) with $h(t) = 0$ and in the process of finding a general solution of a Riccati system [38,39]. In addition, Ermakov systems with solutions containing parameters [36] have been used successfully to construct solutions for the generalized harmonic oscillator with a hidden symmetry [37], and they have also been used to present Galilei transformation, pseudoconformal transformation and others in a unified manner, see [37].
More recently, they have been used in [40] to show spiral and breathing solutions and solutions with bending for the paraxial wave equation. In this paper, as the second main result, we introduce a family of Schrödinger equations presenting periodic soliton solutions by using multiparameter solutions for Riccati systems. Furthermore, as the third main result, we show that these parameters provide a control on the dynamics of solutions for equations of the form Equation (1). These results should deserve numerical and experimental studies.

This paper is organized as follows: In Section 2, by means of similarity transformations and using computer algebra systems, we show the existence of Peregrine, bright and dark solitons for the family Equation (1). Thanks to the computer algebra systems, we are able to find an extensive list of integrable VCNLS, in the sense that they can be reduced to the standard integrable NLS, see Table 1. In Section 3, we use different similarity transformations than those used in Section 2. The advantage of the presentation of this section is a multiparameter approach. These parameters provide us with a control on the center axis of bright and dark soliton solutions. Again in this section, using Table 2 and by means of computer algebra systems, we show that we can produce a very extensive number of integrable VCNLS allowing soliton-type solutions. A supplementary Mathematica file is provided where it is evident how the variation of the parameters changes the dynamics of the soliton solutions. In Section 4, we use a finite difference method to compare analytical solutions described in [41] (using similarity transformations) with numerical approximations for the paraxial wave equation (also known as linear Schrödinger equation with quadratic potential).
---PAGE_BREAK---

**Table 1.** Families of NLS with variable coefficients.
| # | Variable Coefficient NLS | Solutions ($j = 1, 2, 3$) |
|---|--------------------------|---------------------------|
| 1 | $i\psi_t = l_0\psi_{xx} - \frac{bmt^{m-1} + b^2 t^{2m}}{4l_0}\,x^2\psi - ibt^m x\psi_x - \lambda l_0 e^{-\frac{bt^{m+1}}{m+1}}\lvert\psi\rvert^2\psi$ | $\psi_j(x,t) = e^{\frac{bt^{m+1}}{2(m+1)}}\, e^{-i\frac{bt^m}{4}x^2}\, u_j(x,t)$ |
| 2 | $i\psi_t = l_0\psi_{xx} - \frac{1}{2l_0 t^2}\,x^2\psi + \frac{i}{t}\,x\psi_x - \lambda l_0 t\,\lvert\psi\rvert^2\psi$ | $\psi_j(x,t) = \frac{1}{\sqrt{t}}\, e^{-i\frac{x^2}{4t}}\, u_j(x,t)$ |
| 3 | $i\psi_t = l_0\psi_{xx} - \frac{c^2}{4l_0}\,x^2\psi + icx\psi_x - \lambda l_0 e^{ct}\lvert\psi\rvert^2\psi$ | $\psi_j(x,t) = \frac{1}{\sqrt{e^{ct}}}\, e^{-i\frac{c}{4}x^2}\, u_j(x,t)$ |

[NOTE(review): Table 1 of the original article lists roughly forty further families of integrable variable-coefficient NLS equations, with coefficient functions built from $t^k$, $e^{bt}$, $\ln t$, and the trigonometric/hyperbolic functions $\tan(bt)$, $\tanh(bt)$, $\cot(bt)$, $\coth(bt)$, $\sin$, $\cos$, $\sinh$, $\cosh$, together with the corresponding similarity transformations to the constant-coefficient NLS. The PDF extraction of those rows is irreparably garbled (only coefficient fragments such as "$+a\tanh(bt)$", "$+a\coth(bt)$", "$-ia\sin(bt)x\psi_x + \lambda l_0 e^{a/b}\lvert\psi\rvert^2\psi$" survive), so only rows 1–3, which are corroborated by Examples 1–3 in the body of the paper, are reconstructed above. Consult the published article for the complete table.]
---PAGE_BREAK---

**Table 2.** Riccati equations used to generate the similarity transformations.
| # | Riccati Equation | Similarity Transformation from Table 1 |
|---|------------------|----------------------------------------|
| 1 | $y'_x = ax^n y^2 + bmx^{m-1} - ab^2 x^{n+2m}$ | 1 |
| 2 | $(ax^n + b)\,y'_x = by^2 + ax^{n-2}$ | 2 |
| 3 | $y'_x = ax^n y^2 + bx^m y + bcx^m - ac^2 x^n$ | 3 |
| 4 | $y'_x = ax^n y^2 + bx^m y + ckx^{k-1} - bcx^{m+k} - ac^2 x^{n+2k}$ | 1 |
| 5 | $xy'_x = ax^n y^2 + my - ab^2 x^{n+2m}$ | 3 |
| 6 | $(ax^n + bx^m + c)\,y'_x = \alpha x^k y^2 + \beta x^s y - \alpha b^2 x^k + \beta b x^s$ | 4 |
| 7 | $y'_x = be^{\mu x} y^2 + ace^{cx} - a^2 b e^{(\mu+2c)x}$ | 5 |
| 8 | $y'_x = ae^{\mu x} y^2 + cy - ab^2 e^{(\mu+2c)x}$ | 3 |
| 9 | $y'_x = ae^{cx} y^2 + bnx^{n-1} - ab^2 e^{cx} x^{2n}$ | 1 |
| 10 | $y'_x = ax^n y^2 + bce^{cx} - ab^2 x^n e^{2cx}$ | 8 |
| 11 | $y'_x = ax^n y^2 + cy - ab^2 x^n e^{2cx}$ | 3 |
| 12 | $y'_x = [a\sinh^2(cx) - c]\,y^2 - a\sinh^2(cx) + c - a$ | 6 |
| 13 | $2y'_x = [a - b + a\cosh(bx)]\,y^2 + a + b - a\cosh(bx)$ | 7 |
| 14 | $y'_x = a(\ln x)^n y^2 + bmx^{m-1} - ab^2 x^{2m} (\ln x)^n$ | 1 |
| 15 | $xy'_x = ax^n y^2 + b - ab^2 x^n \ln^2 x$ | 8 |
| 16 | $y'_x = [b + a\sin^2(bx)]\,y^2 + b - a + a\sin^2(bx)$ | 9 |
| 17 | $2y'_x = [b + a + a\cos(bx)]\,y^2 + b - a + a\cos(bx)$ | 10 |
| 18 | $y'_x = [b + a\cos^2(bx)]\,y^2 + b - a + a\cos^2(bx)$ | 10 |
| 19 | $y'_x = c(\arcsin x)^n y^2 + ay + ab - b^2 c\,(\arcsin x)^n$ | 3 |
| 20 | $y'_x = a(\arcsin x)^{n/2} y^2 + \beta m x^{m-1} - a\beta^2 x^{2m} (\arcsin x)^n$ | 1 |

[NOTE(review): rows 21–37 of Table 2 are unrecoverable from the damaged PDF extraction; only fragments such as "$a\tanh^2(bx)(af+b)+ab$" and "$(g-f)(af+b)$" survive, suggesting further Riccati equations with $\tanh$/$\coth$ coefficients. Consult the published article for the complete table. Rows 8, 10, 11 and 19 above are reconstructed from partially garbled text — verify the exponents against the original.]

| 38 | $y'_x = fy^2 - a^2 f + ab\sinh(bx) - a^2 f\sinh^2(bx)$ | 14 |
| 39 | $y'_x = fy^2 - a^2 f + ab\sin(bx) + a^2 f\sin^2(bx)$ | 15 |
| 40 | $y'_x = fy^2 - a^2 f + ab\cos(bx) + a^2 f\cos^2(bx)$ | 16 |
| 41 | $y'_x = fy^2 - a\tan^2(bx)(af - b) + ab$ | 17 |
| 42 | $y'_x = fy^2 - a\cot^2(bx)(af - b) + ab$ | 18 |
+ +Symmetry **2016**, *8*, 38 +---PAGE_BREAK--- + +**2. Soliton Solutions for VCNLS through Riccati Equations and Similarity Transformations** + +In this section, by means of a similarity transformation introduced in [42], and using computer +algebra systems, we show the existence of Peregrine, bright and dark solitons for the family Equation +(1). Thanks to the computer algebra systems, we are able to find an extensive list of integrable +variable coefficient nonlinear Schrödinger equations (see Table 1). For similar work and applications to +Bose-Einstein condensates, we refer the reader to [1] + +**Lemma 1.** ([42]) Suppose that $h(t) = -l_0\lambda\mu(t)$ with $\lambda \in \mathbb{R}$, $l_0 = \pm 1$ and that $c(t)$, $\alpha(t)$, $\delta(t)$, $\kappa(t)$, $\mu(t)$ and $g(t)$ satisfy the equations: + +$$ +\begin{align} +\alpha(t) &= l_0 \frac{c(t)}{4}, \quad \delta(t) = -l_0 \frac{g(t)}{2}, \quad h(t) = -l_0 \lambda \mu(t), \tag{2} \\ +\kappa(t) &= \kappa(0) - \frac{l_0}{4} \int_0^t g^2(z) dz, \tag{3} \\ +\mu(t) &= \mu(0) \exp \left( \int_0^t (2d(z) - c(z)) dz \right) \mu(0) \neq 0, \tag{4} \\ +g(t) &= g(0) - 2l_0 \exp \left( -\int_0^t c(z) dz \right) \int_0^t \exp \left( \int_0^z c(y) dy \right) f(z) dz. \tag{5} +\end{align} +$$ + +Then, + +$$ +\psi(t,x) = \frac{1}{\sqrt{\mu(t)}} e^{i(\alpha(t)x^2 + \delta(t)x + \kappa(t))} u(t,x) \quad (6) +$$ + +is a solution to the Cauchy problem for the nonautonomous Schrödinger equation + +$$ +i\psi_t - l_0\psi_{xx} - b(t)x^2\psi + ic(t)x\psi_x + id(t)\psi + f(t)x\psi - ig(t)\psi_x - h(t)|\psi|^2\psi = 0, \quad (7) +$$ + +$$ +\psi(0, x) = \psi_0(x), +$$ + +if and only if $u(t,x)$ is a solution of the Cauchy problem for the standard Schrödinger equation + +$$ +iu_t - l_0 u_{xx} + l_0 |\lambda| u^2 = 0, +$$ + +with initial data + +$$ +u(0,x) = \sqrt{\mu(0)}e^{-i(\alpha(0)x^2+\delta(0)x+\kappa(0))}\psi_0(x). 
\quad (10) +$$ + +Now, we proceed to use Lemma 1 to discuss how we can construct NLS with variable coefficients +equations that can be reduced to the standard NLS and therefore be solved explicitly. We start +recalling that + +$$ +u_1(t, x) = A \exp\left(2iA^2t\right) \left(\frac{3 + 16iA^2t - 16A^4t^2 - 4A^2x^2}{1 + 16A^4t^2 + 4A^2x^2}\right), A \in \mathbb{R} \quad (11) +$$ + +is a solution for ($l_0 = -1$ and $\lambda = -2$) + +$$ +iu_t + u_{xx} + 2|u|^2 u = 0, t, x \in \mathbb{R}. \tag{12} +$$ + +In addition, + +$$ +u_2(\xi, \tau) = A \tanh(A\xi)e^{-2iA^2\tau} \quad (13) +$$ + +is a solution of ($l_0 = -1$ and $\lambda = 2$) + +$$ +iu_{\tau} + u_{\xi\xi} - 2|u|^2 u = 0, \quad (14) +$$ +---PAGE_BREAK--- + +and + +$$ +u_3(\tau, \xi) = \sqrt{v} \operatorname{sech}(\sqrt{v}\xi) \exp(-iv\tau), v > 0 \quad (15) +$$ + +is a solution of ($l_0 = 1$ and $\lambda = -2$), + +$$ +iu_{\tau} - u_{\xi\xi} - 2|u|^2 u = 0. \tag{16} +$$ + +**Example 1.** Consider the NLS: + +$$ +i\psi_t + \psi_{xx} - \frac{c^2}{4} x^2 \psi - icx\psi_x \pm 2e^{ct} |\psi|^2 \psi = 0. \quad (17) +$$ + +Our intention is to construct a similarity transformation from Equation (17) to standard NLS Equation (9) by means of Lemma 1. Using the latter, we obtain + +$$ +b(t) = \frac{c^2}{4}, c(t) = c, \mu(t) = e^{ct}, +$$ + +and + +$$ +\alpha(t) = -\frac{c}{4}, h(t) = \pm 2e^{ct}. +$$ + +Therefore, + +$$ +\psi(x,t) = \frac{e^{-i\frac{x}{c^2}t}}{\sqrt{e^{ct}}} u_j(x,t), j=1,2 +$$ + +is a solution of the form Equation (6), and $u_j(x,t)$ are given by Equations (12) and (13). + +**Example 2.** Consider the NLS: + +$$ +i\psi_t + \psi_{xx} - \frac{1}{2t^2}x^2\psi - i\frac{1}{t}x\psi_x \pm 2t|\psi|^2\psi = 0. 
\quad (18) +$$ + +By Lemma 1, a Riccati equation associated to the similarity transformation is given by + +$$ +\frac{dc}{dt} + c(t)^2 - 2t^{-2} = 0, \tag{19} +$$ + +and we obtain the functions + +$$ +b(t) = \frac{1}{2t^2}, c(t) = -\frac{1}{t}, \mu(t) = t, +$$ + +$$ +\alpha(t) = -\frac{1}{4t}, h_1(t) = -2t, h_2(t) = 2t. +$$ + +Using $u_j(x,t)$, $j=1$ and $2$, given by Equations (12) and (13), we get the solutions + +$$ +\psi_j(x,t) = \frac{e^{-i\frac{1}{4t}x^2}}{\sqrt{t}} u_i(x,t). \quad (20) +$$ + +Table 1 shows integrable variable coefficient NLS and the corresponding similarity transformation to constant coefficient NLS. Table 2 lists some Riccati equations that can be used to generate these transformations. +---PAGE_BREAK--- + +**Example 3.** If we consider the following family (m and B are parameters) of variable coefficient NLS, + +$$i\psi_t + \psi_{xx} - \frac{Bmt^{m-1} + Bt^{2m}}{4}x^2\psi + iBt^m x\psi_x + \gamma e^{-\frac{Bt^{m+1}}{m+1}}|\psi|^2\psi = 0, \quad (21)$$ + +by means of the Riccati equation + +$$y_t = At^n y^2 + Bmt^{m-1} - AB^2t^{n+2m}, \quad (22)$$ + +and Lemma 1, we can construct soliton-like solutions for Equation (21). For this example, we restrict ourselves to taking $A = -1$ and $n = 0$. Furthermore, taking in Lemma 1 $l_0 = -1$, $\lambda = -2$, $a(t) = 1$, $b(t) = \frac{Bmt^{m-1}+Bt^{2m}}{4}$, $c(t) = Bt^m$, $\mu(t) = e^{-\frac{Bt^{m+1}}{m+1}}$, $h(t) = -2e^{-\frac{Bt^{m+1}}{m+1}}$, and $\alpha(t) = -Bt^m/4$, soliton-like solutions to the Equation (21) are given by + +$$\psi_j(x,t) = e^{i-\frac{B^2 j^m}{4}} e^{\frac{B j^{m+1}}{2(m+1)}} u_j(x,t), \quad (23)$$ + +where using $u_j(x,t)$, $j=1$ and $2$, given by Equations (12) and (15), we get the solutions. It is important to notice that if we consider $B=0$ in Equation (21) we obtain standard NLS models. + +### 3. 
Riccati Systems with Parameters and Similarity Transformations + +In this section, we use different similarity transformations than those used in Section 2, but they have been presented previously [26,35,39,42]. The advantage of the presentation of this section is a multiparameter approach. These parameters provide us with a control on the center axis of bright and dark soliton solutions. Again in this section, using Table 2, and by means of computer algebra systems, we show that we can produce a very extensive number of integrable VCNLS allowing soliton-type solutions. The transformations will require: + +$$\frac{d\alpha}{dt} + b(t) + 2c(t)\alpha + 4a(t)\alpha^2 = 0, \quad (24)$$ + +$$\frac{d\beta}{dt} + (c(t) + 4a(t)\alpha(t))\beta = 0, \quad (25)$$ + +$$\frac{d\gamma}{dt} + l_0 a(t) \beta^2(t) = 0, l_0 = \pm 1, \quad (26)$$ + +$$\frac{d\delta}{dt} + (c(t) + 4a(t)\alpha(t))\delta = f(t) + 2a(t)g(t), \quad (27)$$ + +$$\frac{d\epsilon}{dt} = (g(t) - 2a(t)\delta(t))\beta(t), \quad (28)$$ + +$$\frac{d\kappa}{dt} = g(t)\delta(t) - a(t)\delta^2(t). \quad (29)$$ + +Considering the standard substitution + +$$\alpha(t) = \frac{1}{4a(t)} \frac{\mu'(t)}{\mu(t)} - \frac{d(t)}{2a(t)}, \quad (30)$$ + +it follows that the Riccati Equation (24) becomes + +$$\mu'' - \tau(t)\mu' + 4\sigma(t)\mu = 0, \quad (31)$$ + +with + +$$\tau(t) = \frac{a'}{a} - 2c + 4d, \sigma(t) = ab - cd + d^2 + \frac{d}{2}\left(\frac{a'}{a} - \frac{d'}{d}\right). \quad (32)$$ +---PAGE_BREAK--- + +We will refer to Equation (31) as the characteristic equation of the Riccati system. Here, $a(t)$, $b(t)$, $c(t)$, $d(t)$, $f(t)$ and $g(t)$ are real value functions depending only on the variable $t$. 
A solution of the Riccati system Equations (24)–(29) with multiparameters is given by the following expressions (with the respective inclusion of the parameter $l_0$) [26,35,39]: + +$$ \mu(t) = 2\mu(0)\mu_0(t)(\alpha(0) + \gamma_0(t)), \quad (33) $$ + +$$ \alpha(t) = \alpha_0(t) - \frac{\beta_0^2(t)}{4(\alpha(0) + \gamma_0(t))'}, \quad (34) $$ + +$$ \beta(t) = -\frac{\beta(0)\beta_0(t)}{2(\alpha(0) + \gamma_0(t))} = \frac{\beta(0)\mu(0)}{\mu(t)}w(t), \quad (35) $$ + +$$ \gamma(t) = l_0\gamma(0) - \frac{l_0\beta^2(0)}{4(\alpha(0) + \gamma_0(t))}, \quad l_0 = \pm 1, \quad (36) $$ + +$$ \delta(t) = \delta_0(t) - \frac{\beta_0(t)(\delta(0) + \varepsilon_0(t))}{2(\alpha(0) + \gamma_0(t))}, \quad (37) $$ + +$$ \varepsilon(t) = \varepsilon(0) - \frac{\beta(0)(\delta(0) + \varepsilon_0(t))}{2(\alpha(0) + \gamma_0(t))}, \quad (38) $$ + +$$ \kappa(t) = \kappa(0) + \kappa_0(t) - \frac{(\delta(0) + \varepsilon_0(t))^2}{4(\alpha(0) + \gamma_0(t))'}, \quad (39) $$ + +subject to the initial arbitrary conditions $\mu(0), \alpha(0), \beta(0) \neq 0, \gamma(0), \delta(0), \varepsilon(0)$ and $\kappa(0)$. 
$\alpha_0, \beta_0, \gamma_0, \delta_0, \varepsilon_0$ and $\kappa_0$ are given explicitly by: + +$$ a_0(t) = \frac{1}{4a(t)} \frac{\mu'_0(t)}{\mu_0(t)} - \frac{d(t)}{2a(t)}, \quad (40) $$ + +$$ \beta_0(t) = -\frac{w(t)}{\mu_0(t)}, w(t) = \exp\left(-\int_0^t (c(s) - 2d(s))ds\right), \quad (41) $$ + +$$ \gamma_0(t) = \frac{d(0)}{2a(0)} + \frac{1}{2\mu_1(0)} \frac{\mu_1(t)}{\mu_0(t)}, \quad (42) $$ + +$$ \delta_0(t) = \frac{w(t)}{\mu_0(t)} \int_0^t \left[ \left(f(s) - \frac{d(s)}{a(s)}g(s)\right)\mu_0(s) + \frac{g(s)}{2a(s)}\mu'_0(s) \right] \frac{ds}{w(s)}, \quad (43) $$ + +$$ \begin{aligned} \varepsilon_0(t) = & -\frac{2a(t)w(t)}{\mu'_0(t)}\delta_0(t) + 8 \int_0^t \frac{a(s)\varphi(s)w(s)}{(\mu'_0(s))^2}(\mu_0(s)\delta_0(s))ds \\ & + 2\int_0^t \frac{a(s)w(s)}{\mu'_0(s)}[f(s) - \frac{d(s)}{a(s)}g(s)]ds, \end{aligned} \quad (44) $$ + +$$ \begin{aligned} \kappa_0(t) = & \frac{a(t)\mu_0(t)}{\mu'_0(t)}\delta_0^2(t) - 4\int_0^t \frac{a(s)\varphi(s)}{(\mu'_0(s))^2}(\mu_0(s)\delta_0(s))^2 ds \\ & - 2\int_0^t \frac{a(s)}{\mu'_0(s)}(\mu_0(s)\delta_0(s))[f(s) - \frac{d(s)}{a(s)}g(s)]ds, \end{aligned} \quad (45) $$ + +with $\delta_0(0) = g_0(0)/(2a(0))$, $\varepsilon_0(0) = -\delta_0(0)$, $\kappa_0(0) = 0$. Here, $\mu_0$ and $\mu_1$ represent the fundamental solution of the characteristic equation subject to the initial conditions $\mu_0(0) = 0, \mu'_0(0) = 2a(0) \neq 0$ and $\mu_1(0) \neq 0, \mu'_1(0) = 0$. + +Using the system Equations (34)–(39), in [26], a generalized lens transformation is presented. Next, we recall this result (here we use a slight perturbation introducing the parameter $l_0 = \pm 1$ in order to use Peregrine type soliton solutions): +---PAGE_BREAK--- + +**Lemma 2** ($l_0 = 1$, [26]). Assume that $h(t) = \lambda a(t) \beta^2(t) \mu(t)$ with $\lambda \in \mathbb{R}$. 
Then, the substitution + +$$ \psi(t,x) = \frac{1}{\sqrt{\mu(t)}} e^{i(\alpha(t)x^2 + \delta(t)x + \kappa(t))} u(\tau, \xi), \quad (46) $$ + +where $\xi = \beta(t)x + \epsilon(t)$ and $\tau = \gamma(t)$, transforms the equation + +$$ i\psi_t = -a(t)\psi_{xx} + b(t)x^2\psi - ic(t)x\psi_x - id(t)\psi - f(t)x\psi + ig(t)\psi_x + h(t)|\psi|^2\psi $$ + +into the standard Schrödinger equation + +$$ iu_{\tau} - l_{0}u_{\xi\xi} + l_{0}\lambda|u|^{2}u = 0, l_{0} = \pm 1, \quad (47) $$ + +as long as $\alpha, \beta, \gamma, \delta, \varepsilon$ and $\kappa$ satisfy the Riccati system Equations (24)–(29) and also Equation (30). + +**Example 4.** Consider the NLS: + +$$ i\psi_t = \psi_{xx} - \frac{x^2}{4}\psi + h(0) \operatorname{sech}(t) |\psi|^2 \psi. \quad (48) $$ + +It has the associated characteristic equation $\mu'' + a\mu = 0$, and, using this, we will obtain the functions: + +$$ \alpha(t) = \frac{\coth(t)}{4} - \frac{1}{2} \operatorname{csch}(t) \operatorname{sech}(t), \quad \delta(t) = -\operatorname{sech}(t), \quad (49) $$ + +$$ \kappa(t) = 1 - \frac{\tanh(t)}{2}, \quad \mu(t) = \cosh(t), \quad (50) $$ + +$$ h(t) = h(0) \operatorname{sech}(t), \quad \beta(t) = \frac{1}{\cosh(t)}, \quad (51) $$ + +$$ \varepsilon(t) = -1 + \tanh(t), \quad \gamma(t) = 1 - \frac{\tanh(t)}{2}. \quad (52) $$ + +Then, we can construct solution of the form + +$$ \psi_j(t,x) = \frac{1}{\sqrt{\mu(t)}} e^{i(\alpha(t)x^2 + \delta(t)x + \kappa(t))} u_j\left(1 - \frac{\tanh(t)}{2}, \frac{x}{\cosh(t)} - 1 + \tanh(t)\right), \quad (53) $$ + +with $u_j, j = 1$ and $2$, given by Equations (12) and (13). + +**Example 5.** Consider the NLS: + +$$ i\psi_t(x,t) = \psi_{xx}(x,t) + \frac{h(0)\beta(0)^2\mu(0)}{1+\alpha(0)2c_2t} |\psi(x,t)|^2 \psi(x,t). 
$$ + +It has the characteristic equation $\mu'' + a\mu = 0$, and, using this, we will obtain the functions: + +$$ \alpha(t) = \frac{1}{4t} - \frac{1}{2+\alpha(0)4c_2^2t^2}, \quad \delta(t) = \frac{\delta(0)}{1+\alpha(0)2c_2t'} \quad (54) $$ + +$$ \kappa(t) = \kappa(0) - \frac{\delta(0)^2 c_2 t}{2 + 4\alpha(0)c_2 t'}, \quad h(t) = \frac{h(0)\beta(0)^2\mu(0)}{1 + \alpha(0)2c_2 t'}, \quad (55) $$ + +$$ \mu(t) = (1 + \alpha(0)2c_2t)\mu(0), \quad \beta(t) = \frac{\beta(0)}{1 + \alpha(0)2c_2t'} $$ +---PAGE_BREAK--- + +$$ +\gamma(t) = \gamma(0) - \frac{\beta(0)^2 c_2 t}{2 + 4\alpha(0)c_2 t}, \quad \epsilon(t) = \epsilon(0) - \frac{\beta(0)\delta(0)c_2 t}{1 + 2\alpha(0)c_2 t}. +$$ + +Then, we can construct a solution of the form + +$$ +\begin{equation} +\begin{split} +\psi_j(t,x) ={}& \frac{1}{\sqrt{\mu(t)}} e^{i(\alpha(t)x^2 + \delta(t)x + \kappa(t))} \\ +& u_j \left( \gamma(0) - \frac{\beta(0)^2 c_2 t}{2+4\alpha(0)c_2 t'} \frac{\beta(0)x}{1+\alpha(0)2c_2 t} + \epsilon(0) - \frac{\beta(0)\delta(0)c_2 t}{1+2\alpha(0)c_2 t} \right), +\end{split} +\tag{56} +\end{equation} +$$ + +with $u_j, j = 1$ and $2$, Equations (12) and (13). + +Following Table 2 of Riccati equations, we can use Equation (24) and Lemma 2 to construct an extensive list of integrable variable coefficient nonlinear Schrödinger equations. + +**4. Crank-Nicolson Scheme for Linear Schrödinger Equation with Variable Coefficients Depending on Space** + +In addition, in [35], a generalized Melher’s formula for a general linear Schrödinger equation of the one-dimensional generalized harmonic oscillator of the form Equation (1) with $h(t) = 0$ was presented. 
As a particular case, if $b = \lambda \frac{\omega^2}{2}$; $f = b$, $\omega > 0$, $\lambda \in \{-1, 0, 1\}$, $c = g = 0$, then the evolution operator is given explicitly by the following formula (note—this formula is a consequence of Mehler’s formula for Hermite polynomials): + +$$ +\psi(x,t) = U_V(t)f := \frac{1}{\sqrt{2i\pi\mu_j(t)}} \int_{\mathbb{R}^n} e^{iS_V(x,y,t)} f(y)dy, \quad (57) +$$ + +where + +$$ +S_V(x, y, t) = \frac{1}{\mu_j(t)} \left( \frac{x_j^2 + y_j^2}{2} l_j(t) - x_j y_j \right), +$$ + +$$ +\{\mu_j(t), l_j(t)\} = \begin{cases} i\psi_l = -\Delta\psi + V(x, t)\psi, & (59) \\ 0, & (58) \end{cases} +$$ + +Using Riccati-Ermakov systems in [41], it was shown how computer algebra systems can be used to derive the multiparameter formulas (33)–(45). This multi-parameter study was used also to study solutions for the inhomogeneous paraxial wave equation in a linear and quadratic approximation including oscillating laser beams in a parabolic waveguide, spiral light beams, and more families of propagation-invariant laser modes in weakly varying media. However, the analytical method is restricted to solve Riccati equations exactly as the ones presented in Table 2. In this section, we use a finite differences method to compare analytical solutions described in [41] with numerical approximations. We aim (in future research) to extend numerical schemes to solve more general cases that the analytical method exposed cannot. Particularly, we will pursue to solve equations of the general form: + +using polynomial approximations in two variables for the potential function $V(x, t)$ ($V(x, t) \approx b(t)(x_1^2 + x_2^2) + f(t)x_1 + g(t)x_2 + h(t))$. For this purpose, it is necessary to analyze stability of different methods applied to this equation. 
+---PAGE_BREAK--- + +We also will be interested in extending this process to nonlinear Schrödinger-type equations with potential terms dependent on time, such as + +$$i\psi_t = -\Delta\psi + V(\mathbf{x}, t)\psi + s|\psi|^2\psi. \quad (60)$$ + +In this section, we show that the Crank-Nicolson scheme seems to be the best method to deal with reconstructing numerically the analytical solutions presented in [41]. + +Numerical methods arise as an alternative when it is difficult to find analytical solutions of the Schrödinger equation. Despite numerical schemes not providing explicit solutions to the problem, they do yield approaches to the real solutions which allow us to obtain some relevant properties of the problem. Most of the simplest and often-used methods are those based on finite differences. + +In this section, the Crank-Nicolson scheme is used for linear Schrödinger equation in the case of coefficients depending only on the space variable because it is absolutely stable and the matrix of the associate system does not vary for each iteration. + +A rectangular mesh $(x_m, t_n)$ is introduced in order to discretize a bounded domain $\Omega \times [0, T]$ in space and time. In addition, $\tau$ and $\mathbf{h}$ represent the size of the time step and the size of space step, respectively. $\mathbf{x}_m$ and $\mathbf{h}$ are in $\mathbb{R}$ if one-dimensional space is considered; otherwise, they are in $\mathbb{R}^2$. + +The discretization is given by the matrix system + +$$\left(I + \frac{i\alpha\tau}{2h^2}\Delta + \frac{i\tau}{2}V(\mathbf{x})\right)\psi^{n+1} = \left(I - \frac{i\alpha\tau}{2h^2}\Delta - \frac{i\tau}{2}V(\mathbf{x})\right)\psi^n, \quad (61)$$ + +where $I$ is the identity matrix, $\Delta$ is the discrete representation of the Laplacian operator in space, and $V(\mathbf{x})$ is the diagonal matrix that represents the operator of the external potential depending on $\mathbf{x}$. 
+ +The paraxial wave equation (also known as harmonic oscillator) + +$$2i\psi_t + \Delta\psi - r^2\psi = 0, \quad (62)$$ + +where $r = x$ for $\mathbf{x} \in \mathbb{R}$ or $r = \sqrt{x_1^2 + x_2^2}$ for $\mathbf{x} \in \mathbb{R}^2$, describes the wave function for a laser beam [40]. + +One solution for this equation can be presented as Hermite-Gaussian modes on a rectangular domain: + +$$ \begin{aligned} \psi_{nm}(\mathbf{x}, t) = & A_{nm} \frac{\exp[i(\kappa_1+\kappa_2)+2i(n+m+1)\gamma]}{\sqrt{2^{n+m}n!m!\pi}} \beta \\ & \times \exp\left[i(\alpha\mathbf{r}^2 + \delta_1x_1 + \delta_2x_2) - (\beta x_1 + \epsilon_1)^2/2 - (\beta x_2 + \epsilon_2)^2/2\right] \\ & \times H_n(\beta x_1 + \epsilon_1)H_m(\beta x_2 + \epsilon_2), \end{aligned} \quad (63) $$ + +where $H_n(x)$ is the n-th order Hermite polynomial in the variable $x$, see [40,41]. + +In addition, some solutions of the paraxial equation may be expressed by means of Laguerre-Gaussian modes in the case of cylindrical domains (see [43]): + +$$ \begin{aligned} \psi_n^m(\mathbf{x}, t) = & A_n^m \sqrt{\frac{n!}{\pi(n+m)!}\beta} \\ & \times \exp\left[i(\alpha\mathbf{r}^2 + \delta_1x_1 + \delta_2x_2 + \kappa_1 + \kappa_2) - (\beta x_1 + \epsilon_1)^2/2 - (\beta x_2 + \epsilon_2)^2/2\right] \\ & \times \exp[i(2n+m+1)\gamma](\beta(x_1 \pm ix_2) + \epsilon_1 \pm i\epsilon_2)^m \\ & \times L_n^m((\beta x_1 + \epsilon_1)^2 + (\beta x_2 + \epsilon_2)^2), \end{aligned} \quad (64) $$ + +with $L_n^m(x)$ being the n-th order Laguerre polynomial with parameter $m$ in the variable $x$. + +$\alpha, \beta, \gamma, \delta_1, \delta_2, \epsilon_1, \epsilon_2, \kappa_1$ and $\kappa_2$ given by Equations (34)-(39) for both Hermite-Gaussian and Laguerre-Gaussian modes. + +Figures 1 and 2 show two examples of solutions of the one-dimensional paraxial equation with $\Omega = [-10, 10]$ and $T = 12$. The step sizes are $\tau = \frac{10}{200}$ and $h = \frac{10}{200}$. 
+ +---PAGE_BREAK--- + +**Figure 1.** (a) corresponding approximation for the one-dimensional Hermite-Gaussian beam with $t = 10$. The initial condition is $\sqrt{\frac{2}{3\sqrt{\pi}}}e^{-(\frac{3}{2}x)^2/2}$; (b) the exact solution for the one-dimensional Hermite-Gaussian beam with $t = 10$, $A_n = 1$, $\mu_0 = 1$, $\alpha_0 = 0$, $\beta_0 = \frac{4}{9}$, $n_0 = 0$, $\delta_0 = 0$, $\gamma_0 = 0$, $\epsilon_0 = 0$, $\kappa_0 = 0$. + +**Figure 2.** (a) corresponding approximation for the one-dimensional Hermite-Gaussian beam with $t = 10$. The initial condition is $\sqrt{\frac{2}{3\sqrt{\pi}}}e^{-(\frac{3}{2}x)^2/2+ix}$; (b) the exact solution for the one-dimensional Hermite-Gaussian beam with $t = 10$, $A_n = 1$, $\mu_0 = 1$, $\alpha_0 = 0$, $\beta_0 = \frac{4}{9}$, $n_0 = 0$, $\delta_0 = 1$, $\gamma_0 = 0$, $\epsilon_0 = 0$, $\kappa_0 = 0$. + +Figure 3 shows four profiles of two-dimensional Hermite-Gaussian beams considering $\Omega = [-6,6] \times [-6,6]$ and $T = 10$. The corresponding step sizes are $\tau = \frac{10}{40}$ and $h = (\frac{12}{48}, \frac{12}{48})$. +---PAGE_BREAK--- + +**Figure 3.** (Left): corresponding approximations for the two-dimensional Hermite-Gaussian beams with $t = 10$. The initial conditions are (a) $\frac{1}{\sqrt{8\pi}}e^{-(x^2+y^2)}$; (b) $\frac{1}{\sqrt{2\pi}}e^{-(x^2+y^2)}x$; (c) $\sqrt{\frac{2}{\pi}}e^{-(x^2+y^2)}xy$; (d) $\frac{1}{4\sqrt{32\pi}}e^{-(x^2+y^2)}(8x^2-2)(8y^2-2)$. (Right): the exact solutions for the two-dimensional Hermite-Gaussian beams with $t = 10$ and parameters $A_{nm} = \frac{1}{4}$, $a_0 = 0$, $\beta_0 = \sqrt{2}$, $\delta_{0,1} = 1$, $\gamma_{0,1} = 0$, $\epsilon_{0,1} = 0$, $\kappa_{0,1} = 0$. For (a) $n=0$ and $m=0$, for (b) $n=1$ and $m=0$, for (c) $n=1$ and $m=1$, for (d) $n=2$ and $m=2$. +---PAGE_BREAK--- + +Figure 4 shows two profiles of two-dimensional Laguerre-Gaussian beams considering $\Omega = [-6,6] \times [-6,6]$ and $T = 10$. 
The corresponding step sizes are $\tau = \frac{10}{40}$ and $\mathbf{h} = (\frac{12}{48}, \frac{12}{48})$. + +**Figure 4.** (Left): corresponding approximations for the two-dimensional Laguerre-Gaussian beams with $t = 10$. The initial conditions are (a) $\frac{1}{\sqrt{4\pi}}e^{-(x^2+y^2)}(x+iy)$; (b) $\frac{1}{\sqrt{2\pi}}e^{-(x^2+y^2)}(x+iy)(1-x^2-y^2)$. (Right): the exact solutions for the two-dimensional Laguerre-Gaussian beams with $t = 10$ and parameters $A_n^m = \frac{1}{4}$, $a_0 = 0$, $\beta_0 = \sqrt{2}$, $\delta_{0,1} = 1$, $\gamma_{0,1} = 0$, $\epsilon_{0,1} = 0$, $\kappa_{0,1} = 0$. + +**5. Conclusions** + +Rajendran et al. in [1] used similarity transformations introduced in [28] to show a list of integrable NLS equations with variable coefficients. In this work, we have extended this list, using similarity transformations introduced by Suslov in [26], and presenting a more extensive list of families of integrable nonlinear Schrödinger (NLS) equations with variable coefficients (see Table 1 as a primary list). In both approaches, the Riccati equation plays a fundamental role. The reader can observe that, using computer algebra systems, the parameters (see Equations (33)–(39)) provide a change of the dynamics of the solutions; the Mathematica files are provided as a supplement for the readers. Finally, we have tested numerical approximations for the inhomogeneous paraxial wave equation by the Crank-Nicolson scheme with analytical solutions. These solutions include oscillating laser beams and Laguerre-Gaussian beams. The explicit solutions have been found previously thanks to explicit solutions of Riccati-Ermakov systems [41]. + +**Supplementary Materials:** The following are available online at http://www.mdpi.com/2073-8994/8/5/38/s1, Mathematica supplement file. + +**Acknowledgments:** The authors were partially funded by the Mathematical Association of America through NSF (grant DMS-1359016) and NSA (grant DMS-1359016). 
Also, the authors are thankful for the funding received from the Department of Mathematics and Statistical Sciences and the College of Liberal Arts and Sciences at University of Puerto Rico, Mayagüez. E. S. is funded by the Simons Foundation Grant # 316295 and by the National Science Foundation Grant DMS-1440664. E. S. is also thankful for the start-up funds and the "Faculty +---PAGE_BREAK--- + +Development Funding Program Award" received from the School of Mathematics and Statistical Sciences and the College of Sciences at University of Texas, Rio Grande Valley. + +**Author Contributions:** The original results presented in this paper are the outcome of a research collaboration started during the Summer 2015 and continued until Spring 2016. Similarly, the selection of the examples, tables, graphics and extended bibliography is the result of a continuous long interaction between the authors. + +**Conflicts of Interest:** The authors declare no conflict of interest. + +References + +1. Rajendran, S.; Muruganandam, P.; Lakshmanan, M. Bright and dark solitons in a quasi-1D Bose-Einstein condensates modelled by 1D Gross-Pitaevskii equation with time-dependent parameters. *Phys. D Nonlinear Phenom.* **2010**, *239*, 366–386. [CrossRef] + +2. Agrawal, G.-P. *Nonlinear Fiber Optics*, 4th ed.; Academic Press: New York, NY, USA, 2007. + +3. Al Khawaja, U. A comparative analysis of Painlevé, Lax Pair and similarity transformation methods in obtaining the integrability conditions of nonlinear Schrödinger equations. *J. Phys. Math.* **2010**, *51*. [CrossRef] + +4. Brugarino, T.; Sciacca, M. Integrability of an inhomogeneous nonlinear Schrödinger equation in Bose-Einstein condensates and fiber optics. *J. Math. Phys.* **2010**, *51*. [CrossRef] + +5. Chen, H.-M.; Liu, C.S. Solitons in nonuniform media. *Phys. Rev. Lett.* **1976**, *37*, 693–697. [CrossRef] + +6. He, X.G.; Zhao, D.; Li, L.; Luo, H.G. Engineering integrable nonautonomous nonlinear Schrödinger equations. *Phys. 
Rev. E* **2009**, *79*. [CrossRef] [PubMed] + +7. He, J.; Li, Y. Designable integrability of the variable coefficient nonlinear Schrödinger equations. *Stud. Appl. Math.* **2010**, *126*, 1–15. [CrossRef] + +8. He, J.S.; Charalampidis, E.G.; Kevrekidis, P.G.; Frantzeskakis, D.J. Rogue waves in nonlinear Schrödinger models with variable coefficients: Application to Bose-Einstein condensates. *Phys. Lett. A* **2014**, *378*, 577–583. [CrossRef] + +9. Kruglov, V.I.; Peacock, A.C.; Harvey, J.D. Exact solutions of the generalized nonlinear Schrödinger equation with distributed coefficients. *Phys. Rev. E* **2005**, *71*. [CrossRef] [PubMed] + +10. Marikhin, V.G.; Shabat, A.B.; Boiti, M.; Pimpinelli, F. Self-similar solutions of equations of the nonlinear Schrödinger type. *J. Exp. Theor. Phys.* **2000**, *90*, 553–561. [CrossRef] + +11. Ponomarenko, S.A.; Agrawal, G.P. Do Solitonlike self-similar waves exist in nonlinear optical media? *Phys. Rev. Lett.* **2006**, *97*. [CrossRef] [PubMed] + +12. Ponomarenko, S.A.; Agrawal, G.P. Optical similaritons in nonlinear waveguides. *Opt. Lett.* **2007**, *32*, 1659–1661. [CrossRef] [PubMed] + +13. Raghavan, S.; Agrawal, G.P. Spatiotemporal solitons in inhomogeneous nonlinear media. *Opt. Commun.* **2000**, *180*, 377–382. [CrossRef] + +14. Serkin, V.N.; Hasegawa, A. Novel Soliton solutions of the nonlinear Schrödinger Equation model. *Phys. Rev. Lett.* **2000**, *85*. [CrossRef] [PubMed] + +15. Serkin, V.; Matsumoto, M.; Belyaeva, T. Bright and dark solitary nonlinear Bloch waves in dispersion managed fiber systems and soliton lasers. *Opt. Commun.* **2001**, *196*, 159–171. [CrossRef] + +16. Tian, B.; Shan, W.; Zhang, C.; Wei, G.; Gao, Y. Transformations for a generalized variable-coefficient nonlinear Schrödinger model from plasma physics, arterial mechanics and optical fibers with symbolic computation. *Eur. Phys. J. B* **2005**, *47*, 329–332. [CrossRef] + +17. Dai, C.-Q.; Wang, Y.-Y. 
Infinite generation of soliton-like solutions for complex nonlinear evolution differential equations via the NLSE-based constructive method. *Appl. Math. Comput.* **2014**, *236*, 606–612. [CrossRef] + +18. Wang, M.; Shan, W.-R.; Lü, X.; Xue, Y.-S.; Lin, Z.-Q.; Tian, B. Soliton collision in a general coupled nonlinear Schrödinger system via symbolic computation. *Appl. Math. Comput.* **2013**, *219*, 11258–11264. [CrossRef] + +19. Yu, F.; Yan, Z. New rogue waves and dark-bright soliton solutions for a coupled nonlinear Schrödinger equation with variable coefficients. *Appl. Math. Comput.* **2014**, *233*, 351–358. [CrossRef] + +20. Fibich, G. *The Nonlinear Schrödinger Equation*, Singular Solutions and Optical Collapse; Springer: Berlin/Heidelberg, Germany, 2015. + +21. Kevrekidis, P.G.; Frantzeskakis, D.J.; Carretero-Gonzáles, R. *Emergent Nonlinear Phenomena in Bose-Einstein Condensates: Theory and Experiment*; Springer Series of Atomic, Optical and Plasma Physics; Springer: Berlin/Heidelberg, Germany, 2008; Volume 45. +---PAGE_BREAK--- + +22. Suazo, E.; Suslov, S.-K. Soliton-Like solutions for nonlinear Schrödinger equation with variable quadratic Hamiltonians. *J. Russ. Laser Res.* **2010**, *33*, 63–83. [CrossRef] + +23. Sulem, C.; Sulem, P.L. *The Nonlinear Schrödinger Equation*; Springer: New York, NY, USA, 1999. + +24. Tao, T. Nonlinear dispersive equations: Local and global analysis. In *CBMS Regional Conference Series in Mathematics*; American Mathematical Society: Providence, RI, USA, 2006. + +25. Zakharov, V.-E.; Shabat, A.-B. Exact theory of two-dimensional self-focusing and one-dimensional self-modulation of waves in nonlinear media. *Soviet. Phys. JETP* **1972**, *34*, 62–69. + +26. Suslov, S.-K. On integrability of nonautonomous nonlinear Schrödinger equations. *Proc. Am. Math. Soc.* **2012**, *140*, 3067–3082. [CrossRef] + +27. Talanov, V.I. Focusing of light in cubic media. *JETP Lett.* **1970**, *11*, 199–201. + +28. 
Perez-Garcia, V.M.; Torres, P.J.; Konotop, V.K. Similarity transformations for nonlinear Schrödinger equations with time-dependent coefficients. *Physica D* **2006**, *221*, 31–36. [CrossRef] + +29. Ablowitz, M.; Hooroka, T. Resonant intrachannel pulse interactions in dispersion-managed transmission systems. *IEEE J. Sel. Top. Quantum Electron.* **2002**, *8*, 603–615. [CrossRef] + +30. Marhic, M.E. Oscillating Hermite-Gaussian wave functions of the harmonic oscillator. *Lett. Nuovo Cim.* **1978**, *22*, 376–378. [CrossRef] + +31. Carles, R. Nonlinear Schrödinger equation with time dependent potential. *Commun. Math. Sci.* **2010**, *9*, 937–964. [CrossRef] + +32. López, R.M.; Suslov, S.K.; Vega-Guzmán, J.M. On a hidden symmetry of quantum harmonic oscillators. *J. Differ. Equ. Appl.* **2013**, *19*, 543–554. [CrossRef] + +33. Aldaya, V.; Cossio, F.; Guerrero, J.; López-Ruiz, F.F. The quantum Arnold transformation. *J. Phys. A Math. Theor.* **2011**, *44*, 1–6. [CrossRef] + +34. Feynman, R.P.; Hibbs, A.R. *Quantum Mechanics and Path Integrals*; McGraw-Hill: New York, NY, USA, 1965. + +35. Cordero-Soto, R.; Lopez, R.M.; Suazo, E.; Suslov, S.K. Propagator of a charged particle with a spin in uniform magnetic and perpendicular electric fields. *Lett. Math. Phys.* **2008**, *84*, 159–178. [CrossRef] + +36. Lanfear, N.; López, R.M.; Suslov, S.K. Exact wave functions for a generalized harmonic oscillators. *J. Russ. Laser Res.* **2011**, *32*, 352–361. [CrossRef] + +37. López, R.M.; Suslov, S.K.; Vega-Guzmán, J.M. Reconstructing the Schrödinger groups. *Phys. Scr.* **2013**, *87*, 1–6. [CrossRef] + +38. Suazo, E.; Suslov, S.K. Cauchy problem for Schrödinger equation with variable quadratic Hamiltonians. *2011*. to be submitted. + +39. Suazo, E. Fundamental Solutions of Some Evolution Equations. Ph.D. Thesis, Arizona State University, Tempe, AZ, USA, September 2009. + +40. Mahalov, A.; Suazo, E.; Suslov, S.K. Spiral laser beams in inhomogeneous media. *Opt. 
Lett.* **2013**, *38*, 2763–2766. [CrossRef] [PubMed] + +41. Koutschan, C.; Suazo, E.; Suslov, S.K. Fundamental laser modes in paraxial optics: From computer algebra and simulations to experimental observation. *Appl. Phys. B* **2015**, *121*, 315–336. [CrossRef] + +42. Escorcia, J.; Suazo, E. Blow-up results and soliton solutions for a generalized variable coefficient nonlinear Schrödinger equation. Available online: http://arxiv.org/abs/1605.07554 (accessed on 24 May 2016). + +43. Andrews, L.C.; Phillips, R.L. *Laser Beam Propagation through Random Media*, 2nd ed.; SPIE Press: Bellingham, WA, USA, 2005. + +© 2016 by the authors. Licensee MDPI, Basel, Switzerland. This article is an open access +article distributed under the terms and conditions of the Creative Commons Attribution +(CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +Article + +# Coherent States of Harmonic and Reversed Harmonic Oscillator + +Alexander Rauh + +Department of Physics, University of Oldenburg, Oldenburg D-26111, Germany; +alexander.rauh@uni-oldenburg.de; Tel.: +49-441-798-3460 + +Academic Editor: Young Suh Kim + +Received: 16 January 2016; Accepted: 3 June 2016; Published: 13 June 2016 + +**Abstract:** A one-dimensional wave function is assumed whose logarithm is a quadratic form in the configuration variable with time-dependent coefficients. This trial function allows for general time-dependent solutions both of the harmonic oscillator (HO) and the reversed harmonic oscillator (RO). For the HO, apart from the standard coherent states, a further class of solutions is derived with a time-dependent width parameter. The width of the corresponding probability density fluctuates, or "breathes" periodically with the oscillator frequency. In the case of the RO, one also obtains normalized wave packets which, however, show diffusion through exponential broadening with time. 
At the initial time, the integration constants give rise to complete sets of coherent states in the three cases considered. The results are applicable to the quantum mechanics of the Kepler-Coulomb problem when transformed to the model of a four-dimensional harmonic oscillator with a constraint. In the classical limit, as was shown recently, the wave packets of the RO basis generate the hyperbolic Kepler orbits, and, by means of analytic continuation, the elliptic orbits are also obtained quantum mechanically. + +**Keywords:** inverted harmonic oscillator; harmonic trap; Kepler-Coulomb problem; Kustaanheimo-Stiefel transformation + +## 1. Introduction + +Coherent states of the harmonic oscillator (HO) were introduced already at the beginning of wave mechanics [1]. Much later, such states were recognized as being useful as a basis to describe radiation fields [2] and optical correlations [3]. The reversed harmonic oscillator (RO) refers to a model with repulsive harmonic forces, and was discussed in [4] in the context of irreversibility. Recently, in [5], which also communicates historical remarks, the RO was applied to describe nonlinear optical phenomena. As mentioned in [5], the term “inverted harmonic oscillator” (IO) originally refers to a model with negative kinetic and potential energy, as proposed in [6]. Nevertheless, most articles under the headline IO, actually consider the RO model, see, e.g., [7–9]. + +The RO model formally can be obtained by assuming a purely imaginary oscillator frequency. It is then not anymore possible to construct coherent states by means of creation and annihilation operators; for a text book introduction see [10]. In [9], the RO was generalized by the assumption of a time-dependent mass and frequency. The corresponding Schrödinger equation was solved by means of an algebraic method with the aim to describe quantum tunneling. 
+ +In the present study, emphasis is laid on the derivation of complete sets of coherent states both for the HO and the RO model, together with their time evolution. In the case of the HO, in addition to the standard coherent states, a further function set is found with a time-dependent width parameter. Both in the HO and RO case, the integration constants of the time-dependent solutions induce complete function sets which, at time $t = 0$, are isomorphic to the standard coherent states of the HO. +---PAGE_BREAK--- + +In Section 6, an application to the quantum mechanics of the Kepler-Coulomb problem will be briefly discussed. As has first been observed by Fock [11], the underlying four-dimensional rotation symmetry of the non-relativistic Hamiltonian of the hydrogen atom permits the transformation to the problem of four isotropic harmonic oscillators with a constraint; for applications see, e.g., [12–14]. The transformation proceeds conveniently by means of the Kustaanheimo-Stiefel transformation [15]. In [14], the elliptic Kepler orbits were derived in the classical limit on the basis of coherent HO states. By means of coherent RO states, the classical limit for hyperbolic Kepler orbits was achieved in [16,17], whereby the elliptic regime could be obtained by analytic continuation from the hyperbolic side. Recently, by means of the same basis, a first order quantum correction to Kepler’s equation was derived in [18], whereby the smallness parameter was defined by the reciprocal angular momentum in units of $\hbar$. + +As compared to the classical elliptic Kepler orbits, the derivation of hyperbolic orbits from quantum mechanics was accomplished quite recently [16,17]. For this achievement, it was crucial to devise a suitable time-dependent ansatz for the wave function, see (1) below, in order to construct coherent RO states. 
As it turns out, the wave function (1) contains also the usual coherent HO states, and, unexpectedly, a further set of coherent states, which we call type-II states. The latter are characterized by a time-dependent width parameter and are solutions of the time-dependent Schrödinger equation of the HO. Section 4 contains the derivation. Essentially, the type-II states offer a disposable width parameter which allows us, for instance, to describe arbitrarily narrowly peaked initial states together with their time evolution in a harmonic potential. In this paper, a unified derivation is presented of coherent states of the HO, RO, and type-II HO states. Furthermore, the connection of HO and RO with the quantum mechanics of the Kepler-Coulomb problem is briefly discussed in the context of the derivation of the classical Kepler orbits from quantum mechanics. + +## 2. Introducing a Trial Wave Function + +In order to solve the Schrödinger equation for the harmonic oscillator (HO) and the reversed oscillator (RO), a trial wave function of Gaussian type is assumed as follows + +$$ \psi(x,t) = C_0 \exp \left[ C(t) + B(t)x - \Gamma(t)x^2 \right], \quad x \in \mathbf{R}, \quad \text{Real}(\Gamma) > 0, \qquad (1) $$ + +where $C, B, \Gamma$ are complex functions of time $t$ and $C_0$ the time-independent normalization constant. When the Schrödinger operator $[\mathrm{i}\hbar\partial_t - H]$ is applied to $\psi$ for a Hamiltonian with harmonic potential, then the wave function $\psi$ is reproduced up to a factor which is a quadratic polynomial and must vanish identically in the configuration variable $x$: + +$$ 0 = p_0(t) + p_1(t)x + p_2(t)x^2. \qquad (2) $$ + +The conditions $p_0 = 0$, $p_1 = 0$, and $p_2 = 0$, give rise to three first-order differential equations for the functions $C(t)$, $B(t)$, and $\Gamma(t)$. In the following we examine two cases for the HO: type-I and type-II are characterized by a constant and time-dependent function $\Gamma$, respectively. 
In the case of the RO, only a time-dependent $\Gamma$ leads to a solution. By a suitable choice of the parameters, the ansatz (1) solves the time-dependent Schrödinger equation both for the HO and the RO Hamiltonian + +$$ H = p^2/(2m) + (m\omega^2/2)x^2 \quad \text{and} \quad H_{\Omega} = p^2/(2m) - (m\Omega^2/2)x^2, \quad \omega, \Omega > 0, $$ + +respectively. + +## 3. Standard (Type-I) Coherent States of the HO + +In the following, the time-dependent solutions are derived, within the trial function scheme, for the Hamiltonian + +$$ H = p^2/(2m) + (m\omega^2/2)x^2 = (\hbar\omega/2) [-\partial_\zeta^2 + \zeta^2], \qquad (3) $$ +---PAGE_BREAK--- + 
An equivalent statement is: + +$$\frac{1}{\pi} \int_{0}^{\infty} u du \int_{0}^{2\pi} d\varphi \langle \zeta_2 | z \rangle \langle z | \zeta_1 \rangle = \delta(\zeta_2 - \zeta_1), \quad (9)$$ + +which corresponds to the completeness of the energy eigenfunctions of the harmonic oscillator. In Appendix B, we reproduce a proof of (9), which is appropriate, since the proof has to be extended to the modified coherent states in the type-II HO and the RO cases. + +In terms of the scaled variables $\zeta$ and $\tau = t\omega$, the trial ansatz reads: + +$$\psi(\zeta, \tau) = C_0 \exp[c(\tau) + \beta(\tau)\zeta - \gamma(\tau)\zeta^2/2], \quad (10)$$ + +where $c, \beta, \gamma$ are dimensionless functions of $\tau$, and the re-scaling factor of the probability density, $1/\sqrt{\alpha}$, is taken into the normalization constant $C_0$. + +We assume that $\gamma = \gamma_0 = \text{const}$. Then, the polynomial (2) gives rise to the equations: + +$$\gamma_0^2 = 1, \quad i\beta'(\tau) = \beta(\tau), \quad 2ic'(t) = 1 - \beta^2(t), \quad (11)$$ + +which implies that $\gamma_0 = 1$ is fixed. The further solutions emerge easily as: + +$$\beta(\tau) = C_2 \exp[-i\tau], \quad c(\tau) = -i\tau/2 - (C_2^2/4) \exp[-2i\tau] + C_3, \quad (12)$$ + +where $C_2$ and $C_3$ are complex integration constants. A comparison with (5), at $t=0$, suggests to set: + +$$C_2 = \sqrt{2}z, \quad C_3 = -(1/2)zz^*, \quad (13)$$ + +which specifies the functions $\beta$ and $c$ as follows: + +$$\beta(\tau) = \sqrt{2}(z \exp[-i\tau]), \quad c(t) = -i\tau/2 - (1/2)[zz^* + (z \exp[-i\tau])^2]. \quad (14)$$ +---PAGE_BREAK--- + +The normalization integral with respect to $\zeta$ amounts to the condition + +$$C_0^2 \sqrt{\pi} \exp[zz^*] = 1; \qquad (15)$$ + +hence (7) with (5) is reproduced. + +**4. 
Type-II Solutions of the Harmonic Oscillator** + +With $\gamma$ being a function of time, one obtains the following differential equations with prime denoting the derivative with respect to the scaled time $\tau$: + +$$i\gamma' = \gamma^2 - 1, \quad i\beta' = \gamma\beta, \quad 2ic' = \gamma - \beta^2. \qquad (16)$$ + +The solution for $\gamma$ is + +$$\gamma(\tau) = \frac{\exp(2i\tau) - C_1}{\exp(2i\tau) + C_1}, \quad C_1 = \frac{1-\gamma_0}{1+\gamma_0}, \quad \gamma_0 = \gamma(0). \qquad (17)$$ + +Splitting $\gamma$ into its real and imaginary parts, one can write + +$$\begin{aligned} \gamma(\tau) &= \gamma_R + i\gamma_I; & \gamma_R &= (1-C_1^2)N_1^{-1}, & \gamma_I &= 2C_1N_1^{-1}\sin(2\tau), \\ N_1(\tau) &= 1+C_1^2+2C_1\cos(2\tau) = 4(1+\gamma_0)^{-2}[1+(\gamma_0^2-1)\sin^2(\tau)]. & & & \end{aligned} \qquad (18)$$ + +In order that the wave function is square integrable, $\gamma_R$ has to be positive, which implies that + +$$C_1^2 < 1 \text{ or } \gamma_0 > 0. \qquad (19)$$ + +The initial value $\gamma(t=0) = \gamma_0 > 0$ emerges as a disposable parameter. + +The probability density, $P = |\psi(\zeta, \tau)|^2$, is characterized by a width of order of magnitude $d = 1/\sqrt{\gamma_R}$: + +$$d(\tau) = \sqrt{[1 + (\gamma_0^2 - 1) \sin^2(\tau)] / \gamma_0}. \qquad (20)$$ + +Obviously, the width fluctuates, or "breathes", periodically with time. Of course, this is not a breathing mode as observed in systems of confined interacting particles, see [19,20], e.g., + +Integration of the $\beta$ equation leads to + +$$\beta = C_2 \exp(i\tau) [\exp(2i\tau) + C_1]^{-1} = C_2 N_1^{-1} [\exp(-i\tau) + \exp(i\tau) C_1]. \qquad (21)$$ + +Later on, the complex integration constant $C_2 = A_2 + iB_2$ will serve as a state label. The third differential equation of (16) amounts to + +$$c(\tau) = i\tau/2 - C_2^2 [4(\exp(2i\tau) + C_1)]^{-1} - (1/2) \ln \left( \sqrt{\exp(2i\tau) + C_1} \right) + C_3. 
\qquad (22)$$ + +For reasons explained in Appendix A, we dispose of the integration constant $C_3$ as follows + +$$C_3 = -(1 + \gamma_0)(8\gamma_0)^{-1}(A_2^2 + \gamma_0 B_2^2), \quad C_2 = A_2 + iB_2. \qquad (23)$$ + +In Appendix A, the probability density $P$ is derived in the following form + +$$P(\xi, \tau) = \frac{C_0^2}{\sqrt{N_1}} \exp[-\gamma_R (\xi - \beta_R / \gamma_R)^2], \qquad (24)$$ +---PAGE_BREAK--- + +where the time-dependent functions $\gamma_R$ and $N_1$ are defined through (17) and (18), and $\beta_R$ comes out as + +$$ \beta_R(\tau) = (1/8)(1 + \gamma_0)^{-1} N_1^{-1} [A_2 \cos(\tau) + B_2 \sin(\tau)]. \quad (25) $$ + +The complex integration constant $C_2$ corresponds to the familiar complex quantum number $z$ in the case of the standard coherent states; hence, the real numbers $A_2, B_2$ characterize different states. The normalization constant $C_0$ obeys the following condition, see Appendix A, + +$$ 1 = (1/2) C_0^2 \sqrt{\pi / \gamma_0 (1 + \gamma_0)}. \quad (26) $$ + +## 4.1. Completeness of Type-II States + +Combining the above results, we write the time-dependent wave function as follows: + +$$ \psi(\xi, \tau) = \frac{C_0}{\sqrt{\exp(2i\tau) + C_1}} \exp \left[ C_3 - \frac{C_2^2 (\exp(-2i\tau) + C_1)}{4N_1} + \beta(\tau)\xi - \gamma(\tau)\frac{\xi^2}{2} \right], \quad (27) $$ + +where $\gamma$, $\beta$, and $C_3$ are defined in (18), (21), and (23), respectively. Let us consider $\psi$ at zero time: + +$$ \psi(\xi, 0) = \frac{C_0}{\sqrt{1+C_1}} \exp \left[ C_3 - \frac{C_2^2}{4(1+C_1)} + C_2(1+\gamma_0)\xi/2 - \gamma_0\xi^2/2 \right]. \quad (28) $$ + +In (28), we set $\tilde{\xi} = \sqrt{\gamma_0}\,\xi$ to write: + +$$ \psi(\tilde{\xi}, 0) = \frac{C_0 \gamma_0^{-1/4}}{\sqrt{1+C_1}} \exp \left[ C_3 - \frac{C_2^2}{4(1+C_1)} + \frac{C_2(1+\gamma_0)}{2\sqrt{\gamma_0}}\tilde{\xi} - \frac{\tilde{\xi}^2}{2} \right]. 
\quad (29) $$ + +Now we substitute the complex variable $z$ for the integration constant $C_2$ as follows: + +$$ C_2 \frac{1 + \gamma_0}{2\sqrt{\gamma_0}} = \sqrt{2}z \quad (30) $$ + +and obtain: + +$$ \psi(\tilde{\xi}, 0) = \frac{C_0}{\sqrt{1+C_1}} \exp \left[ C_3 - z^2 \frac{\gamma_0}{1+\gamma_0} + \sqrt{2}z\tilde{\xi} - \frac{\tilde{\xi}^2}{2} \right]. \quad (31) $$ + +In $C_3$, given in (23), we make the following replacements which are induced by (30): + +$$ A_2 \rightarrow \kappa(z+z^*), \quad B_2 \rightarrow -i\kappa(z-z^*), \quad \kappa = \frac{\sqrt{2\gamma_0}}{1+\gamma_0}. \quad (32) $$ + +There occur some nice cancelations, and one obtains: + +$$ \psi_z(\tilde{\xi}) = \frac{C_0 \gamma_0^{-1/4}}{\sqrt{1+C_1}} \exp \left[ -\frac{1}{2}(zz^* + z^2) + iD + \sqrt{2}z\tilde{\xi} - \frac{\tilde{\xi}^2}{2} \right], \quad D = \frac{1-\gamma_0}{2(1+\gamma_0)} \operatorname{Im}(z^2). \quad (33) $$ + +Comparison with (5) shows that the wave function (33) has the same structure apart from the purely imaginary phase $iD$. The latter drops out in the completeness proof, see (A15) in Appendix B. As a consequence, the states (33) form a complete set of states with respect to the state label $z$. + +At $\tau=0$, the states (33) differ from the standard coherent states (5) by the state dependent phase $D$, through the variables $\tilde{\zeta}$ and $\tilde{\xi}$ which denote the differently scaled space variable $x$, and also through the different definition of the quantum number $z$, which for simplicity was denoted by the same symbol in (30). Essentially, type-I and type-II states differ by their time evolution and width parameter $\gamma_0$ which is equal to $a^2 = m\omega/\hbar$ and to an arbitrary positive number, respectively. +---PAGE_BREAK--- + +## 4.2. Mean Values and Uncertainty Product + +In the following, we list mean values for the time-dependent states (27) including the position momentum uncertainty product $\Delta_{xp}$. 
They are periodic in time with the oscillator angular frequency $\omega \equiv 2\pi/T$. The uncertainty product is minimal at the discrete times $t_n = (1/4)nT$, $n = 0, 1, \dots$. For comparison, the traditional coherent states are always minimal [10]. We use the abbreviations $(\Delta_x)^2 = \langle x^2 \rangle - \langle x \rangle^2$ and $(\Delta_v)^2 = \langle v^2 \rangle - \langle v \rangle^2$ for the mean square deviations of position and velocity, respectively. + +$$ \langle x(\tau) \rangle = (1/\alpha)(1 + \gamma_0)(2\gamma_0)^{-1} [A_2 \cos(\tau) + B_2\gamma_0 \sin(\tau)]; \quad (34) $$ + +$$ \langle v(\tau) \rangle = \hbar a(2m\gamma_0)^{-1} [-A_2 \sin(\tau) + \gamma_0 B_2 \cos(\tau)]; \quad (35) $$ + +$$ (\Delta_x)^2 = (4a^2\gamma_0)^{-1} [1 + \gamma_0^2 + (1-\gamma_0^2)\cos(2\tau)]; \quad (36) $$ + +$$ (\Delta_v)^2 = \hbar^2 a^2 (4m^2\gamma_0)^{-1} [1 + \gamma_0^2 + (\gamma_0^2 - 1)\cos(2\tau)]; \quad (37) $$ + +$$ \langle H \rangle = \hbar\omega(8\gamma_0^2)^{-1} \left[ (1+\gamma_0)^2 (A_2^2 + \gamma_0^2 B_2^2) + 2\gamma_0(1+\gamma_0^2) \right]. \quad (38) $$ + +It is noticed that the mean square deviations do not depend on the state label ($A_2, B_2$). The uncertainty product follows immediately from (36) and (37) as + +$$ \Delta_{xp} := (\Delta_x)^2 (\Delta_p)^2 = m^2 (\Delta x)^2 (\Delta v)^2 = \frac{\hbar^2}{16\gamma_0^2} [(1+\gamma_0^2)^2 - (1-\gamma_0^2)^2 \cos^2(2\tau)]. \quad (39) $$ + +In the special case $\gamma_0 = 1$, the product is always minimal. As a matter of fact, $\gamma_0 = 1$ is the type-I case of Section 3. + +By (38), the mean energy does not depend on time and is positive definite, as it must be. The limit to the standard case with $\gamma_0 = 1$, gives the known result + +$$ \langle H \rangle_{\gamma_0=1} = \hbar\omega(zz^* + 1/2). \quad (40) $$ + +and the state with $z=0$ is the ground state of the HO with zero point energy $\hbar\omega/2$. + +# 5. 
Wave Packet Solutions for the RO + +For convenience, we will keep the same symbols for the trial functions $\gamma(\tau)$, $\beta(\tau)$, and $c(\tau)$. Setting $\omega = i\Omega$ with $\Omega > 0$, implies that $a^2 = -m\Omega/\hbar$. In the coherent state (5), the exponential part, $-z^2/2 = -(m\omega/\hbar)x^2/2$, is then replaced by $(m\Omega/\hbar)x^2/2$, which precludes normalization. + +We introduce $1/a_\Omega$ as the new length parameter and define the dimensionless magnitudes + +$$ z = a_\Omega x, \quad \tau = t\Omega, \quad \text{with } a_\Omega^2 = m\Omega/\hbar. \quad (41) $$ + +The Schrödinger equation, with the ansatz (10), has to be solved for the RO Hamiltonian + +$$ H_{\Omega} = p^2/(2m) - m\Omega^2/2 x^2 = -\hbar\Omega/2 [\partial_{x}^{2} + z^{2}]. \quad (42) $$ + +From (2), the following differential equations result: + +$$ i\gamma'(\tau) = 1 + \gamma^2(\tau), \quad i\beta'(\tau) = \gamma(\tau)\beta(\tau), \quad 2ic'(\tau) = \gamma(\tau) - \beta^2(\tau), \quad (43) $$ +---PAGE_BREAK--- + +where, as compared with the HO case in (16), only the equation for $\gamma$ differs. Beginning with $\gamma$, one successively obtains the following solutions + +$$ \gamma(\tau) = -i \tanh(\tau + iC_1), \quad (44) $$ + +$$ \beta(\tau) = C_2 / \cosh(\tau + i C_1), \quad (45) $$ + +$$ c(\tau) = C_3 - (1/2) \ln(\cosh(\tau + i C_1)) + (i/2) C_2^2 \tanh(\tau + i C_1), \quad (46) $$ + +where $C_1, C_2, C_3$ are integration constants. We assume that + +$$ \gamma_0 \equiv \gamma(0) = \tan(C_1) > 0, \quad 0 < C_1 < \pi/2, \quad (47) $$ + +which implies that + +$$ \cos(C_1) = (1 + \gamma_0^2)^{-1/2}, \quad \sin(C_1) = \gamma_0 (1 + \gamma_0^2)^{-1/2}. \quad (48) $$ + +In order to decompose the functions $c(\tau)$, $\beta(\tau)$, $\gamma(\tau)$ into their real and imaginary parts, we take over the following abbreviations from [16] + +$$ f(\tau) = \cosh(\tau) - i\gamma_0 \sinh(\tau), \quad h(\tau) = [ff^*]^{-1}. 
\quad (49) $$ + +After the decompositions $\beta = \beta_R + i\beta_I$, $\gamma = \gamma_R + i\gamma_I$, $C_2 = A_2 + iB_2$, we infer from (44) to (46): + +$$ \gamma_R = h(\tau)\gamma_0, \quad \gamma_I = -(h(\tau)/2)(1+\gamma_0^2)\sinh(2\tau); \quad (50) $$ + +$$ \beta_R = h(\tau) \sqrt{1 + \gamma_0^2} [A_2 \cosh(\tau) + \gamma_0 B_2 \sinh(\tau)], $$ + +$$ \beta_I = h(\tau) \sqrt{1 + \gamma_0^2} [B_2 \cosh(\tau) - \gamma_0 A_2 \sinh(\tau)]; \quad (51) $$ + +$$ \exp[c(\tau)] = [\cosh(\tau + i C_1)]^{-1/2} \exp[C_3 - C_2^2 \gamma(\tau)/2]. \quad (52) $$ + +According to (50), $\gamma_R$ is larger zero, which makes the wave function (10) a normalizable wave packet. The probability density reads: + +$$ P(\zeta, \tau) = C_0^2 \exp[c + c^* + 2\beta_R\zeta - \gamma_R\zeta^2]. \quad (53) $$ + +Integration with respect to $\zeta$ leads to the normalization condition + +$$ 1 = C_0^2 \sqrt{\pi/\gamma_R} \exp[c(\tau) + c^*(\tau) + \beta_R^2/\gamma_R]. \quad (54) $$ + +The normalization constant $C_0$ was determined in [16] for real constants $C_2$. With $C_2 = A_2 + iB_2$, we dispose of the integration constant $C_3$ as + +$$ C_3 = -(1/2)(A_2^2/\gamma_0 + B_2^2\gamma_0) \quad (55) $$ + +to obtain in a straightforward manner + +$$ C_0^2 = \sqrt{\pi(\gamma_0^{-1} + \gamma_0)}, \quad (56) $$ + +which is a time independent condition as it must be. + +With the aid of elementary trigonometric manipulations and the normalization constant $C_0$ given in (56), the wave function can be written as follows: + +$$ \psi(\zeta, \tau) = (\gamma_0/\pi)^{1/4} \sqrt{h(\tau)f(\tau)} \exp[C_3 - (1/2)C_2^2\gamma(\tau) + \beta(\tau)\zeta - \gamma(\tau)\zeta^2/2]. \quad (57) $$ +---PAGE_BREAK--- + +5.1. 
Coherent States of the RO + +As before, let us consider the wave function at time $t = 0$, where in particular $h = f = 1$: + +$$ +\psi(\zeta, 0) \equiv \psi(\zeta, \tau = 0) = (\gamma_0 / \pi)^{1/4} \exp \left[ C_3 - \frac{1}{2} C_2^2 \gamma_0 + C_2 \sqrt{1 + \gamma_0^2}\, \zeta - \gamma_0 \zeta^2 / 2 \right]. \quad (58) +$$ + +After the re-scaling $\zeta \rightarrow \tilde{\zeta}$ with $\tilde{\zeta} = \sqrt{\gamma_0} \zeta$, one obtains + +$$ +\Psi(\tilde{\zeta}, 0) = \pi^{-1/4} \exp \left[ C_3 - \frac{1}{2} C_2^2 \gamma_0 + C_2 \sqrt{(1+\gamma_0^2)/\gamma_0} \tilde{\zeta} - \frac{\tilde{\zeta}^2}{2} \right]. \quad (59) +$$ + +In view of the standard HO wave function (5), we replace the integration constant $C_2$ by $z$: + +$$ +C_2 \sqrt{(1 + \gamma_0^2) / \gamma_0} = \sqrt{2} z \tag{60} +$$ + +and obtain + +$$ +\Psi_z(\tilde{\zeta}) = \pi^{-1/4} \exp \left[ C_3 - \frac{\gamma_0^2 z^2}{(1+\gamma_0^2)} + \sqrt{2} z \tilde{\zeta} - \frac{\tilde{\zeta}^2}{2} \right]. \quad (61) +$$ + +In $C_3$, given in (55), the relation (60) gives rise to the substitutions + +$$ +A_2 \rightarrow \kappa_1(z+z^*), \quad B_2 \rightarrow -i\kappa_1(z-z^*), \quad \kappa_1 = (1/2)\sqrt{2\gamma_0/(1+\gamma_0^2)}, \qquad (62) +$$ + +and hence to + +$$ +C_3 = [4(1 + \gamma_0^2)]^{-1} [(\gamma_0^2 - 1)(z^2 + z^{*2}) - 2(1 + \gamma_0^2)zz^*]. \quad (63) +$$ + +After some elementary re-arrangements, one finds + +$$ +\Psi_z(\tilde{\zeta}) = \frac{1}{\pi^{1/4}} \exp \left[ -\frac{1}{2}(zz^* + z^2) + iD_1 + \sqrt{2}z\tilde{\zeta} - \frac{\tilde{\zeta}^2}{2} \right], \quad D_1 = \frac{1-\gamma_0^2}{2(1+\gamma_0^2)} \operatorname{Im}(z^2). \quad (64) +$$ + +Apart from the purely imaginary phase $i D_1$, the wave functions $\Psi_z$ are the same as the standard coherent states (5). Since in the completeness proof the $D_1$ phase drops out, see (A15) in Appendix B, the states $\Psi_z$ form a complete function set. + +5.2. 
Mean Values + +With the aid of Mathematica [21], we get the following mean values for position x, velocity v, +their mean square deviations (Δx)², (Δv)², and the mean energy $\langle H_{\Omega} \rangle$: + +$$ +\langle x \rangle = (a_{\Omega})^{-1} \sqrt{1 + \gamma_0^{-2}} [A_2 \cosh(\tau) + \gamma_0 B_2 \sinh(\tau)]; \quad (65) +$$ + +$$ +(\Delta x)^2 = (2a_\Omega^2 \gamma_0)^{-1} [\cosh^2(\tau) + \gamma_0^2 \sinh^2(\tau)]; \quad (66) +$$ + +$$ +\langle v \rangle = (\hbar a_{\Omega}/m) \sqrt{1 + \gamma_0^{-2}} [A_2 \sinh(\tau) + \gamma_0 B_2 \cosh(\tau)]; \quad (67) +$$ + +$$ +(\Delta v)^2 = (\hbar a_{\Omega} / (2m))^2 \gamma_0^{-1} [\gamma_0^2 - 1 + (1 + \gamma_0^2) \cosh(2\tau)]; \quad (68) +$$ + +$$ +\langle H_{\Omega} \rangle = \hbar\Omega(4\gamma_0)^{-1}[\gamma_0^2 - 1 + 2(\gamma_0 + \gamma_0^{-1})(\gamma_0^2 B_2^2 - A_2^2)]. \quad (69) +$$ + +The mean energy does not depend on time, as it must be. With the aid of (62), the mean energy +could also be expressed in terms of the complex state label z. Since $A_2$ and $B_2$ are arbitrary real +---PAGE_BREAK--- + +numbers, the mean energy can have any positive or negative value. From (66) and (68) one infers the +position-momentum uncertainty product $\Delta_{xp}$ as + +$$ +\Delta_{xp}^2(\tau) = \hbar^2 / (8\gamma_0^2) \left[ \cosh^2(\tau) + \gamma_0^2 \sinh^2(\tau) \right] \left[ \gamma_0^2 - 1 + (1+\gamma_0^2) \cosh(2\tau) \right]. \quad (70) +$$ + +This product obeys the inequality + +$$ +\Delta_{xp}^2(\tau) > \Delta_{xp}^2(0) = \frac{\hbar^2}{4}, \quad \tau > 0. \tag{71} +$$ + +Obviously, the uncertainty product is minimal at $\tau = 0$, which means for the coherent states (64). +By (66), the wave packets broaden exponentially with time. + +**6. 
Application to the Kepler-Coulomb Problem** + +The connection of the non-relativistic Hamiltonian for the hydrogen atom with the model +of a four-dimensional oscillator is conveniently achieved by means of the Kustaanheimo-Stiefel +transformation [15], which we write as follows [16,22] + +$$ +\begin{align*} +u_1 &= \sqrt{r} \cos(\theta/2) \cos(\varphi - \Phi); & u_2 &= \sqrt{r} \cos(\theta/2) \sin(\varphi - \Phi); \\ +u_3 &= \sqrt{r} \sin(\theta/2) \cos(\Phi); & u_4 &= \sqrt{r} \sin(\theta/2) \sin(\Phi), +\end{align*} +\tag{72} +$$ + +where $r, \theta, \varphi$ are three-dimensional polar coordinates with $r > 0, 0 < \theta < \pi, 0 \le \varphi < 2\pi$, +and $0 \le \Phi < 2\pi$ generates the extension to the fourth dimension. The vector **u** = {$u_1, u_2, u_3, u_4$} +covers the $\mathbf{R}^4$ and the volume elements are related as [16] + +$$ +du_1 du_2 du_3 du_4 = (1/8) r \sin(\theta) dr d\theta d\varphi d\Phi. \quad (73) +$$ + +The stationary Schrödinger equation $H\psi = E\psi$ for the Hamiltonian $H = p^2/(2m) - \lambda/r$ is +transformed into the following form of a four-dimensional harmonic oscillator [14]: + +$$ +H_u \Psi(\mathbf{u}) = \lambda \Psi(\mathbf{u}), \quad H_u = -\frac{\hbar^2}{(8m)} \Delta_u - E \mathbf{u} \cdot \mathbf{u}, \quad \Delta_u = \partial_{u_1}^2 + \dots + \partial_{u_4}^2 +\qquad (74) +$$ + +with the constraint + +$$ +\partial_{\Phi} \Psi(\mathbf{u}) = 0. \tag{75} +$$ + +It should be noticed that, by (72), the components $u_i^2$ have the dimension of a length rather than +length square. As a consequence, in the evolution equation $i\hbar\partial_\sigma\Psi = H_u\Psi$, the parameter $\sigma$, which has +the dimension time/length, is not the time parameter of the original problem. For negative energies +with $E<0$, four-dimensional coherent oscillator states (of type-I) were used in [14] to show that elliptic +orbits emerge in the classical limit whereby $\sigma$ turns out to be proportional to the eccentric anomaly. 
+ +In the spectrum of positive energies (ionized states of the hydrogen atom) with $E > 0$, +coherent states of the RO were constructed in [16] and gave rise to hyperbolic orbits in the classical limit; +by analytic continuation, also the elliptic orbits were derived from the RO states in the classical +limit [17]. In addition, Kepler's equation was obtained by the assumption that time-dependence enters +through the curve parameter $\sigma$ only. Recently [18], based on the coherent RO states, the first order +quantum correction to Kepler's equation could be established for the smallness parameter $\epsilon = \hbar/L$ +where L denotes the orbital angular momentum. + +**7. Conclusions** + +Besides the standard coherent states of the harmonic oscillator (HO), a further solution family of +the time-dependent Schrödinger equation was derived with the following properties: (i) The functions +are normalizable, of Gaussian type, and contain a disposable width parameter. The latter allows us, +for instance, to use arbitrarily concentrated one-particle states independently of the parameters of +---PAGE_BREAK--- + +a harmonic trap; (ii) The functions are complete and isomorphic to the standard coherent states at time $t=0$; (iii) The states minimize the position-momentum uncertainty product at the discrete times $T_n = n\pi/(2\omega)$, $n=0,1,...$; (iv) The width of the wave packets "breathes" periodically with period $T/2 = \pi/\omega$; (v) There is no diffusion, and $T = 2\pi/\omega$ is the recurrence time of the states. + +In the case of the reversed harmonic oscillator (RO), there exists only one family of time-dependent solutions. They share the properties (i) and (ii) of the type-II HO states, and (iii) is fulfilled at time $t=0$, only. There is no recurrence; instead, there is diffusion with a broadening which increases exponentially with time. The application to the Kepler-Coulomb problem was briefly discussed. 
The HO coherent states of type-I and the RO coherent states served as basis to derive, in the classical limit, the elliptic Kepler orbits [14] and the hyperbolic ones [16,17], respectively. + +**Acknowledgments:** The author expresses his gratitude to Jürgen Parisi for his constant encouragement and support. He also profited from his critical reading of the manuscript. + +**Conflicts of Interest:** The author declares no conflict of interest. + +## Appendix. Probability Density for Type-II States + +We have to decompose the functions $\beta(\tau)$ and $c(\tau)$, as given by (21)and (22), into their real and imaginary parts. To this end, we set $C_2 = A_2 + iB_2$ with real constants $A_2$ and $B_2$ and $\beta = \beta_R + i\beta_I$. Using the definitions of $N_1$ and $C_1$ in terms of $\gamma_0$, we obtain + +$$ +\begin{aligned} +\beta_R &= \frac{1 + \gamma_0}{2} \frac{A_2 \cos(\tau) + B_2 \gamma_0 \sin(\tau)}{1 + (\gamma_0^2 - 1) \sin^2(\tau)}, \\ +\beta_I &= \frac{1 + \gamma_0}{2} \frac{B_2 \cos(\tau) - A_2 \gamma_0 \sin(\tau)}{1 + (\gamma_0^2 - 1) \sin^2(\tau)}. +\end{aligned} +\quad (A1) $$ + +In view of the function $c(\tau)$, we make use of the following auxiliary relations + +$$ F_c \equiv -C_2^2 [4 (\exp(2i \tau) + C_1)]^{-1} = F_R + i F_I, $$ + +$$ F_R = \left(1/(4N_1)\right) \left[(B_2^2 - A_2^2)\cos(2\tau) - 2A_2B_2\sin(2\tau) + (B_2^2 - A_2^2)C_1\right], $$ + +$$ F_I = \left(1/(4N_1)\right) \left[(A_2^2 - B_2^2)\sin(2\tau) - 2A_2B_2\cos(2\tau) - 2A_2B_2C_1\right], \quad (A2) $$ + +$$ \exp[c(\tau) + c^{*}(\tau)] = (1/\sqrt{N_1}) \exp[2C_3 + 2F_R], \quad (A3) $$ + +where the integration constant $C_3$ is assumed being real and the star suffix means complex conjugation. 
The probability density $P$ results from the wave function (10) in the form + +$$ P(\xi, \tau) = \frac{C_0^2}{\sqrt{N_1}} \exp \left[ 2C_3 + 2F_R + 2\beta_R \xi - \gamma_R \xi^2 \right], \quad (A4) $$ + +where $C_0$ is defined through the normalization integral + +$$ 1 = \int_{-\infty}^{\infty} d\xi P(\xi, \tau) = \frac{C_0^2 \sqrt{\pi}}{\sqrt{N_1 \gamma_R}} \exp(G), \quad G = 2C_3 + 2F_R + \frac{\beta_R^2}{\gamma_R}. \quad (A5) $$ + +From the expression of $G$, it is not obvious that $C_0$ is independent of $\tau$ which was assumed in (10). Clearly, since $\Phi := \psi/C_0$ obeys the Schrödinger equation and $H$ is hermitian, one has the property + +$$ \partial_{\tau}\langle\Phi|\Phi\rangle = 0. \quad (A6) $$ + +As a matter of fact, it is straightforward to show that + +$$ 2F_R + \frac{\beta_R^2}{\gamma_R} = [B_2^2(C_1 - 1) - A_2^2(1 + C_1)] [2(C_1^2 - 1)]^{-1} \quad (A7) $$ +---PAGE_BREAK--- + +does not depend on $\tau$. We now dispose of the integration constant $C_3$ such that the exponent $G$ vanishes: + +$$ C_3 = - [B_2^2(C_1-1) - A_2^2(1+C_1)] [4(C_1^2-1)]^{-1}. \quad (A8) $$ + +In view of $G=0$, we replace $2C_3 + 2F_R$ by $-\beta_R^2/\gamma_R$, so that + +$$ P(\xi, \tau) = \frac{C_0^2}{\sqrt{N_1}} \exp[-\gamma_R (\xi - \beta_R/\gamma_R)^2], \quad (A9) $$ + +which is the result (24). The normalization condition comes out immediately in the form + +$$ 1 = \frac{C_0^2 \sqrt{\pi}}{\sqrt{N_1 \gamma_R}} = \frac{C_0^2 \sqrt{\pi}}{\sqrt{1-C_1^2}} = \frac{C_0^2 \sqrt{\pi}(1+\gamma_0)}{2\sqrt{\gamma_0}}. \quad (A10) $$ + +## Appendix B. Proof of Completeness + +In order to prove the completeness of the functions (5), i.e., for the type-I HO case, we take advantage of the following generating function of the Hermite polynomials [23]: + +$$ \exp[2XZ - Z^2] = \sum_{n=0}^{\infty} \frac{Z^n}{n!} H_n(X). \quad (A11) $$ + +In the function (5), we replace $z$ by $\sqrt{2}Z$ to obtain + +$$ \psi_z(\zeta) = \pi^{-1/4} \exp[-ZZ^* - (1/2)\zeta^2] \exp[-Z^2 + 2\zeta Z]. 
\quad (A12) $$ + +With the aid of (A11), one can write + +$$ \psi_z(\zeta) = \exp[-(1/2)zz^*] \sum_{n=0}^{\infty} \frac{z^n}{\sqrt{n!}} \varphi_n(\zeta), \quad (A13) $$ + +where + +$$ \varphi_n(\zeta) = \frac{1}{\sqrt{n! 2^n \sqrt{\pi}}} H_n(\zeta) \exp[-(1/2)\zeta^2]. \quad (A14) $$ + +By means of (A13) and setting $z = u \exp[i\varphi]$, we obtain + +$$ \langle \zeta_2 | z \rangle \langle z | \zeta_1 \rangle = \exp[-u^2] \sum_{m,n=0}^{\infty} \frac{u^{n+m} \exp[i(m-n)\varphi]}{\sqrt{m!n!}} \varphi_m(\zeta_2) \varphi_n(\zeta_1). \quad (A15) $$ + +In (A15), the $\varphi$ integration projects out the terms $n=m$ with the result + +$$ \frac{1}{\pi} \int_{0}^{\infty} u du \int_{0}^{2\pi} d\varphi \langle \zeta_2 | z \rangle \langle z | \zeta_1 \rangle = 2 \int_{0}^{\infty} u du \exp[-u^2] \sum_{n=0}^{\infty} \frac{u^{2n}}{n!} \varphi_n(\zeta_2) \varphi_n(\zeta_1). \quad (A16) $$ + +After changing the integration variable $u \to v$ with $v = u^2$ with $udu = dv/2$, one uses + +$$ \int_{0}^{\infty} dv \frac{v^n}{n!} \exp[-v] = 1, \quad n = 0, 1, \dots \quad (A17) $$ + +and, in view of the completeness of the Hermite polynomials, arrives at + +$$ \frac{1}{\pi} \int_{0}^{\infty} u du \int_{0}^{2\pi} d\varphi \langle \zeta_2 | z \rangle \langle z | \zeta_1 \rangle = \sum_{n=0}^{\infty} \varphi_n(\zeta_2) \varphi_n(\zeta_1) = \delta(\zeta_2 - \zeta_1). \quad (A18) $$ +---PAGE_BREAK--- + +In the type-II HO and the RO cases, there appear additional purely imaginary phases in the +wave function, which do not depend on $\zeta_1$, $\zeta_2$, and drop out at the step (A15) of the completeness +proof above. + +References + +1. Schrödinger, E. Der stetige Übergang von der Mikro-zur Makromechanik. *Naturwissenschaften* **1926**, *14*, 664-666. + +2. Glauber, R.J. Coherent and incoherent states of the radiation field. *Phys. Rev.* **1963**, *131*, 2766. + +3. Glauber, R.J. Photon Correlations. *Phys. Rev. Lett.* **1963**, *10*, 84. + +4. Antoniou, I.E.; Progogine, I. 
Intrinsic irreversibility and integrability of dynamics. *Phys. A Stat. Mech. Appl.* **1993**, *192*, 443–464. + +5. Gentilini, S.; Braidotti, M.C.; Marcucci, G.; DelRe, E.; Conti, C. Physical realization of the Glauber quantum oscillator. *Sci. Rep.* **2015**, *5*, 15816. + +6. Glauber, R.J. Amplifiers, attenuators, and Schrödinger's cat. *Ann. N. Y. Acad. Sci.* **1986**, *480*, 336–372. + +7. Barton, G. Quantum mechanics of the inverted oscillator potential. *Ann. Phys.* **1986**, *166*, 322–363. + +8. Bhaduri, R.K.; Khare, A.; Reimann, S.M.; Tomusiak, E.L. The Riemann zeta function and the inverted harmonic oscillator. *Ann. Phys.* **1997**, *264*, 25–40. + +9. Guo, G.-J.; Ren, Z.-Z.; Ju, G.-X.; Guo, X.-Y. Quantum tunneling effect of a time-dependent inverted harmonic oscillator. *J. Phys. A Math. Theor.* **2011**, *44*, 185301. + +10. Galindo, A.; Pascual, P. *Quantum Mechanics I*; Springer: Berlin, Germany, 1990. + +11. Fock, V.A. Zur Theorie des Wasserstoffatoms. Z. Phys. **1935**, *98*, 145–154. + +12. Chen, A.C. Hydrogen atom as a four-dimensional oscillator. *Phys. Rev.* **A 1980**, *22*, 333–335. + +13. Gracia-Bondia, J.M. Hydrogen atom in the phase-space formulation of quantum mechanics. *Phys. Rev.* **A 1984**, *30*, 691–697. + +14. Gerry, C.C. Coherent states and the Kepler-Coulomb problem. *Phys. Rev.* **A 1986**, *33*, 6–11. + +15. Kustaanheimo, P.; Stiefel, E. Perturbation theory of Kepler motion based on spinor regularization. *J. Reine Angew. Math.* **1965**, *218*, 204–219. + +16. Rauh, A.; Parisi, J. Quantum mechanics of hyperbolic orbits in the Kepler problem. *Phys. Rev.* **A 2011**, *83*, 042101. + +17. Rauh, A.; Parisi, J. Quantum mechanics of Kepler orbits. *Adv. Stud. Theor. Phys.* **2014**, *8*, 889–938. + +18. Rauh, A.; Parisi, J. Quantum mechanical correction to Kepler’s equation. *Adv. Stud. Theor. Phys.* **2016**, *10*, 1–22. + +19. Baletto, F.; Ferrando, R. 
Structural properties of nanoclusters: Energetic, thermodynamic, and kinetic effects. *Rev. Mod. Phys.* **2005**, *77*, 371–423. + +20. Bauch, S.; Balzer, K.; Bonitz, M. Quantum breathing mode of trapped bosons and fermions at arbitrary coupling. *Phys. Rev. B* **2009**, *80*, 054515. + +21. Wolfram Research, Inc. Mathematica; Version 10.1.0.0; Wolfram Research, Inc.: Champaign, IL, USA, 2015. + +22. Chen, A.C.; Kibler, M. Connection between the hydrogen atom and the four-dimensional oscillator. *Phys. Rev.* **A 1985**, *31*, 3960–3963. + +23. Gradshteyn, I.S.; Ryzhik, I.M. Table of Integrals, Series, and Products; Academic Press: New York, NY, USA, 1965. + +© 2016 by the author. Licensee MDPI, Basel, Switzerland. This article is an open access article distributed under the terms and conditions of the Creative Commons Attribution (CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +Article + +Entangled Harmonic Oscillators and +Space-Time Entanglement + +Sibel Başkal ¹, Young S. Kim ²,* and Marilyn E. Noz ³ + +¹ Department of Physics, Middle East Technical University, 06800 Ankara, Turkey; baskal@newton.physics.metu.edu.tr + +² Center for Fundamental Physics, University of Maryland College Park, College Park, MD 20742, USA + +³ Department of Radiology, New York University School of Medicine, New York, NY 10016, USA; marilyn.noz@med.nyu.edu + +* Correspondence: yskim@umd.edu; Tel.: +1-301-937-6306 + +Academic Editor: Sergei D. Odintsov + +Received: 26 February 2016; Accepted: 20 June 2016; Published: 28 June 2016 + +**Abstract:** The mathematical basis for the Gaussian entanglement is discussed in detail, as well as its implications in the internal space-time structure of relativistic extended particles. It is shown that the Gaussian entanglement shares the same set of mathematical formulas with the harmonic oscillator in the Lorentz-covariant world. 
It is thus possible to transfer the concept of entanglement to the Lorentz-covariant picture of the bound state, which requires both space and time separations between two constituent particles. These space and time variables become entangled as the bound state moves with a relativistic speed. It is shown also that our inability to measure the time-separation variable leads to an entanglement entropy together with a rise in the temperature of the bound state. As was noted by Paul A. M. Dirac in 1963, the system of two oscillators contains the symmetries of the $O(3,2)$ de Sitter group containing two $O(3,1)$ Lorentz groups as its subgroups. Dirac noted also that the system contains the symmetry of the $Sp(4)$ group, which serves as the basic language for two-mode squeezed states. Since the $Sp(4)$ symmetry contains both rotations and squeezes, one interesting case is the combination of rotation and squeeze, resulting in a shear. While the current literature is mostly on the entanglement based on squeeze along the normal coordinates, the shear transformation is an interesting future possibility. The mathematical issues on this problem are clarified. + +**Keywords:** Gaussian entanglement; two coupled harmonic oscillators; coupled Lorentz groups; space-time separation; Wigner's little groups; $O(3,2)$ group; Dirac's generators for two coupled oscillators + +**PACS:** 03.65.Fd, 03.65.Pm, 03.67.-a, 05.30.-d + +# 1. Introduction + +Entanglement problems deal with fundamental issues in physics. Among them, the Gaussian entanglement is of current interest not only in quantum optics [1–4], but also in other dynamical systems [3,5–8]. The underlying mathematical language for this form of entanglement is that of harmonic oscillators. In this paper, we present first the mathematical tools that are and may be useful in this branch of physics. 
+ +The entangled Gaussian state is based on the formula: + +$$ \frac{1}{\cosh \eta} \sum_k (\tanh \eta)^k \chi_k(x) \chi_k(y) \quad (1) $$ + +where $\chi_n(x)$ is the $n^{th}$ excited-state oscillator wave function. +---PAGE_BREAK--- + +In Chapter 16 of their book [9], Walls and Milburn discussed in detail the role of this formula in the theory of quantum information. Earlier, this formula played the pivotal role for Yuen to formulate his two-photon coherent states or two-mode squeezed states [10]. The same formula was used by Yurke and Patasek in 1987 [11] and by Ekert and Knight [12] for the two-mode squeezed state where one of the photons is not observed. The effect of entanglement is to be seen from the beam splitter experiments [13,14]. + +In this paper, we point out first that the series of Equation (1) can also be written as a squeezed Gaussian form: + +$$ \frac{1}{\sqrt{\pi}} \exp \left\{ -\frac{1}{4} \left[ e^{-2\eta} (x+y)^2 + e^{2\eta} (x-y)^2 \right] \right\} \quad (2) $$ + +which becomes: + +$$ \frac{1}{\sqrt{\pi}} \exp \left\{ -\frac{1}{2} (x^2 + y^2) \right\} \qquad (3) $$ + +when $\eta = 0$. + +We can obtain the squeezed form of Equation (2) by replacing $x$ and $y$ by $x'$ and $y'$, respectively, where: + +$$ \begin{pmatrix} x' \\ y' \end{pmatrix} = \begin{pmatrix} \cosh \eta & -\sinh \eta \\ -\sinh \eta & \cosh \eta \end{pmatrix} \begin{pmatrix} x \\ y \end{pmatrix} \qquad (4) $$ + +If $x$ and $y$ are replaced by $z$ and $t$, Equation (4) becomes the formula for the Lorentz boost along the $z$ direction. Indeed, the Lorentz boost is a squeeze transformation [3,15]. + +The squeezed Gaussian form of Equation (2) plays the key role in studying boosted bound states in the Lorentz-covariant world [16–20], where $z$ and $t$ are the space and time separations between two constituent particles. 
Since the mathematics of this physical system is the same as the series given in Equation (1), the physical concept of entanglement can be transferred to the Lorentz-covariant bound state, as illustrated in Figure 1. + +**Figure 1.** One mathematics for two branches of physics. Let us look at Equations (1) and (2) applicable to quantum optics and special relativity, respectively. They are the same formula from the Lorentz group with different variables as in the case of the Inductor-Capacitor-Resistor (LCR) circuit and the mechanical oscillator sharing the same second-order differential equation. + +We can approach this problem from the system of two harmonic oscillators. In 1963, Paul A. M. Dirac studied the symmetry of this two-oscillator system and discussed all possible transformations +---PAGE_BREAK--- + +applicable to this oscillator [21]. He concluded that there are ten possible generators of transformations satisfying a closed set of commutation relations. He then noted that this closed set corresponds to the Lie algebra of the $O(3, 2)$ de Sitter group, which is the Lorentz group applicable to three space-like and two time-like dimensions. This $O(3, 2)$ group has two $O(3, 1)$ Lorentz groups as its subgroups. + +We note that the Lorentz group is the language of special relativity, while the harmonic oscillator is one of the major tools for interpreting bound states. Therefore, Dirac's two-oscillator system can serve as a mathematical framework for understanding quantum bound systems in the Lorentz-covariant world. + +Within this formalism, the series given in Equation (1) can be produced from the ten-generator Dirac system. 
In discussing the oscillator system, the standard procedure is to use the normal coordinates defined as: + +$$u = \frac{x+y}{\sqrt{2}}, \quad \text{and} \quad v = \frac{x-y}{\sqrt{2}} \qquad (5)$$ + +In terms of these variables, the transformation given in Equation (4) takes the form: + +$$\begin{pmatrix} u' \\ v' \end{pmatrix} = \begin{pmatrix} e^{-\eta} & 0 \\ 0 & e^{\eta} \end{pmatrix} \begin{pmatrix} u \\ v \end{pmatrix} \qquad (6)$$ + +where this is a squeeze transformation along the normal coordinates. While the normal-coordinate transformation is a standard procedure, it is interesting to note that it also serves as a Lorentz boost [18]. + +With these preparations, we shall study in Section 2 the system of two oscillators and coordinate transformations of current interest. It is pointed out in Section 3 that there are ten different generators for transformations, including those discussed in Section 2. It is noted that Dirac derived ten generators of transformations applicable to these oscillators, and they satisfy the closed set of commutation relations, which is the same as the Lie algebra of the $O(3, 2)$ de Sitter group containing two Lorentz groups among its subgroups. In Section 4, Dirac's ten-generator symmetry is studied in the Wigner phase-space picture, and it is shown that Dirac's symmetry contains both canonical and Lorentz transformations. + +While the Gaussian entanglement starts from the oscillator wave function in its ground state, we study in Section 5 the entanglements of excited oscillator states. We give a detailed explanation of how the series of Equation (1) can be derived from the squeezed Gaussian function of Equation (2). + +In Section 6, we study in detail how the sheared state can be derived from a squeezed state. It appears to be a rotated squeezed state, but this is not the case. 
In Section 7, we study what happens when one of the two entangled variables is not observed within the framework of Feynman's rest of the universe [22,23]. + +In Section 8, we note that most of the mathematical formulas in this paper have been used earlier for understanding relativistic extended particles in the Lorentz-covariant harmonic oscillator formalism [20,24–28]. These formulas allow us to transport the concept of entanglement from the current problem of physics to quantum bound states in the Lorentz-covariant world. The time separation between the constituent particles is not observable and is not known in the present form of quantum mechanics. However, this variable affects the real world by entangling itself with the longitudinal variable. + +## 2. Two-Dimensional Harmonic Oscillators + +The Gaussian form: + +$$\left[ \frac{1}{\pi} \right]^{1/4} \exp \left( -\frac{x^2}{2} \right) \qquad (7)$$ + +is used for many branches of science. For instance, we can construct this function by throwing dice. +---PAGE_BREAK--- + +In physics, this is the wave function for the one-dimensional harmonic oscillator in the ground state. This function is also used for the vacuum state in quantum field theory, as well as the zero-photon state in quantum optics. For excited oscillator states, the wave function takes the form: + +$$ \chi_n(x) = \left[ \frac{1}{\sqrt{\pi} 2^{n} n!} \right]^{1/2} H_n(x) \exp \left( -\frac{x^2}{2} \right) \quad (8) $$ + +where $H_n(x)$ is the Hermite polynomial of the $n$-th degree. The properties of this wave function are well known, and it becomes the Gaussian form of Equation (7) when $n=0$. 
+ +We can now consider the two-dimensional space with the orthogonal coordinate variables x and y and the same wave function with the y variable: + +$$ \chi_m(y) = \left[ \frac{1}{\sqrt{\pi} 2^{m} m!} \right]^{1/2} H_m(y) \exp \left( -\frac{y^2}{2} \right) \quad (9) $$ + +and construct the function: + +$$ \psi^{n,m}(x,y) = [\chi_n(x)] [\chi_m(y)] \quad (10) $$ + +This form is clearly separable in the x and y variables. If *n* and *m* are zero, the wave function becomes: + +$$ \psi^{0,0}(x,y) = \frac{1}{\sqrt{\pi}} \exp \left\{ -\frac{1}{2} (x^2 + y^2) \right\} \quad (11) $$ + +Under the coordinate rotation: + +$$ \begin{pmatrix} x' \\ y' \end{pmatrix} = \begin{pmatrix} \cos \theta & -\sin \theta \\ \sin \theta & \cos \theta \end{pmatrix} \begin{pmatrix} x \\ y \end{pmatrix} \quad (12) $$ + +this function remains separable. This rotation is illustrated in Figure 2. This is a transformation very familiar to us. + +We can next consider the scale transformation of the form: + +$$ \begin{pmatrix} x' \\ y' \end{pmatrix} = \begin{pmatrix} e^\eta & 0 \\ 0 & e^{-\eta} \end{pmatrix} \begin{pmatrix} x \\ y \end{pmatrix} \quad (13) $$ + +This scale transformation is also illustrated in Figure 2. This area-preserving transformation is known as the squeeze. Under this transformation, the Gaussian function is still separable. + +If the direction of the squeeze is rotated by 45°, the transformation becomes the diagonal transformation of Equation (6). Indeed, this is a squeeze in the normal coordinate system. This form of squeeze is most commonly used for squeezed states of light, as well as the subject of entanglements. It is important to note that, in terms of the x and y variables, this transformation can be written as Equation (4) [18]. In 1905, Einstein used this form of squeeze transformation for the longitudinal and time-like variables. This is known as the Lorentz boost. 
+ +In addition, we can consider the transformation of the form: + +$$ \begin{pmatrix} x' \\ y' \end{pmatrix} = \begin{pmatrix} 1 & 2\alpha \\ 0 & 1 \end{pmatrix} \begin{pmatrix} x \\ y \end{pmatrix} \quad (14) $$ + +This transformation shears the system as is shown in Figure 2. + +After the squeeze or shear transformation, the wave function of Equation (10) becomes non-separable, but it can still be written as a series expansion in terms of the oscillator wave functions. It can take the form: + +$$ \psi(x,y) = \sum_{n,m} A_{n,m} \chi_n(x) \chi_m(y) \quad (15) $$ +---PAGE_BREAK--- + +with: + +$$ \sum_{n,m} |A_{n,m}|^2 = 1 $$ + +if $\psi(x, y)$ is normalized, as was the case for the Gaussian function of Equation (11). + +## 2.1. Squeezed Gaussian Function + +Under the squeeze along the normal coordinate, the Gaussian form of Equation (11) becomes: + +$$ \psi_{\eta}(x, y) = \frac{1}{\sqrt{\pi}} \exp \left\{ -\frac{1}{4} \left[ e^{-2\eta}(x+y)^2 + e^{2\eta}(x-y)^2 \right] \right\} \quad (16) $$ + +which was given in Equation (2). This function is not separable in the x and y variables. These variables are now entangled. We obtain this form by replacing, in the Gaussian function of Equation (11), the x and y variables by $x'$ and $y'$, respectively, where: + +$$ x' = (\cosh \eta)x - (\sinh \eta)y, \quad \text{and} \quad y' = (\cosh \eta)y - (\sinh \eta)x \qquad (17) $$ + +This form of squeeze is illustrated in Figure 3, and the expansion of this squeezed Gaussian function becomes the series given in Equation (1) [20,26]. This aspect will be discussed in detail in Section 5. + +**Figure 2.** Transformations in the two-dimensional space. The object can be rotated, squeezed or sheared. In all three cases, the area remains invariant. + +**Figure 3.** Squeeze along the 45° direction, discussed most frequently in the literature. +---PAGE_BREAK--- + +In 1976 [10], Yuen discussed two-photon coherent states, often called squeezed states of light. 
This series expansion served as the starting point for two-mode squeezed states. More recently, in 2003, Giedke et al. [1] used this formula to formulate the concept of the Gaussian entanglement. + +There is another way to derive the series. For the harmonic oscillator wave functions, there are step-down and step-up operators [17]. These are defined as: + +$$a = \frac{1}{\sqrt{2}} \left( x + \frac{\partial}{\partial x} \right), \quad \text{and} \quad a^{\dagger} = \frac{1}{\sqrt{2}} \left( x - \frac{\partial}{\partial x} \right) \qquad (18)$$ + +If they are applied to the oscillator wave function, we have: + +$$a \chi_n(x) = \sqrt{n} \chi_{n-1}(x), \quad \text{and} \quad a^{\dagger} \chi_n(x) = \sqrt{n+1} \chi_{n+1}(x) \qquad (19)$$ + +Likewise, we can introduce $b$ and $b^\dagger$ operators applicable to $\chi_n(y)$: + +$$b = \frac{1}{\sqrt{2}} \left( y + \frac{\partial}{\partial y} \right), \quad \text{and} \quad b^{\dagger} = \frac{1}{\sqrt{2}} \left( y - \frac{\partial}{\partial y} \right) \qquad (20)$$ + +Thus: + +$$\begin{aligned} \left(a^{\dagger}\right)^{n} \chi_{0}(x) &= \sqrt{n!} \chi_{n}(x) \\ \left(b^{\dagger}\right)^{n} \chi_{0}(y) &= \sqrt{n!} \chi_{n}(y) \end{aligned} \qquad (21)$$ + +and: + +$$a \chi_0(x) = b \chi_0(y) = 0 \qquad (22)$$ + +In terms of these variables, the transformation leading the Gaussian function of Equation (11) to its squeezed form of Equation (16) can be written as: + +$$\exp\left\{\frac{\eta}{2}(a^{\dagger}b^{\dagger} - ab)\right\} \qquad (23)$$ + +which can also be written as: + +$$\exp\left\{-\eta\left(x\frac{\partial}{\partial y} + y\frac{\partial}{\partial x}\right)\right\} \qquad (24)$$ + +Next, we can consider the exponential form: + +$$\exp\left\{(\tanh \eta)a^{\dagger}b^{\dagger}\right\} \qquad (25)$$ + +which can be expanded as: + +$$\sum_{n} \frac{1}{n!} (\tanh \eta)^n (a^{\dagger} b^{\dagger})^n \qquad (26)$$ + +If this operator is applied to the ground state of Equation (11), the result is: + +$$\sum_{n} (\tanh 
\eta)^n \chi_n(x) \chi_n(y) \qquad (27)$$ + +This form is not normalized, while the series of Equation (1) is. What is the origin of this difference? + +There is a similar problem with the one-photon coherent state [29,30]. There, the series comes from the expansion of the exponential form: + +$$\exp\{\alpha a^{\dagger}\} \qquad (28)$$ +---PAGE_BREAK--- + +which can be expanded to: + +$$ \sum_n \frac{1}{n!} \alpha^n (a^\dagger)^n \qquad (29) $$ + +However, this operator is not unitary. In order to make this series unitary, we consider the exponential form: + +$$ \exp (\alpha a^{\dagger} - \alpha^* a) \qquad (30) $$ + +which is unitary. This expression can then be written as: + +$$ e^{-\alpha \alpha^*/2} [\exp(\alpha a^{\dagger})] [\exp(-\alpha^* a)] \qquad (31) $$ + +according to the Baker–Campbell–Hausdorff (BCH) relation [31,32]. If this is applied to the ground state, the last bracket can be dropped, and the result is: + +$$ e^{-\alpha \alpha^*/2} \exp[\alpha a^{\dagger}] \qquad (32) $$ + +which is the unitary operator with the normalization constant: + +$$ e^{-\alpha \alpha^*/2} $$ + +Likewise, we can conclude that the series of Equation (27) is different from that of Equation (1) due to the difference between the unitary operator of Equation (23) and the non-unitary operator of Equation (25). It may be possible to derive the normalization factor using the BCH formula, but it seems to be intractable at this time. The best way to resolve this problem is to present the exact calculation of the unitary operator leading to the normalized series of Equation (1). We shall return to this problem in Section 5, where squeezed excited states are studied. + +## 2.2. Sheared Gaussian Function + +In addition, there is a transformation called "shear," where only one of the two coordinates is translated, as shown in Figure 2. 
This transformation takes the form: + +$$ \begin{pmatrix} x' \\ y' \end{pmatrix} = \begin{pmatrix} 1 & 2\alpha \\ 0 & 1 \end{pmatrix} \begin{pmatrix} x \\ y \end{pmatrix} \qquad (33) $$ + +which leads to: + +$$ \begin{pmatrix} x' \\ y' \end{pmatrix} = \begin{pmatrix} x + 2\alpha y \\ y \end{pmatrix} \qquad (34) $$ + +This shear is one of the basic transformations in engineering sciences. In physics, this transformation plays the key role in understanding the internal space-time symmetry of massless particles [33–35]. This matrix plays the pivotal role during the transition from the oscillator mode to the damping mode in classical damped harmonic oscillators [36,37]. + +Under this transformation, the Gaussian form becomes: + +$$ \psi_{shr}(x,y) = \frac{1}{\sqrt{\pi}} \exp \left\{ -\frac{1}{2} \left[ (x - 2\alpha y)^2 + y^2 \right] \right\} \qquad (35) $$ + +It is possible to expand this into a series of the form of Equation (15) [38]. + +The transformation applicable to the Gaussian form of Equation (11) is: + +$$ \exp(-2\alpha y \frac{\partial}{\partial x}) \qquad (36) $$ +---PAGE_BREAK--- + +and the generator is: + +$$ -iy \frac{\partial}{\partial x} \tag{37} $$ + +It is of interest to see where this generator stands among the ten generators of Dirac. + +However, the most pressing problem is whether the sheared Gaussian form can be regarded as a rotated squeezed state. The basic mathematical issue is that the shear matrix of Equation (33) is triangular and cannot be diagonalized. Therefore, it cannot be a squeezed state. Yet, the Gaussian form of Equation (35) appears to be a rotated squeezed state, while not along the normal coordinates. We shall look at this problem in detail in Section 6. + +### 3. Dirac's Entangled Oscillators + +Paul A. M. Dirac devoted much of his life-long efforts to the task of making quantum mechanics compatible with special relativity. 
Harmonic oscillators serve as an instrument for illustrating quantum mechanics, while special relativity is the physics of the Lorentz group. Thus, Dirac attempted to construct a representation of the Lorentz group using harmonic oscillator wave functions [17,21]. + +In his 1963 paper [21], Dirac started from the two-dimensional oscillator whose wave function takes the Gaussian form given in Equation (11). He then considered unitary transformations applicable to this ground-state wave function. He noted that they can be generated by the following ten Hermitian operators: + +$$ L_1 = \frac{1}{2} (a^\dagger b + b^\dagger a), \quad L_2 = \frac{1}{2i} (a^\dagger b - b^\dagger a) $$ + +$$ L_3 = \frac{1}{2} (a^\dagger a - b^\dagger b), \quad S_3 = \frac{1}{2} (a^\dagger a + b b^\dagger) $$ + +$$ K_1 = -\frac{1}{4} (a^\dagger a^\dagger + aa - b^\dagger b^\dagger - bb) $$ + +$$ K_2 = \frac{i}{4} (a^\dagger a^\dagger - aa + b^\dagger b^\dagger - bb) $$ + +$$ K_3 = \frac{1}{2} (a^\dagger b^\dagger + ab) $$ + +$$ Q_1 = -\frac{i}{4} (a^\dagger a^\dagger - aa - b^\dagger b^\dagger + bb) $$ + +$$ Q_2 = -\frac{1}{4} (a^\dagger a^\dagger + aa + b^\dagger b^\dagger + bb) $$ + +$$ Q_3 = \frac{i}{2} (a^\dagger b^\dagger - ab) \tag{38} $$ + +He then noted that these operators satisfy the following set of commutation relations. + +$$ [L_i, L_j] = i\epsilon_{ijk}L_k, \quad [L_i, K_j] = i\epsilon_{ijk}K_k, \quad [L_i, Q_j] = i\epsilon_{ijk}Q_k $$ + +$$ [K_i, K_j] = [Q_i, Q_j] = -i\epsilon_{ijk}L_k, \quad [L_i, S_3] = 0 $$ + +$$ [K_i, Q_j] = -i\delta_{ij}S_3, \quad [K_i, S_3] = -iQ_i, \quad [Q_i, S_3] = iK_i \tag{39} $$ + +Dirac then determined that these commutation relations constitute the Lie algebra for the $O(3,2)$ de Sitter group with ten generators. This de Sitter group is the Lorentz group applicable to three space +---PAGE_BREAK--- + +coordinates and two time coordinates. 
Let us use the notation (x, y, z, t, s), with (x, y, z) as the space +coordinates and (t, s) as two time coordinates. Then, the rotation around the z axis is generated by: + +$$ +L_3 = \begin{pmatrix} 0 & -i & 0 & 0 & 0 \\ i & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \end{pmatrix} \tag{40} +$$ + +The generators $L_1$ and $L_2$ can also be constructed. The $K_3$ and $Q_3$ generators will take the form: + +$$ +K_3 = \begin{pmatrix} 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & i & 0 \\ 0 & 0 & i & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \end{pmatrix}, \quad Q_3 = \begin{pmatrix} 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & i \\ 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & i & 0 & 0 \end{pmatrix} \tag{41} +$$ + +From these two matrices, the generators $K_1, K_2, Q_1, Q_2$ can be constructed. The generator $S_3$ can be written as: + +$$ +S_3 = \begin{pmatrix} 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & -i \\ 0 & 0 & 0 & i & 0 \end{pmatrix} \quad (42) +$$ + +The last five-by-five matrix generates rotations in the two-dimensional space of (t, s). If we introduce +these two time variables, the O(3,2) group leads to two coupled Lorentz groups. The particle mass is +invariant under Lorentz transformations. Thus, one Lorentz group cannot change the particle mass. +However, with two coupled Lorentz groups, we can describe the world with variable masses, such as +the neutrino oscillations. + +In Section 2, we used the operators $Q_3$ and $K_3$ as the generators for the squeezed Gaussian +function. For the unitary transformation of Equation (23), we used: + +$$ +\exp(-i\eta Q_3) \tag{43} +$$ + +However, the exponential form of Equation (25) can be written as: + +$$ +\exp\{-i(\tanh \eta)(Q_3 + iK_3)\} \qquad (44) +$$ + +which is not unitary, as was seen before. + +From the space-time point of view, both $K_3$ and $Q_3$ generate Lorentz boosts along the z direction, +with the time variables $t$ and $s$, respectively. 
The fact that the squeeze and Lorentz transformations +share the same mathematical formula is well known. However, the non-unitary operator $iK_3$ does not +seem to have a space-time interpretation. + +As for the sheared state, the generator can be written as: + +$$ +Q_3 - L_2 \tag{45} +$$ + +leading to the expression given in Equation (37). This is a Hermitian operator leading to the unitary +transformation of Equation (36). +---PAGE_BREAK--- + +**4. Entangled Oscillators in the Phase-Space Picture** + +Also in his 1963 paper, Dirac states that the Lie algebra of Equation (39) can serve as the four-dimensional symplectic group $Sp(4)$. This group allows us to study squeezed or entangled states in terms of the four-dimensional phase space consisting of two position and two momentum variables [15,39,40]. + +In order to study the $Sp(4)$ contents of the coupled oscillator system, let us introduce the Wigner function defined as [41]: + +$$ +\begin{aligned} +W(x,y;p,q) = & \left(\frac{1}{\pi}\right)^2 \int \exp\{-2i(px' + qy')\} \\ +& \times \psi^*(x+x',y+y')\psi(x-x',y-y')dx'dy' +\end{aligned} +\quad (46) +$$ + +If the wave function $\psi(x, y)$ is the Gaussian form of Equation (11), the Wigner function becomes: + +$$ W(x,y:p,q) = \left(\frac{1}{\pi}\right)^2 \exp\left\{-\left(x^2 + p^2 + y^2 + q^2\right)\right\} \quad (47) $$ + +The Wigner function is defined over the four-dimensional phase space of $(x, p, y, q)$ just as in the case of classical mechanics. The unitary transformations generated by the operators of Equation (38) are translated into Wigner transformations [39,40,42]. As in the case of Dirac's oscillators, there are ten corresponding generators applicable to the Wigner function. 
They are: + +$$ +\begin{aligned} +L_1 &= +\frac{i}{2} \left\{ \left( x \frac{\partial}{\partial q} - q \frac{\partial}{\partial x} \right) + \left( y \frac{\partial}{\partial p} - p \frac{\partial}{\partial y} \right) \right\} \\ +L_2 &= -\frac{i}{2} \left\{ \left( x \frac{\partial}{\partial y} - y \frac{\partial}{\partial x} \right) + \left( p \frac{\partial}{\partial q} - q \frac{\partial}{\partial p} \right) \right\} \\ +L_3 &= +\frac{i}{2} \left\{ \left( x \frac{\partial}{\partial p} - p \frac{\partial}{\partial x} \right) - \left( y \frac{\partial}{\partial q} - q \frac{\partial}{\partial y} \right) \right\} \\ +S_3 &= -\frac{i}{2} \left\{ \left( x \frac{\partial}{\partial p} - p \frac{\partial}{\partial x} \right) + \left( y \frac{\partial}{\partial q} - q \frac{\partial}{\partial y} \right) \right\} +\end{aligned} +\quad (48) +$$ + +and: + +$$ +\begin{aligned} +K_1 &= -\frac{i}{2} \left\{ \left( x \frac{\partial}{\partial p} + p \frac{\partial}{\partial x} \right) - \left( y \frac{\partial}{\partial q} + q \frac{\partial}{\partial y} \right) \right\} \\ +K_2 &= -\frac{i}{2} \left\{ \left( x \frac{\partial}{\partial x} + y \frac{\partial}{\partial y} \right) - \left( p \frac{\partial}{\partial p} + q \frac{\partial}{\partial q} \right) \right\} \\ +K_3 &= +\frac{i}{2} \left\{ \left( x \frac{\partial}{\partial q} + q \frac{\partial}{\partial x} \right) + \left( y \frac{\partial}{\partial p} + p \frac{\partial}{\partial y} \right) \right\} \\ +Q_1 &= +\frac{i}{2} \left\{ \left( x \frac{\partial}{\partial x} + q \frac{\partial}{\partial q} \right) - \left( y \frac{\partial}{\partial y} + p \frac{\partial}{\partial p} \right) \right\} \\ +Q_2 &= -\frac{i}{2} \left\{ \left( x \frac{\partial}{\partial p} + p \frac{\partial}{\partial x} \right) + \left( y \frac{\partial}{\partial q} + q \frac{\partial}{\partial y} \right) \right\} \\ +Q_3 &= -\frac{i}{2} \left\{ \left( y \frac{\partial}{\partial x} + x \frac{\partial}{\partial y} \right) - \left( q 
\frac{\partial}{\partial p} + p \frac{\partial}{\partial q} \right) \right\} +\end{aligned} +\quad (49) +$$ +---PAGE_BREAK--- + +These generators also satisfy the Lie algebra given in Equations (38) and (39). Transformations generated by these generators have been discussed in the literature [15,40,42]. + +As in the case of Section 3, we are interested in the generators $Q_3$ and $K_3$. The transformation generated by $Q_3$ takes the form: + +$$ \left[ \exp \left\{ \eta \left( x \frac{\partial}{\partial y} + y \frac{\partial}{\partial x} \right) \right\} \right] \left[ \exp \left\{ -\eta \left( p \frac{\partial}{\partial q} + q \frac{\partial}{\partial p} \right) \right\} \right] \quad (50) $$ + +This exponential form squeezes the Wigner function of Equation (47) in the *x* *y* space, as well as in their corresponding momentum space. However, in the momentum space, the squeeze is in the opposite direction, as illustrated in Figure 4. This is what we expect from canonical transformation in classical mechanics. Indeed, this corresponds to the unitary transformation, which played the major role in Section 2. + +**Figure 4.** Transformations generated by $Q_3$ and $K_3$. As the parameter $\eta$ becomes larger, both the space and momentum distribution becomes larger. + +Even though shown insignificant in Section 2, $K_3$ had a definite physical interpretation in Section 3. The transformation generated by $K_3$ takes the form: + +$$ \left[ \exp \left\{ \eta \left( x \frac{\partial}{\partial q} + q \frac{\partial}{\partial x} \right) \right\} \right] \left[ \exp \left\{ \eta \left( y \frac{\partial}{\partial p} + p \frac{\partial}{\partial y} \right) \right\} \right] \quad (51) $$ + +This performs the squeeze in the *x* *q* and *y* *p* spaces. In this case, the squeezes have the same sign, and the rate of increase is the same in all directions. We can thus have the same picture of squeeze for both *x* *y* and *p* *q* spaces, as illustrated in Figure 4. 
This parallel transformation corresponds to the Lorentz squeeze [20,25]. + +As for the sheared state, the combination: + +$$ Q_3 - L_2 = -i \left( y \frac{\partial}{\partial x} + q \frac{\partial}{\partial p} \right) \quad (52) $$ + +generates the same shear in the *p* *q* space. +---PAGE_BREAK--- + +**5. Entangled Excited States** + +In Section 2, we discussed the entangled ground state and noted that the entangled state of Equation (1) is a series expansion of the squeezed Gaussian function. In this section, we are interested in what happens when we squeeze an excited oscillator state starting from: + +$$ \chi_n(x)\chi_m(y) \tag{53} $$ + +In order to entangle this state, we should replace $x$ and $y$, respectively, by $x'$ and $y'$ given in Equation (17). + +The question is how the oscillator wave function is squeezed after this operation. Let us note first that the wave function of Equation (53) satisfies the equation: + +$$ \frac{1}{2} \left\{ \left( x^2 - \frac{\partial^2}{\partial x^2} \right) - \left( y^2 - \frac{\partial^2}{\partial y^2} \right) \right\} \chi_n(x) \chi_m(y) = (n-m) \chi_n(x) \chi_m(y) \tag{54} $$ + +This equation is invariant under the squeeze transformation of Equation (17), and thus, the eigenvalue $(n-m)$ remains invariant. Unlike the usual two-oscillator system, the $x$ component and the $y$ component have opposite signs. This is the reason why the overall equation is squeeze-invariant [3,25,43]. + +We then have to write this squeezed oscillator in the series form of Equation (15). The most interesting case is of course for $m=n=0$, which leads to the Gaussian entangled state given in Equation (16). Another interesting case is for $m=0$, while $n$ is allowed to take all integer values. This single-excitation system has applications in the covariant oscillator formalism where no time-like excitations are allowed. The Gaussian entangled state is a special case of this single-excited oscillator system. 
+ +The most general case is for nonzero integers for both $n$ and $m$. The calculation for this case is available in the literature [20,44]. Seeing no immediate physical applications of this case, we shall not reproduce this calculation in this section. + +For the single-excitation system, we write the starting wave function as: + +$$ \chi_n(x)\chi_0(y) = \left[ \frac{1}{\pi 2^n n!} \right]^{1/2} H_n(x) \exp \left\{ -\left( \frac{x^2 + y^2}{2} \right) \right\} \tag{55} $$ + +There are no excitations along the $y$ coordinate. In order to squeeze this function, our plan is to replace $x$ and $y$ by $x'$ and $y'$, respectively, and write $\chi_n(x')\chi_0(y')$ as a series in the form: + +$$ \chi_n(x')\chi_0(y') = \sum_{k',k} A_{k',k}(n)\chi_{k'}(x)\chi_k(y) \tag{56} $$ + +Since $k' - k = n$ or $k' = n + k$, according to the eigenvalue of the differential equation given in Equation (54), we write this series as: + +$$ \chi_n(x')\chi_0(y') = \sum_{k} A_k(n)\chi_{(k+n)}(x)\chi_k(y) \tag{57} $$ + +with: + +$$ \sum_k |A_k(n)|^2 = 1 \tag{58} $$ + +This coefficient is: + +$$ A_k(n) = \int \chi_{k+n}(x)\chi_k(y)\chi_n(x')\chi_0(y') dx dy \tag{59} $$ + +This calculation was given in the literature in a fragmentary way in connection with a Lorentz-covariant description of extended particles starting from Ruiz's 1974 paper [45], subsequently by Kim et al. in +---PAGE_BREAK--- + +1979 [26] and by Rotbart in 1981 [44]. In view of the recent developments of physics, it seems necessary +to give one coherent calculation of the coefficient of Equation (59). + +We are now interested in the squeezed oscillator function: + +$$
\begin{equation}
\begin{aligned}
A_k(n) = & \left[ \frac{1}{\pi^2\, 2^{n} n!\, 2^{n+k} (n+k)!\, 2^{k} k!} \right]^{1/2} \\
& \times \int H_{n+k}(x) H_k(y) H_n(x') \exp \left\{ -\left( \frac{x^2 + y^2 + x'^2 + y'^2}{2} \right) \right\} dx dy
\end{aligned}
\tag{60}
\end{equation}
$$ + +As was noted by Ruiz [45], the key to the evaluation of this integral is to introduce the generating +function for the Hermite polynomials [46,47]: + +$$ +G(r,z) = \exp(-r^2 + 2rz) = \sum_m \frac{r^m}{m!} H_m(z) \quad (61) +$$ + +and evaluate the integral: + +$$ +I = \int G(r,x)G(s,y)G(r',x') \exp \left\{ - \left( \frac{x^2 + y^2 + x'^2 + y'^2}{2} \right) \right\} dx dy \quad (62) +$$ + +The integrand becomes one exponential function, and its exponent is quadratic in x and y. +This quadratic form can be diagonalized, and the integral can be evaluated [20,26]. The result is: + +$$ +I = \left[ \frac{\pi}{\cosh \eta} \right] \exp(2rs \tanh \eta) \exp\left(\frac{2rr'}{\cosh \eta}\right) \quad (63) +$$ + +We can now expand this expression and choose the coefficients of $r^{n+k}$, $s^{k}$ and $(r')^{n}$ for $H_{n+k}(x)$, $H_{k}(y)$ and +$H_{n}(x')$, respectively. The result is: + +$$ +A_k(n) = \left( \frac{1}{\cosh \eta} \right)^{(n+1)} \left[ \frac{(n+k)!}{n!k!} \right]^{1/2} (\tanh \eta)^k \quad (64) +$$ + +Thus, the series becomes: + +$$ +\chi_n(x')\chi_0(y') = \left(\frac{1}{\cosh \eta}\right)^{(n+1)} \sum_k \left[\frac{(n+k)!}{n!k!}\right]^{1/2} (\tanh \eta)^k \chi_{k+n}(x)\chi_k(y) \quad (65) +$$ + +If $n = 0$, it is the squeezed ground state, and this expression becomes the entangled state of +Equation (16). + +**6. E(2)-Sheared States** + +Let us next consider the effect of shear on the Gaussian form. From Figures 3 and 5, it is clear that +the sheared state is a rotated squeezed state. 
+ +In order to understand this transformation, let us note that the squeeze and rotation are generated +by the two-by-two matrices: + +$$ +K = \begin{pmatrix} 0 & i \\ i & 0 \end{pmatrix}, \quad J = \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix} \tag{66} +$$ +---PAGE_BREAK--- + +which generate the squeeze and rotation matrices of the form: + +$$ +\begin{align} +\exp(-i\eta K) &= \begin{pmatrix} \cosh \eta & \sinh \eta \\ \sinh \eta & \cosh \eta \end{pmatrix} \notag \\ +\exp(-i\theta J) &= \begin{pmatrix} \cos \theta & -\sin \theta \\ \sin \theta & \cos \theta \end{pmatrix} \tag{67} +\end{align} +$$ + +respectively. We can then consider: + +$$ +S = K - J = \begin{pmatrix} 0 & 2i \\ 0 & 0 \end{pmatrix} \tag{68} +$$ + +This matrix has the property that S² = 0. Thus, the transformation matrix becomes: + +$$ +\exp(-i\alpha S) = \begin{pmatrix} 1 & 2\alpha \\ 0 & 1 \end{pmatrix} \qquad (69) +$$ + +Since $S^2 = 0$, the Taylor expansion truncates, and the transformation matrix becomes the triangular matrix of Equation (34), leading to the transformation: + +$$ +\begin{pmatrix} x \\ y \end{pmatrix} \rightarrow \begin{pmatrix} x + 2\alpha y \\ y \end{pmatrix} \qquad (70) +$$ + +The shear generator S of Equation (68) indicates that the infinitesimal transformation is a rotation followed by a squeeze. Since both rotation and squeeze are area-preserving transformations, the shear should also be an area-preserving transformations. + +Figure 5. Shear transformation of the Gaussian form given in Equation (11). + +In view of Figure 5, we should ask whether the triangular matrix of Equation (69) can be obtained from one squeeze matrix followed by one rotation matrix. This is not possible mathematically. 
It can however, be written as a squeezed rotation matrix of the form: + +$$ +\begin{pmatrix} e^{\lambda/2} & 0 \\ 0 & e^{-\lambda/2} \end{pmatrix} \begin{pmatrix} \cos \omega & \sin \omega \\ -\sin \omega & \cos \omega \end{pmatrix} \begin{pmatrix} e^{-\lambda/2} & 0 \\ 0 & e^{\lambda/2} \end{pmatrix} \quad (71) +$$ + +resulting in: + +$$ +\left( \begin{array}{cc} \cos \omega & e^{\lambda} \sin \omega \\ -e^{-\lambda} \sin \omega & \cos \omega \end{array} \right) \qquad (72) +$$ + +If we let: + +$$ +(\sin \omega) = 2\alpha e^{-\lambda} \tag{73} +$$ +---PAGE_BREAK--- + +Then: + +$$ +\begin{pmatrix} +\cos \omega & 2\alpha \\ +-2\alpha e^{-2\lambda} & \cos \omega +\end{pmatrix} +\qquad (74) +$$ + +If $\lambda$ becomes infinite, the angle $\omega$ becomes zero, and this matrix becomes the triangular matrix of Equation (69). This is a singular process where the parameter $\lambda$ goes to infinity. + +If this transformation is applied to the Gaussian form of Equation (11), it becomes: + +$$ +\psi(x, y) = \frac{1}{\sqrt{\pi}} \exp \left\{ -\frac{1}{2} \left[ (x - 2\alpha y)^2 + y^2 \right] \right\} \quad (75) +$$ + +The question is whether the exponential portion of this expression can be written as: + +$$ +\exp \left\{ -\frac{1}{2} \left[ e^{-2\eta} (x \cos \theta + y \sin \theta)^2 + e^{2\eta} (x \sin \theta - y \cos \theta)^2 \right] \right\} \quad (76) +$$ + +The answer is yes. This is possible if: + +$$ +e^{2\eta} = 1 + 2\alpha^2 + 2\alpha \sqrt{\alpha^2 + 1} +e^{-2\eta} = 1 + 2\alpha^2 - 2\alpha \sqrt{\alpha^2 + 1} +$$ + +In Equation (74), we needed a limiting case of $\lambda$ becoming infinite. This is necessarily a singular transformation. On the other hand, the derivation of the Gaussian form of Equation (75) appears to be analytic. How is this possible? 
In order to achieve the transformation from the Gaussian form of Equations (11) to (75), we need the linear transformation: + +$$ +\begin{pmatrix} \cos \theta & -\sin \theta \\ \sin \theta & \cos \theta \end{pmatrix} \begin{pmatrix} e^\eta & 0 \\ 0 & e^{-\eta} \end{pmatrix} \tag{78} +$$ + +If the initial form is invariant under rotations as in the case of the Gaussian function of Equation (11), +we can add another rotation matrix on the right-hand side. We choose that rotation matrix to be: + +$$ +\begin{pmatrix} \cos(\theta - \pi/2) & -\sin(\theta - \pi/2) \\ \sin(\theta - \pi/2) & \cos(\theta - \pi/2) \end{pmatrix} \tag{79} +$$ + +write the three matrices as: + +$$ +\begin{pmatrix} \cos \theta' & -\sin \theta' \\ \sin \theta' & \cos \theta' \end{pmatrix} \begin{pmatrix} \cosh \eta & \sinh \eta \\ \sinh \eta & \cosh \eta \end{pmatrix} \begin{pmatrix} \cos \theta' & -\sin \theta' \\ \sin \theta' & \cos \theta' \end{pmatrix} \quad (80) +$$ + +with: + +$$ +\theta' = \theta - \frac{\pi}{4} +$$ + +The multiplication of these three matrices leads to: + +$$ +\begin{pmatrix} +(\cosh \eta) \sin(2\theta) & \sinh \eta + (\cosh \eta) \cos(2\theta) \\ +\sinh \eta - (\cosh \eta) \cos(2\theta) & (\cosh \eta) \sin(2\theta) +\end{pmatrix} +\quad (81) +$$ +---PAGE_BREAK--- + +The lower-left element can become zero when $\sinh\eta = \cosh(\eta)\cos(2\theta)$, and consequently, this matrix becomes: + +$$ \begin{pmatrix} 1 & 2 \sinh \eta \\ 0 & 1 \end{pmatrix} \qquad (82) $$ + +Furthermore, this matrix can be written in the form of a squeezed rotation matrix given in Equation (72), with: + +$$ \cos \omega = (\cosh \eta) \sin(2\theta) $$ + +$$ e^{-2\lambda} = \frac{\cos(2\theta) - \tanh \eta}{\cos(2\theta) + \tanh \eta} \qquad (83) $$ + +The matrices of the form of Equations (72) and (81) are known as the Wigner and Bargmann decompositions, respectively [33,36,48–50]. + +## 7. Feynman's Rest of the Universe + +We need the concept of entanglement in quantum systems of two variables. 
The issue is how the measurement of one variable affects the other variable. The simplest case is what happens to the first variable while no measurements are taken on the second variable. This problem has a long history since von Neumann introduced the concept of the density matrix in 1932 [51]. While there are many books and review articles on this subject, Feynman stated this problem in his own colorful way. In his book on statistical mechanics [22], Feynman makes the following statement about the density matrix. + +*When we solve a quantum-mechanical problem, what we really do is divide the universe into two parts—the system in which we are interested and the rest of the universe. We then usually act as if the system in which we are interested comprised the entire universe. To motivate the use of density matrices, let us see what happens when we include the part of the universe outside the system.* + +Indeed, Yurke and Potasek [11] and also Ekert and Knight [12] studied this problem in the two-mode squeezed state using the entanglement formula given in Equation (16). Later in 1999, Han et al. studied this problem with two coupled oscillators where one oscillator is observed while the other is not and, thus, is in the rest of the universe as defined by Feynman [23]. + +Somewhat earlier in 1990 [27], Kim and Wigner observed that there is a time separation wherever there is a space separation in the Lorentz-covariant world. The Bohr radius is a space separation. If the system is Lorentz-boosted, the time-separation becomes entangled with the space separation. However, in the present form of quantum mechanics, this time-separation variable is not measured and not understood. + +This variable was mentioned in the paper of Feynman et al. in 1971 [43], but the authors say they would drop this variable because they do not know what to do with it. While what Feynman et al. 
did was not quite respectable from the scientific point of view, they made a contribution by pointing out the existence of the problem. In 1990, Kim and Wigner [27] noted that the time-separation variable belongs to Feynman's rest of the universe and studied its consequences in the observable world. + +In this section, we first reproduce the work of Kim and Wigner using the *x* and *y* variables and then study the consequences. Let us introduce the notation $\psi_{\eta}^{n}(x,y)$ for the squeezed oscillator wave function given in Equation (65): + +$$ \psi_{\eta}^{n}(x,y) = \chi_{n}(x')\chi_{0}(y') \qquad (84) $$ + +with no excitations along the *y* direction. For $\eta = 0$, this expression becomes $\chi_n(x)\chi_0(y)$. + +From this wave function, we can construct the pure-state density matrix as: + +$$ \rho_{\eta}^{n}(x, y; r, s) = \psi_{\eta}^{n}(x, y)\psi_{\eta}^{n}(r, s) \qquad (85) $$ +---PAGE_BREAK--- + +which satisfies the condition $\rho^2 = \rho$, which means: + +$$ \rho_{\eta}^{n}(x, y; r, s) = \int \rho_{\eta}^{n}(x, y; u, v) \rho_{\eta}^{n}(u, v; r, s) du dv \quad (86) $$ + +As illustrated in Figure 6, it is not possible to make measurements on the variable $y$. We thus have to take the trace of this density matrix along the $y$ axis, resulting in: + +$$ \begin{aligned} \rho_{\eta}^{n}(x,r) &= \int \psi_{\eta}^{n}(x,y)\psi_{\eta}^{n}(r,y)dy \\ &= \left(\frac{1}{\cosh \eta}\right)^{2(n+1)} \sum_{k} \frac{(n+k)!}{n!k!} (\tanh \eta)^{2k} \chi_{n+k}(x) \chi_{k+n}(r) \end{aligned} \quad (87) $$ + +The trace of this density matrix is one, but the trace of $\rho^2$ is: + +$$ \begin{aligned} \mathrm{Tr} (\rho^2) &= \int \rho_{\eta}^{n}(x,r)\rho_{\eta}^{n}(r,x)drdx \\ &= \left(\frac{1}{\cosh \eta}\right)^{4(n+1)} \sum_{k} \left[\frac{(n+k)!}{n!k!}\right]^2 (\tanh \eta)^{4k} \end{aligned} \quad (88) $$ + +which is less than one. This is due to the fact that we are not observing the $y$ variable. Our knowledge is less than complete. 
+ +**Figure 6.** Feynman's rest of the universe. As the Gaussian function is squeezed, the $x$ and $y$ variables become entangled. If the $y$ variable is not measured, it affects the quantum mechanics of the $x$ variable. + +The standard way to measure this incompleteness is to calculate the entropy defined as [51–53]: + +$$ S = -\operatorname{Tr} (\rho(x, r) \ln[\rho(x, r)]) \quad (89) $$ + +which leads to: + +$$ S = 2(n+1)[(\cosh \eta)^2 \ln(\cosh \eta) - (\sinh \eta)^2 \ln(\sinh \eta)] \\ - \left(\frac{1}{\cosh \eta}\right)^{2(n+1)} \sum_k \frac{(n+k)!}{n!k!} \ln\left[\frac{(n+k)!}{n!k!}\right] (\tanh \eta)^{2k} \quad (90) $$ + +Let us go back to the wave function given in Equation (84). As is illustrated in Figure 6, its localization property is dictated by its Gaussian factor, which corresponds to the ground-state wave +---PAGE_BREAK--- + +function. For this reason, we expect that much of the behavior of the density matrix or the entropy for +the $n^{th}$ excited state will be the same as that for the ground state with $n = 0$. For this state, the density +matrix is: + +$$ \rho_{\eta}(x, r) = \left( \frac{1}{\pi \cosh(2\eta)} \right)^{1/2} \exp \left\{ -\frac{1}{4} \left[ \frac{(x+r)^2}{\cosh(2\eta)} + (x-r)^2 \cosh(2\eta) \right] \right\} \quad (91) $$ + +and the entropy is: + +$$ S_{\eta} = 2 \left[ (\cosh \eta)^2 \ln(\cosh \eta) - (\sinh \eta)^2 \ln(\sinh \eta) \right] \quad (92) $$ + +The density distribution $\rho_\eta(x,x)$ becomes: + +$$ \rho_{\eta}(x,x) = \left( \frac{1}{\pi \cosh(2\eta)} \right)^{1/2} \exp \left( -\frac{x^2}{\cosh(2\eta)} \right) \qquad (93) $$ + +The width of the distribution becomes $\sqrt{\cosh(2\eta)}$, and the distribution becomes wide-spread as $\eta$ becomes larger. Likewise, the momentum distribution becomes wide-spread as can be seen in Figure 4. This simultaneous increase in the momentum and position distribution widths is due to our inability to measure the y variable hidden in Feynman's rest of the universe [22]. 
+ +In their paper of 1990 [27], Kim and Wigner used the *x* and *y* variables as the longitudinal and time-like variables respectively in the Lorentz-covariant world. In the quantum world, it is a widely-accepted view that there are no time-like excitations. Thus, it is fully justified to restrict the *y* component to its ground state, as we did in Section 5. + +**8. Space-Time Entanglement** + +The series given in Equation (1) plays the central role in the concept of the Gaussian or continuous-variable entanglement, where the measurement on one variable affects the quantum mechanics of the other variable. If one of the variables is not observed, it belongs to Feynman's rest of the universe. + +The series of the form of Equation (1) was developed earlier for studying harmonic oscillators in moving frames [20,24–28]. Here, *z* and *t* are the space-like and time-like separations between the two constituent particles bound together by a harmonic oscillator potential. There are excitations along the longitudinal direction. However, no excitations are allowed along the time-like direction. Dirac described this as the “c-number” time-energy uncertainty relation [16]. Dirac in 1927 was talking about the system without special relativity. In 1945 [17], Dirac attempted to construct space-time wave functions using harmonic oscillators. In 1949 [18], Dirac introduced his light-cone coordinate system for Lorentz boosts, demonstrating that the boost is a squeeze transformation. It is now possible to combine Dirac’s three observations to construct the Lorentz covariant picture of quantum bound states, as illustrated in Figure 7. + +If the system is at rest, we use the wave function: + +$$ \psi_0^n(z,t) = \chi_n(z)\chi_0(t) \qquad (94) $$ + +which allows excitations along the *z* axis, but no excitations along the *t* axis, according to Dirac's c-number time-energy uncertainty relation. 
+ +If the system is boosted, the *z* and *t* variables are replaced by *z'* and *t'* where: + +$$ z' = (\cosh \eta)z - (\sinh \eta)t, \quad \text{and} \quad t' = -(\sinh \eta)z + (\cosh \eta)t \qquad (95) $$ + +This is a squeeze transformation as in the case of Equation (17). In terms of these space-time variables, the wave function of Equation (84), can be written as: + +$$ \psi_{\eta}^{n}(z, t) = \chi_{n}(z')\chi_{0}(t') \qquad (96) $$ +---PAGE_BREAK--- + +and the series of Equation (65) then becomes: + +$$ \psi_{\eta}^{n}(z, t) = \left(\frac{1}{\cosh \eta}\right)^{(n+1)} \sum_{k} \left[\frac{(n+k)!}{n!k!}\right]^{1/2} (\tanh \eta)^{k} \chi_{k+n}(z) \chi_{k}(t) \quad (97) $$ + +**Figure 7.** Dirac's form of Lorentz-covariant quantum mechanics. In addition to Heisenberg's uncertainty relation, which allows excitations along the spatial direction, there is the "c-number" time-energy uncertainty without excitations. This form of quantum mechanics can be combined with Dirac's light-cone picture of Lorentz boost, resulting in the Lorentz-covariant picture of quantum mechanics. The elliptic squeeze shown in this figure can be called the space-time entanglement. + +Since the Lorentz-covariant oscillator formalism shares the same set of formulas with the Gaussian entangled states, it is possible to explain some aspects of space-time physics using the concepts and terminologies developed in quantum optics, as illustrated in Figure 1. + +The time-separation variable is a case in point. The Bohr radius is a well-defined spatial separation between the proton and electron in the hydrogen atom. However, if the atom is boosted, this radius picks up a time-like separation. This time-separation variable does not exist in the Schrödinger picture of quantum mechanics. However, this variable plays the pivotal role in the covariant harmonic oscillator formalism. 
It is gratifying to note that this “hidden or forgotten” variable plays a role in the real world while being entangled with the observable longitudinal variable. With this point in mind, let us study some of the consequences of this space-time entanglement. + +First of all, does the wave function of Equation (96) carry a probability interpretation in the Lorentz-covariant world? Since $dzdt = dz'dt'$, the normalization: + +$$ \int |\psi_{\eta}^{n}(z, t)|^{2} dtdz = 1 \qquad (98) $$ + +This is a Lorentz-invariant normalization. If the system is at rest, the z and t variables are completely dis-entangled, and the spatial component of the wave function satisfies the Schrödinger equation without the time-separation variable. + +However, in the Lorentz-covariant world, we have to consider the inner product: + +$$ (\psi_{\eta}^{n}(z,t), \psi_{\eta'}^{m}(z,t)) = \int [\psi_{\eta}^{n}(z,t)]^{*} \psi_{\eta'}^{m}(z,t) dzdt \quad (99) $$ +---PAGE_BREAK--- + +The evaluation of this integral was carried out by Michael Ruiz in 1974 [45], and the result was: + +$$ \left( \frac{1}{|\cosh(\eta - \eta')|} \right)^{n+1} \delta_{nm} \qquad (100) $$ + +In order to see the physical implications of this result, let us assume that one of the oscillators is at rest with $\eta' = 0$ and the other is moving with the velocity $\beta = \tanh(\eta)$. Then, the result is: + +$$ (\psi_{\eta}^{n}(z,t), \psi_{0}^{m}(z,t)) = (\sqrt{1-\beta^2})^{n+1} \delta_{nm} \qquad (101) $$ + +Indeed, the wave functions are orthonormal if they are in the same Lorentz frame. If one of them is boosted, the inner product shows the effect of Lorentz contraction. We are familiar with the contraction $\sqrt{1-\beta^2}$ for the rigid rod. The ground state of the oscillator wave function is contracted like a rigid rod. + +The probability density $|\psi_\eta^0(z)|^2$ is for the oscillator in the ground state, and it has one hump. For the $n^{th}$ excited state, there are $(n+1)$ humps. 
If each hump is contracted like $\sqrt{1-\beta^2}$, the net contraction factor is $(\sqrt{1-\beta^2})^{n+1}$ for the $n^{th}$ excited state. This result is illustrated in Figure 8. + +**Figure 8.** Orthogonality relations for two covariant oscillator wave functions. The orthogonality relation is preserved for different frames. However, they show the Lorentz contraction effect for two different frames. + +With this understanding, let us go back to the entanglement problem. The ground state wave function takes the Gaussian form given in Equation (11): + +$$ \psi_0(z,t) = \frac{1}{\sqrt{\pi}} \exp \left\{ -\frac{1}{2} (z^2 + t^2) \right\} \qquad (102) $$ + +where the x and y variables are replaced by z and t, respectively. If Lorentz-boosted, this Gaussian function becomes squeezed to [20,24,25]: + +$$ \psi_{\eta}^{0}(z,t) = \frac{1}{\sqrt{\pi}} \exp \left\{ -\frac{1}{4} \left[ e^{-2\eta}(z+t)^2 + e^{2\eta}(z-t)^2 \right] \right\} \qquad (103) $$ + +leading to the series: + +$$ \frac{1}{\cosh \eta} \sum_k (\tanh \eta)^k \chi_k(z) \chi_k(t) \qquad (104) $$ + +According to this formula, the z and t variables are entangled in the same way as the x and y variables are entangled. +---PAGE_BREAK--- + +Here, the z and t variables are space and time separations between two particles bound together by the oscillator force. The concept of the space separation is well defined, as in the case of the Bohr radius. On the other hand, the time separation is still hidden or forgotten in the present form of quantum mechanics. In the Lorentz-covariant world, this variable affects what we observe in the real world by entangling itself with the longitudinal spatial separation. + +In Chapter 16 of their book [9], Walls and Milburn wrote down the series of Equation (1) and discussed what would happen when the $\eta$ parameter becomes infinitely large. 
We note that the series given in Equation (104) shares the same expression as the form given by Walls and Milburn, as well as other papers dealing with the Gaussian entanglement. As in the case of Walls and Milburn, we are interested in what happens when $\eta$ becomes very large. + +As we emphasized throughout the present paper, it is possible to study the entanglement series using the squeezed Gaussian function given in Equation (103). It is then possible to study this problem using the ellipse. Indeed, we can carry out the mathematics of entanglement using the ellipse shown in Figure 9. This figure is the same as that of Figure 6, but it illustrates the entanglement of the space and time separations, instead of the x and y variables. If the particle is at rest with $\eta = 0$, the Gaussian form corresponds to the circle in Figure 9. When the particle gains speed, this Gaussian function becomes squeezed into an ellipse. This ellipse becomes concentrated along the light cone with $t = z$, as $\eta$ becomes very large. + +The point is that we are able to observe this effect in the real world. These days, the velocity of protons from high-energy accelerators is very close to that of light. According to Gell-Mann [54], the proton is a bound state of three quarks. Since quarks are confined in the proton, they have never been observed, and the binding force must be like that of the harmonic oscillator. Furthermore, the observed mass spectra of the hadrons exhibit the degeneracy of the three-dimensional harmonic oscillator [43]. We use the word “hadron” for the bound state of the quarks. The simplest hadron is thus the bound state of two quarks. + +In 1969 [55], Feynman observed that the same proton, when moving with a velocity close to that of light, can be regarded as a collection of partons, with the following peculiar properties. + +1. The parton picture is valid only for protons moving with velocity close to that of light. + +2. 
The interaction time between the quarks becomes dilated, and partons are like free particles. + +3. The momentum distribution becomes wide-spread as the proton moves faster. Its width is proportional to the proton momentum. + +4. The number of partons is not conserved, while the proton starts with a finite number of quarks. + +**Figure 9.** Feynman's rest of the universe. This figure is the same as Figure 6. Here, the space variable z and the time variable t are entangled. +---PAGE_BREAK--- + +Indeed, Figure 10 tells why the quark and parton models are two limiting cases of one Lorentz-covariant entity. In the oscillator regime, the three-particle system can be reduced to two independent two-particle systems [43]. Also in the oscillator regime, the momentum-energy wave function takes the same form as the space-time wave function, thus with the same squeeze or entanglement property as illustrated in this figure. This leads to the wide-spread momentum distribution [20,56,57]. + +**Figure 10.** The transition from the quark to the parton model through space-time entanglement. When $\eta = 0$, the system is called the quark model where the space separation and the time separation are dis-entangled. Their entanglement becomes maximum when $\eta = \infty$. The quark model is transformed continuously to the parton model as the $\eta$ parameter increases from zero to $\infty$. The mathematics of this transformation is given in terms of circles and ellipses. + +Also in Figure 10, the time-separation between the quarks becomes large as $\eta$ becomes large, leading to a weaker spring constant. This is why the partons behave like free particles [20,56,57]. + +As $\eta$ becomes very large, all of the particles are confined into a narrow strip around the light cone. The number of particles is not constant for massless particles as in the case of black-body radiation [20,56,57]. + +Indeed, the oscillator model explains the basic features of the hadronic spectra [43]. 
Does the oscillator model tell the basic feature of the parton distribution observed in high-energy laboratories? The answer is yes. In his 1981 paper [58], Paul Hussar compared the parton distribution observed in a high-energy laboratory with the Lorentz-boosted Gaussian distribution. They are close enough to justify that the quark and parton models are two limiting cases of one Lorentz-covariant entity. + +To summarize, the proton makes a phase transition from the bound state into a plasma state as it moves faster, as illustrated in Figure 10. The unobserved time-separation variable becomes more prominent as $\eta$ becomes larger. We can now go back to the form of this entropy given in Equation (92) and calculate it numerically. It is plotted against $(\tanh \eta)^2 = \beta^2$ in Figure 11. The entropy is zero when the hadron is at rest, and it becomes infinite as the hadronic speed reaches the speed of light. +---PAGE_BREAK--- + +Figure 11. Entropy and temperature as functions of $[\tanh(\eta)]^2 = \beta^2$. They are both zero when the hadron is at rest, but they become infinitely large when the hadronic speed becomes close to that of light. The curvature for the temperature plot changes suddenly around $[\tanh(\eta)]^2 \approx 0.8$, indicating a phase transition. + +Let us go back to the expression given in Equation (87). For this ground state, the density matrix becomes: + +$$ \rho_{\eta}(z, z') = \left( \frac{1}{\cosh \eta} \right)^2 \sum_k (\tanh \eta)^{2k} \chi_k(z) \chi_k(z') \quad (105) $$ + +We can now compare this expression with the density matrix for the thermally-excited oscillator state [22]: + +$$ \rho_{T}(z, z') = (1 - e^{-1/T}) \sum_{k} e^{-k/T} \chi_{k}(z) \chi_{k}(z') \quad (106) $$ + +By comparing these two expressions, we arrive at: + +$$ [\tanh(\eta)]^2 = e^{-1/T} \quad (107) $$ + +and thus: + +$$ T = \frac{-1}{\ln[(\tanh \eta)^2]} \quad (108) $$ + +This temperature is also plotted against $(\tanh \eta)^2$ in Figure 11. 
The temperature is zero if the hadron is at rest, but it becomes infinite when the hadronic speed becomes close to that of light. The slope of the curvature changes suddenly around $(\tanh \eta)^2 \approx 0.8$, indicating a phase transition from the bound state to the plasma state. + +In this section, we have shown how useful the concept of entanglement is in understanding the role of the time-separation in high energy hadronic physics, including Gell-Mann's quark model and Feynman's parton model as two limiting cases of one Lorentz-covariant entity. + +**9. Concluding Remarks** + +The main point of this paper is the mathematical identity: + +$$ \frac{1}{\sqrt{\pi}} \exp \left\{ -\frac{1}{4} \left[ e^{-2\eta} (x+y)^2 + e^{2\eta} (x-y)^2 \right] \right\} = \frac{1}{\cosh \eta} \sum_k (\tanh \eta)^k \chi_k(x) \chi_k(y) \quad (109) $$ + +which says that the series of Equation (1) is an expansion of the Gaussian form given in Equation (2). +---PAGE_BREAK--- + +The first derivation of this series was published in 1979 [26] as a formula from the Lorentz group. Since this identity is not well known, we explained in Section 5 how this formula can be derived from the generating function of the Hermite polynomials. + +While the series serves useful purposes in understanding the physics of entanglement, the Gaussian form can be used to transfer this idea to high-energy hadronic physics. The hadron, such as the proton, is a quantum bound state. As was pointed out in Section 8, the squeezed Gaussian function of Equation (109) plays the pivotal role for hadrons moving with relativistic speeds. + +The Bohr radius is a very important quantity in physics. It is a spatial separation between the proton and electron in the hydrogen atom. Likewise, there is a space-like separation between constituent particles in a bound state at rest. When the bound state moves, it picks up a time-like component. 
However, in the present form of quantum mechanics, this time-like separation is not recognized. Indeed, this variable is hidden in Feynman's rest of the universe. When the system is Lorentz-boosted, this variable entangles itself with the measurable longitudinal variable. Our failure to measure this entangled variable appears in the form of entropy and temperature in the real world. + +While harmonic oscillators are applicable to many aspects of quantum mechanics, Paul A. M. Dirac observed in 1963 [21] that the system of two oscillators contains also the symmetries of the Lorentz group. We discussed in this paper one concrete case of Dirac's symmetry. There are different languages for harmonic oscillators, such as the Schrödinger wave function, step-up and step-down operators and the Wigner phase-space distribution function. In this paper, we used extensively a pictorial language with circles and ellipses. + +Let us go back to Equation (109); this mathematical identity was published in 1979 as textbook material in the American Journal of Physics [26], and the same formula was later included in a textbook on the Lorentz group [20]. It is gratifying to note that the same formula serves as a useful tool for the current literature in quantum information theory [59,60]. + +**Author Contributions:** Each of the authors participated in developing the material presented in this paper and in writing the manuscript. + +**Conflicts of Interest:** The authors declare that no conflict of interest exists. + +## References + +1. Giedke, G.; Wolf, M.M.; Krueger, O.; Werner, R.F.; Cirac, J.I. Entanglement of formation for symmetric Gaussian states. Phys. Rev. Lett. **2003**, *91*, 107901. + +2. Braunstein, S.L.; van Loock, P. Quantum information with continuous variables. Rev. Mod. Phys. **2005**, *77*, 513–676. + +3. Kim, Y.S.; Noz, M.E. Coupled oscillators, entangled oscillators, and Lorentz-covariant Oscillators. J. Opt. B Quantum Semiclass. **2003**, *7*, s459–s467. 
 + +4. Ge, W.; Tasgin, M.E.; Suhail Zubairy, S. Conservation relation of nonclassicality and entanglement for Gaussian states in a beam splitter. Phys. Rev. A **2015**, *92*, 052328. + +5. Gingrich, R.M.; Adami, C. Quantum Entanglement of Moving Bodies. Phys. Rev. Lett. **2002**, *89*, 270402. + +6. Dodd, P.J.; Halliwell, J.J. Disentanglement and decoherence by open system dynamics. Phys. Rev. A **2004**, *69*, 052105. + +7. Ferraro, A.; Olivares, S.; Paris, M.G.A. Gaussian States in Continuous Variable Quantum Information. EDIZIONI DI FILOSOFIA E SCIENZE (2005). Available online: http://arxiv.org/abs/quant-ph/0503237 (accessed on 24 June 2016). + +8. Adesso, G.; Illuminati, F. Entanglement in continuous-variable systems: Recent advances and current perspectives. J. Phys. A **2007**, *40*, 7821–7880. + +9. Walls, D.F.; Milburn, G.J. Quantum Optics, 2nd ed.; Springer: Berlin, Germany, 2008. + +10. Yuen, H.P. Two-photon coherent states of the radiation field. Phys. Rev. A **1976**, *13*, 2226–2243. + +11. Yurke, B.; Potasek, M. Obtainment of Thermal Noise from a Pure State. Phys. Rev. A **1987**, *36*, 3464–3466. + +12. Ekert, A.K.; Knight, P.L. Correlations and squeezing of two-mode oscillations. Am. J. Phys. **1989**, *57*, 692–697. +---PAGE_BREAK--- + +13. Paris, M.G.A. Entanglement and visibility at the output of a Mach-Zehnder interferometer. Phys. Rev. A **1999**, *59*, 1615. + +14. Kim, M.S.; Son, W.; Buzek, V.; Knight, P.L. Entanglement by a beam splitter: Nonclassicality as a prerequisite for entanglement. Phys. Rev. A **2002**, *65*, 032323. + +15. Han, D.; Kim, Y.S.; Noz, M.E. Linear Canonical Transformations of Coherent and Squeezed States in the Wigner phase Space III. Two-mode States. Phys. Rev. A **1990**, *41*, 6233-6244. + +16. Dirac, P.A.M. The Quantum Theory of the Emission and Absorption of Radiation. Proc. Roy. Soc. (Lond.) **1927**, A114, 243-265. + +17. Dirac, P.A.M. Unitary Representations of the Lorentz Group. Proc. Roy. Soc. (Lond.) 
**1945**, A183, 284-295. + +18. Dirac, P.A.M. Forms of relativistic dynamics. Rev. Mod. Phys. **1949**, *21*, 392-399. + +19. Yukawa, H. Structure and Mass Spectrum of Elementary Particles. I. General Considerations. Phys. Rev. **1953**, *91*, 415-416. + +20. Kim, Y.S.; Noz, M.E. Theory and Applications of the Poincaré Group; Reidel: Dordrecht, The Netherlands, 1986. + +21. Dirac, P.A.M. A Remarkable Representation of the 3 + 2 de Sitter Group. J. Math. Phys. **1963**, *4*, 901-909. + +22. Feynman, R.P. Statistical Mechanics; Benjamin Cummings: Reading, MA, USA, 1972. + +23. Han, D.; Kim, Y.S.; Noz, M.E. Illustrative Example of Feynman's Rest of the Universe. Am. J. Phys. **1999**, *67*, 61-66. + +24. Kim, Y.S.; Noz, M.E. Covariant harmonic oscillators and the quark model. Phys. Rev. D **1973**, *8*, 3521-3527. + +25. Kim, Y.S.; Noz, M.E.; Oh, S.H. Representations of the Poincaré group for relativistic extended hadrons. J. Math. Phys. **1979**, *20*, 1341-1344. + +26. Kim, Y.S.; Noz, M.E.; Oh, S.H. A simple method for illustrating the difference between the homogeneous and inhomogeneous Lorentz groups. Am. J. Phys. **1979**, *47*, 892-897. + +27. Kim, Y.S.; Wigner, E.P. Entropy and Lorentz Transformations. Phys. Lett. A **1990**, *147*, 343-347. + +28. Kim, Y.S.; Noz, M.E. Lorentz Harmonics, Squeeze Harmonics and Their Physical Applications. Symmetry **2011**, *3*, 16-36. + +29. Klauder, J.R.; Sudarshan, E.C.G. Fundamentals of Quantum Optics; Benjamin: New York, NY, USA, 1968. + +30. Saleh, B.E.A.; Teich, M.C. Fundamentals of Photonics, 2nd ed.; John Wiley and Sons: Hoboken, NJ, USA, 2007. + +31. Miller, W. Symmetry Groups and Their Applications; Academic Press: New York, NY, USA, 1972. + +32. Hall, B.C. Lie Groups, Lie Algebras, and Representations: An Elementary Introduction, 2nd ed.; Springer International: Cham, Switzerland, 2015. + +33. Wigner, E. On Unitary Representations of the Inhomogeneous Lorentz Group. Ann. Math. **1939**, *40*, 149-204. + +34. 
Weinberg, S. Photons and gravitons in S-Matrix theory: Derivation of charge conservation and equality of gravitational and inertial mass. Phys. Rev. **1964**, *135*, B1049-B1056. + +35. Kim, Y.S.; Wigner, E.P. Space-time geometry of relativistic-particles. J. Math. Phys. **1990**, *31*, 55-60. + +36. Başkal, S.; Kim, Y.S.; Noz, M.E. Wigner's Space-Time Symmetries Based on the Two-by-Two Matrices of the Damped Harmonic Oscillators and the Poincaré Sphere. Symmetry **2014**, *6*, 473-515. + +37. Başkal, S.; Kim, Y.S.; Noz, M.E. Physics of the Lorentz Group; IOP Science; Morgan & Claypool Publishers: San Rafael, CA, USA, 2015. + +38. Kim, Y.S.; Yeh, Y. $E(2)$-symmetric two-mode sheared states. J. Math. Phys. **1992**, *33*, 1237-1246 + +39. Kim, Y.S.; Noz, M.E. Phase Space Picture of Quantum Mechanics; World Scientific Publishing Company: Singapore, Singapore, 1991. + +40. Kim, Y.S.; Noz, M.E. Dirac Matrices and Feynman's Rest of the Universe. Symmetry **2012**, *4*, 626-643. + +41. Wigner, E. On the Quantum Corrections for Thermodynamic Equilibrium. Phys. Rev. **1932**, *40*, 749-759. + +42. Han, D.; Kim, Y.S.; Noz, M.E. $O(3,3)$-like Symmetries of Coupled Harmonic Oscillators. J. Math. Phys. **1995**, *36*, 3940-3954. + +43. Feynman, R.P.; Kislinger, M.; Ravndal, F. Current Matrix Elements from a Relativistic Quark Model. +Phys. Rev. D **1971**, *3*, 2706-2732. + +44. Rotbart, F.C. Complete orthogonality relations for the covariant harmonic oscillator. +Phys. Rev. D **1981**, *12*, 3078-3090. + +45. Ruiz, M.J. Orthogonality relations for covariant harmonic oscillator wave functions. +Phys. Rev. D **1974**, *10*, 4306-4307. + +46. Magnus, W.; Oberhettinger, F.; Soni, R.P. Formulas and Theorems for the Special Functions of Mathematical Physics; +Springer-Verlag: Heidelberg, Germany, 1966. +---PAGE_BREAK--- + +47. Doman, B.G.S. *The Classical Orthogonal Polynomials*; World Scientific: Singapore, Singapore, 2016. + +48. Bargmann, V. 
Irreducible unitary representations of the Lorentz group. *Ann. Math.* **1947**, *48*, 568–640. + +49. Han, D.; Kim, Y.S. Special relativity and interferometers. *Phys. Rev. A* **1988**, *37*, 4494–4496. + +50. Han, D.; Kim, Y.S.; Noz, M.E. Wigner rotations and Iwasawa decompositions in polarization optics. *Phys. Rev. E* **1999**, *1*, 1036–1041. + +51. Von Neumann, J. *Die mathematische Grundlagen der Quanten-Mechanik*; Springer: Berlin, Germany, 1932. (von Neumann, I. *Mathematical Foundation of Quantum Mechanics*; Princeton University: Princeton, NJ, USA, 1955.) + +52. Fano, U. Description of States in Quantum Mechanics by Density Matrix and Operator Techniques. *Rev. Mod. Phys.* **1957**, *29*, 74–93. + +53. Wigner E.P.; Yanase, M.M. Information Contents of Distributions. Proc. Natl. Acad. Sci. USA **1963**, *49*, 910–918. + +54. Gell-Mann, M. A Schematic Model of Baryons and Mesons. Phys. Lett. **1964**, *8*, 214-215. + +55. Feynman, R.P. Very High-Energy Collisions of Hadrons. Phys. Rev. Lett. **1969**, *23*, 1415-1417. + +56. Kim, Y.S.; Noz, M.E. Covariant harmonic oscillators and the parton picture. Phys. Rev. D **1977**, *15*, 335-338. + +57. Kim, Y.S. Observable gauge transformations in the parton picture. Phys. Rev. Lett. **1989**, *63*, 348-351. + +58. Hussar, P.E. Valons and harmonic oscillators. Phys. Rev. D **1981**, *23*, 2781-2783. + +59. Leonhardt, U. *Essential Quantum Optics*; Cambridge University Press: London, UK, 2010. + +60. Furusawa, A.; Loock, P.V. *Quantum Teleportation and Entanglement: A Hybrid Approach to Optical Quantum Information Processing*; Wiley-VCH: Weinheim, Germany, 2010. + +© 2016 by the authors. Licensee MDPI, Basel, Switzerland. This article is an open access +article distributed under the terms and conditions of the Creative Commons Attribution +(CC BY) license (http://creativecommons.org/licenses/by/4.0/). 
+---PAGE_BREAK--- + +Article + +Massless Majorana-Like Charged Carriers in +Two-Dimensional Semimetals + +Halina Grushevskaya † and George Krylov †,* + +Physics Department, Belarusian State University, 4 Nezaleznasti Ave., 220030 Minsk, Belarus; grushevskaja@bsu.by + +* Correspondence: krylov@bsu.by; Tel.: +375-296-62-44-97 + +† These authors contributed equally to this work. + +Academic Editor: Young Suh Kim + +Received: 29 February 2016; Accepted: 1 July 2016; Published: 8 July 2016 + +**Abstract:** The band structure of strongly correlated two-dimensional (2D) semimetal systems is found to be significantly affected by the spin-orbit coupling (SOC), resulting in SOC-induced Fermi surfaces. Dirac, Weyl and Majorana representations are used for the description of different semimetals, though the band structures of all these systems are very similar. We develop a theoretical approach to the band theory of two-dimensional semimetals within the Dirac–Hartree–Fock self-consistent field approximation. It reveals partially breaking symmetry of the Dirac cone affected by quasi-relativistic exchange interactions for 2D crystals with hexagonal symmetry. Fermi velocity becomes an operator within this approach, and elementary excitations have been calculated in the tight-binding approximation when taking into account the exchange interaction of $\pi(p_z)$-electron with its three nearest $\pi(p_z)$-electrons. These excitations are described by the massless Majorana equation instead of the Dirac one. The squared equation for this field is of the Klein–Gordon–Fock type. Such a feature of the band structure of 2D semimetals as the appearance of four pairs of nodes is shown to be described naturally within the developed formalism. Numerical simulation of band structure has been performed for the proposed 2D-model of graphene and a monolayer of Pb atoms. 
+ +**Keywords:** 2D semimetals; Dirac–Hartree–Fock self-consistent field approximation; Majorana-like field; Weyl-like nodes; Fermi velocity operator + +PACS: 73.22.-f, 81.05.Bx + +# 1. Introduction + +Strongly correlated materials, such as two-dimensional (2D) complex oxides of transition metals, graphene, oxides with a perovskite structure, and IV–VI semiconductors being three-dimensional (3D) analogues of graphene, can demonstrate unusual electronic and magnetic properties, such as e.g., half-metallicity. The linear dispersion law for such materials is stipulated by the simultaneous existence of positively and negatively charged carriers [1]. Conical singularities are generic in the quantum crystals having honeycomb lattice symmetry [2]. Bipolarity of the material suggests that the state of an excitonic insulator is possible for it. Since an electron-hole pair is at the same time its own antiparticle, the Majorana representation has been used [3,4] to describe the interaction of pseudospins with the valley currents in a monolayer graphene. + +The electron is a complex fermion, so if one decomposes it into its real and imaginary parts, which would be Majorana fermions, they are rapidly re-mixed by electromagnetic interactions. However, such a decomposition could be reasonable for a superconductor where, because of effective electrostatic screening, the Bogoliubov quasi-fermions behave as if they are neutral excitations [5]. +---PAGE_BREAK--- + +A helical magnetic ordering (commensurate magnetism) occurs due to strong spin-orbit coupling (SOC) between Fe and Pb atoms in the system where a chain of ferromagnetic Fe atoms is placed on the surface of conventional superconductor composed of Pb atoms [6]. In this case, the imposition of SOC results in the appearance of Majorana-like excitations at the ends of the Fe atom chain. 
 + +The discovered p-wave function pairing in this Fe-chain allows one to assume that there exists a new mechanism of superconductivity in high-temperature superconductors through the exchange of Majorana particles rather than phonons in the Bardeen-Cooper-Schrieffer theory. Such a novel superconducting state emerges, for example, in compound CeCoIn₅ in strong magnetic fields in addition to the ordinary superconducting state [7]. It has been shown [8–10] that the coupling of electrons into Cooper pairs in pnictides (LiFeAs with slabs FeAs) is mediated by the mixing of d-electron orbitals surrounding the atomic cores of transition metal. The new state is mediated by an anti-ferromagnetic order, and its fluctuations appear due to strong spin-orbit coupling [8,9,11]. It has been experimentally confirmed in [10] for LiFeAs. For antiferromagnetic itinerant-electron system LaFe₁₂B₆, ultrasharp magnetization steps have been observed [12]. The latter can only be explained by the existence of anti-ferromagnetic order, and its fluctuations appear due to strong spin-orbit coupling. + +Thus, there is strong evidence that SOC may control the spin ordering in the absence of external magnetic fields. However, the mechanism that leads to such commensurate magnetism has not yet been established. + +The phenomenon of the contraction of electron density distribution in one direction is called nematicity. It is observed in pnictides BaFe₂(As₁₋ₓPₓ)₂ placed in a magnetic field, and such a phenomenon remains in the superconducting state [13]. The nematicity is coupled with considerable stripe spin fluctuations in FeSe [14]. The very strong spin orbit coupling leads to a contraction of about 10% and a rotation by 30° of the hexagonal Brillouin zone of delafossite oxide PtCoO₂, belonging to yet another class of topological insulators in which atoms of metal are in layers with triangular lattices [15]. 
+ +Other topological insulators, namely so-called Weyl materials with a linear dispersion law, are close in properties with layered perovskite-like materials (see [16] and references therein). Currently, the first candidate for such a material has been found, namely TaAs, whose Brillouin zone has Weyl-like nodes and Fermi arcs [17–19]. + +Moreover, the experimental evidence of the similarities between the Fermi surfaces of insulator SmB₆ and metallic rare earth hexaborides (PrB₆ and LaB₆) has been presented in [20]. To explain the accompanying ordering phenomena, each associated with different symmetry breaking, it is necessary to develop a unified theory as it has been pointed out in [9]. + +Electrically charged carriers in the strongly correlated semimetallic systems with half-filled bands are massless fermions [15,21,22]. + +In a low-dimensional system, the exciton binding energy turns out to be high [23] and, respectively, the transition to the state of excitonic insulator is possible. Therefore, the Majorana rather than Weyl representation is preferable for the description of 2D semimetals. An attempt to represent the transition to the state of excitonic insulator as the appearance of Majorana zero-modes solution in graphene with trigonal warping [24] contradicts experimental data on the absence of a gap in band structure of graphene [25] and on diminishing of charged carriers mobility [26] and minimal conductivity [27]. However, at the present time, there exist experimental signatures of graphene Majorana states in graphene-superconductor junctions without the need for spin-orbit coupling [28]. However, modern Quantum Field Theory of pseudo-Dirac quasiparticles in random phase approximation predicts a strong screening that destroys the excitonic pairing instability if the fermion dynamic mass *m*(*p*) dependent on momentum *p* is small in comparison with the chemical potential *μ*: *m*(*p*) ≤ *μ* [29]. 
+ +In the paper, we would like to show how the above described features of the layered materials can be formalized in 2D models, where the charged carriers are the quasiparticles of Majorana rather than of the Weyl type. We also show that, under certain conditions, these quasiparticles reveal themselves as Weyl-like states or massless Dirac pseudofermions. +---PAGE_BREAK--- + +However, the use of the well-known Majorana representations to describe a semimetal as a massless-quasiparticle system is encountered with such a puzzle as the absence of harmonic oscillatory solutions in ultrarelativistic limit for Majorana particles of zero mass [30]. The equations are known for massive Majorana particles only [31–33]. + +In the paper, we reveal different aspects of appearance of Majorana-like quasiparticle states in the band structure of semimetals. 2D Hartree-Fock approximation for graphene, however, predicts experimentally observable increase of the Fermi velocity value $v_F(\vec{p})$ at small momenta $p$ [25] but leads to logarithmically divergent $v_F(\vec{p})$ at $p \to 0$ [34]. To take into account this effect of long range Coulomb interactions correctly, our calculation is based on the quasi-relativistic Dirac-Hartree-Fock self-consistent field approach developed earlier [35,36]. + +The goal is to construct a 2D-semimetal model in which a motion equation is a pseudo-relativistic massless Majorana-like one. We show that the squared equation for this field is of a Klein-Gordon-Fock type, and therefore the charged carriers in such 2D-semimetal models can be assumed massless Majorana-like quasiparticles. + +We study quasiparticle excitations of the electronic subsystem of a hexagonal monoatomic layer (monolayer) of light or heavy atoms in tight-binding approximation. The simulations are performed for the atoms of C and Pb on the assumption that sp²-hybridization for s- and p-electron orbitals is also possible for the atoms of Pb. 
+ +We demonstrate that the band-structure features for the hexagonal monolayers are similar to each other due to the similarity of external electronic shells of their atoms. Despite the similarity of the band structure, the charged carriers in such 2D-semimetal models can possess different features, e.g., the charged carriers in the monolayer of the atoms of C can be thought of as massless Dirac pseudofermions, whereas in the monolayer from the atoms of Pb, they reveal themselves as Weyl-like states. + +The paper is organized as follows. In Section 2, we propose a semimetal model with coupling between pseudospin and valley currents and prove the pseudo-helicity conservation law. In Section 3, we briefly introduce the approach [3,35–37] and use it in a simple tight-binding approximation to obtain the system of equations for a Majorana secondary quantized field. In Section 4, we support the statement that the squared equation for the constructed field is of the Klein-Gordon-Fock type for different model exchange operators. We also discuss features of our model manifesting in the band structure of real semimetals. In Section 5, we discuss the proposed approximations for the exchange interactions in 2D semimetals and summarize our findings. + +## 2. Monolayer Semimetal Model with Partial Unfolding of Dirac Bands + +Semimetals are known to be bipolar materials with half-filled valence and conduction bands. A distinctive feature of the graphene band structure is the existence of Dirac cones in the Dirac points (valleys) K, K' of the Brillouin zone. In the present paper, these Dirac points are designated as $K_A, K_B$. We assume that pseudo-spins of hexagonally packed carbon atoms in the monoatomic layer (monolayer) graphene are anti-ordered, as it is shown schematically in Figure 1a. 
The fact that the pseudo-helicity (chirality) conservation law forbids massless charged carriers to be in lattice sites with the opposite signs of pseudo-spin, makes possible the existence of valley currents due to jumps through the forbidden sites. This is shown schematically in Figure 1a. Coupling between the pseudo-spin and the valley current in the Majorana representation of bispinors can be determined in the following way. +---PAGE_BREAK--- + +**Figure 1.** (a) graphene lattice, comprised of two sublattices {A} with spin “up” and {B} with spin “down”. Right and left valley currents $J_V^R$ and $J_V^L$ are shown as circular curves with arrows. Double arrows from site A to site $B_L$ and from A to $B_R$ indicate clockwise and anti-clockwise directions. The axis of mirror reflection from $A_R$ to $B_L$ is marked by a dash-dotted line; (b) transformations of a q-circumference into ellipses under an action of exchange operators ($\Sigma_{rel}^x$)$_{AB}$ and ($\Sigma_{rel}^x$)$_{BA}$ (in color). + +According to Figure 1a, a particle can travel from a lattice site A to, e.g., a lattice site $A_R$ through right or left sites $B_R$ or $B_L$, respectively. Since the particle is symmetrical, its description in the right and left reference frames has to be equivalent. Therefore, a bispinor wave function $\Psi'$ of graphene has to be chosen in the Majorana representation, and its upper and lower spin components $\psi'_{\sigma}$, $\psi'_{-\sigma}$ are transformed by left and right representations of the Lorentz group: + +$$ \Psi' = \begin{pmatrix} \psi'_{\sigma} \\ \psi'_{-\sigma} \end{pmatrix} = \begin{pmatrix} e^{\frac{i}{2}\vec{\sigma}\cdot\vec{n}}\psi_{\sigma} \\ e^{\frac{i}{2}(-\vec{\sigma})\cdot\vec{n}}\psi_{-\sigma} \end{pmatrix}. 
\quad (1) $$ + +The wave-function $\tilde{\chi}_{\vec{\sigma}}^{\dagger}(\vec{r}_A) |0, +\sigma\rangle$ of a particle (in our case of an electron-hole pair) located on the site A, behaves as a component $\psi_{\sigma}$, while the wave-function $\tilde{\chi}_{-\sigma}^{\dagger}(\vec{r}_B) |0, -\sigma\rangle$ of a particle located on the site B behaves as a component $\psi_{-\sigma}$ of the bispinor (1). + +Relativistic particles with non-zero spin possess the helicity $h$, which is the projection of the particle's spin to the direction of motion [32]: + +$$ h = \vec{p} \cdot \vec{S} = \frac{1}{2} p_i \begin{pmatrix} \sigma_i & 0 \\ 0 & \sigma_i \end{pmatrix}, \quad (2) $$ + +where $\vec{p}$ is the particle momentum, $\vec{S}$ is the spin operator for a particle, $\vec{\sigma}$ is the vector of the Pauli matrices $\sigma_i$, and $i = x, y$. In quantum relativistic field theory, the value of the helicity of a massless particle is preserved in the transition from one reference frame moving with the velocity $v_1$, to another one moving with the velocity $v_2$ [32,38]. + +Let us designate the two-dimensional spin of the quasi-particle in valleys $K_A$ and $K_B$ as $\vec{S}_{AB} = \hbar\vec{\sigma}_{AB}/2$ and $\vec{S}_{BA} = \hbar\vec{\sigma}_{BA}/2$, respectively. + +Let us introduce two-dimensional pseudospin $\vec{S}_{AB}$ and $\vec{S}_{BA}$ of quasi-particles in valleys $K_A$ and $K_B$ through the transformed vector $\vec{\sigma}$ of the Pauli matrices $\sigma_i$, $i = x, y$ as $\vec{S}_{AB} = \hbar\vec{\sigma}_{AB}/2$ and $\vec{S}_{BA} = \hbar\vec{\sigma}_{BA}/2$. The explicit form of this transformation is given in Section 3. 
+ +A valley current $J_V^R$ or $J_V^L$, on the right or left closed contour $\{A \to B_R \to A_R \to B \to A_L \to B_L \to A\}$ or $\{A \to B_L \to A_L \to B \to A_R \to B_R \to A\}$, respectively, in Figure 1, is created by an electron (hole) with pseudo-angular momentum $\vec{l}_{AB_R}$ and momentum $\vec{p}_{AB_R}$ or by an electron (hole) with $\vec{l}_{AB_L}$ and +---PAGE_BREAK--- + +$\vec{p}_{AB_L}$. Pseudo-helicity of bispinors (1), describing the particles to the right or left of the lattice site A, is defined by the expressions, which are analogous to (2): + +$$h_{B_R A} = \vec{p}_{AB_R} \cdot \vec{S}_{B_R A}, \quad (3)$$ + +$$h_{B_L A} = \vec{p}_{AB_L} \cdot \vec{S}_{B_L A}. \quad (4)$$ + +Let us use the parity operator $P$, which mirrors the bispinor (1) with respect to the line passing through the points A and B. Pseudo-helicity of the mirrored bispinor is defined by the expression: + +$$P h_{B_R A_R} P = h_{A_L B_L} = \vec{p}_{B_L A_L} \cdot \vec{S}_{A_L B_L}. \quad (5)$$ + +Pseudo-helicity $h_{AB}$ does not change its value while the valley momentum and the pseudo-spin change signs: $\vec{p}_{A_L B_L} = -\vec{p}_{B_R A_R}$ and $\vec{S}_{A_L B_L} = -\vec{S}_{B_R A_R}$. + +The pseudo-helicity $h_{AB}$ is expressed through the projection $\tilde{\mathcal{M}}_{AB} = \vec{\sigma}_{BA} \cdot (\vec{l}_{AB} + \hbar\vec{\sigma}_{BA}/2)$ of the total angular momentum on the direction of the spin $\vec{\sigma}_{BA}$ as [39,40]: + +$$\vec{\sigma}_{BA} \cdot \vec{p}_{AB} = \sigma^r_{BA} \left( p_{r,BA} + i \frac{\tilde{\mathcal{M}}_{AB} - \hbar/2}{r} \right) = \sigma^r_{BA} \left( p_{r,BA} + i \frac{\vec{\sigma}_{BA} \cdot \vec{l}_{AB}}{r} \right), \quad (6)$$ + +where $\sigma^r_{BA}$ and $p_{r,BA}$ are radial components of the spin and the momentum, respectively. 
According to Equation (6), the pseudo-spin-orbit scalar $\vec{\sigma}_{BA} \cdot \vec{l}_{AB}$ describes the coupling (interaction) of the spin with the valley currents flowing along a closed loop clockwise or in opposite directions, as is shown in Figure 1a. Hence, there exists a preferred direction along which the spin projection of the bispinor (1) is not changed after transition from one moving reference frame into another. At this, the spin of a particle precesses. Transformation of the electron and hole into each other in an exciton is a pseudo-precession. + +As a result, the coupling of pseudo-spin and valley currents stipulates the spin precession of exciton charged carriers in graphene. In our model, the orientation of non-equilibrium spin of the states of monolayer graphene in electromagnetic fields may be retained for a long time due to prohibition of change for exciton pseudo-helicity. Pseudo-precession is possible, if spins of p_z-electrons are anti-ordered (pseudo-antiferromagnetic ordering). Therefore, the pseudo-spin precession of the exciton can be implemented through the exchange interaction. Furthermore, we determine the operators $\vec{\sigma}_{BA(AB)}$, $\vec{p}_{AB(BA)}$ and describe the effects of pseudo-spin and valley current coupling. + +### 3. 
Effects of Coupling between Pseudo-Spin and Valley Current + +In quasi-relativistic approximation ($c^{-1}$ expansion), the eigenproblem for the equation of motion of the secondary quantized field $\hat{\chi}_{-\sigma_A}^\dagger$ in the model shown in Figure 1a has the form [35–37]: + +$$\left\{ \vec{\sigma} \cdot \vec{p} \, \hat{v}_F^{qu} - \frac{1}{c} (i\Sigma_{rel}^x)_{AB} (i\Sigma_{rel}^x)_{BA} \right\} \hat{\chi}_{-\sigma_A}^\dagger (\vec{r}) |0, -\sigma\rangle \\ = E_{qu}(p) \hat{\chi}_{-\sigma_A}^\dagger (\vec{r}) |0, -\sigma\rangle, \quad (7)$$ + +where the Fermi velocity operator $\hat{v}_F^{qu}$ is defined as + +$$\hat{v}_F^{qu} = [(\Sigma_{rel}^x)_{BA} + c\hbar\vec{\sigma} \cdot (\vec{K}_A + \vec{K}_B)] .$$ +---PAGE_BREAK--- + +($\Sigma_{rel}^{x}$)$_{BA}$, ($\Sigma_{rel}^{x}$)$_{AB}$ are determined through an ordinary exchange interaction contribution, +for example [39,40]: + +$$ +\begin{align*} +(\Sigma_{rel}^{x})_{AB} \hat{\chi}_{\sigma_B}^{\dagger}(\vec{r}) |0, \sigma\rangle &= \sum_{i=1}^{N_v N} \int d\vec{r}_i \hat{\chi}_{\sigma_i B}^{\dagger}(\vec{r}) |0, \sigma\rangle \\ +&\quad \times \langle 0, -\sigma_i | \hat{\chi}_{-\sigma_i A}^{\dagger}(\vec{r}_i) V(\vec{r}_i - \vec{r}) \hat{\chi}_{-\sigma_B}(\vec{r}_i) |0, -\sigma_i'\rangle. +\end{align*} +$$ + +$V(\vec{r}_i - \vec{r})$ is the Coulomb interaction between two valence electrons with radius-vectors $\vec{r}_i$ and $\vec{r}$; $N$ is the total number of atoms in the system, $N_v$ is the number of valence electrons in an atom, $c$ is the speed of light. 
+ +After applying the non-unitary transformation to the wave function in the form + +$$ +\tilde{\chi}_{-\sigma_A}^{\dagger} |0, -\sigma\rangle = (\Sigma_{rel}^{x})_{BA} \hat{\chi}_{-\sigma_A}^{\dagger} |0, -\sigma\rangle, +$$ + +we obtain (neglecting mixing of the states for the Dirac points) the equation that is similar to the one +in 2D quantum field theory (QFT) [41–43], but it describes the motion of a particle with pseudo-spin +$\vec{S}_{AB} = \hbar\vec{\sigma}_{AB}/2$: + +$$ +\{\vec{\sigma}_{2D}^{AB} \cdot \vec{p}_{BA} - c^{-1}\Sigma_{BA}\tilde{\Sigma}_{AB}\} \tilde{\chi}_{-\sigma_A}^{\dagger}(\vec{r}) |0, -\sigma\rangle = \tilde{E}_{qu}(p) \tilde{\chi}_{-\sigma_A}^{\dagger}(\vec{r}) |0, -\sigma\rangle , \quad (8) +$$ + +with a transformed 2D vector $\vec{\sigma}_{2D}^{AB}$ of the Pauli matrices, which is determined as +$\vec{\sigma}_{2D}^{AB} = (\Sigma_{rel}^{x})_{BA} \vec{\sigma} (\Sigma_{rel}^{x})_{BA}^{-1}$. The following notations are introduced: $\vec{p}_{BA}\tilde{\chi}_{-\sigma_A}^{\dagger} = (\Sigma_{rel}^{x})_{BA} \vec{p} (\Sigma_{rel}^{x})_{BA}^{-1}\tilde{\chi}_{-\sigma_A}^{\dagger} = [(\Sigma_{rel}^{x})_{BA}\vec{p}] \tilde{\chi}_{-\sigma_A}^{\dagger}, \tilde{E}_{qu} = E_{qu}/\hat{v}_{F}^{BA}, \hat{v}_{F}^{BA} = (\Sigma_{rel}^{x})_{BA}, \tilde{\Sigma}_{BA}\tilde{\Sigma}_{AB} = (\Sigma_{rel}^{x})_{BA}(i\Sigma_{rel}^{x})_{AB}(i\Sigma_{rel}^{x})_{BA}(\Sigma_{rel}^{x})_{BA}^{-1} = (i\Sigma_{rel}^{x})_{BA}(i\Sigma_{rel}^{x})_{AB}$; and the product of the two capital sigmas, as one sees from the last chain of formulas, behaves like a scalar mass term. + +Further simulations are performed in the nearest-neighbor tight-binding approximation [44,45]. +This approximation correctly predicts the graphene band structure in the energy range ±1 eV [46]. +This turns out to be sufficient for our purposes. We use the expressions for the exchange between +$\pi(p_z)$-electrons only. One can find the explicit form of these expressions in [4]. 
+ +The action of the matrices ($\Sigma_{rel}^x$)$_{BA}$, ($\Sigma_{rel}^x$)$_{AB}$ in the momentum space is shown in Figure 1b. +As ($\Sigma_{rel}^x$)$_{BA}$ $\neq$ ($\Sigma_{rel}^x$)$_{AB}$, the vector $\vec{p}_{BA}$ is rotated with respect to $\vec{p}_{AB}$ and stretched. According to +Figure 1b, ellipses in momentum spaces of electrons and holes are rotated by 90° with respect to each +other. Taking into account the hexagonal symmetry of the system, the latter explains the experimentally +observed rotation by 30° of the hexagonal Brillouin zone of PtCoO$_2$ [15]. + +Thus, the sequence of exchange interactions $(\Sigma_{rel}^x)_{AB}$ $(\Sigma_{rel}^x)_{BA}$ $(\Sigma_{rel}^x)_{AB}$ for valley currents first rotates +the electron Brillouin zone and Dirac band into the hole Brillouin zone and +Dirac band, and then vice versa. Thus, the exchange $(\Sigma_{rel}^x)_{AB(BA)} \equiv \Sigma_{AB(BA)}$ changes the sublattice +wave functions: + +$$ +|\psi_{AB}\rangle = \Sigma_{AB} |\psi_{BA}^*\rangle. +$$ + +Owing to this and neglecting a very small mass term $c^{-1}\Sigma_{BA}\tilde{\Sigma}_{AB}$, the equation in which the operator of the Fermi velocity enters can be rewritten as follows: + +$$ +\vec{\sigma}_{2D}^{BA} \cdot \vec{p}_{AB} |\psi_{AB}\rangle = E_{qu} |\psi_{BA}^*\rangle . 
\qquad (9) +$$ + +Taking into account that $E \to i\frac{\partial}{\partial t}$ and $\vec{p} \to -i\vec{\nabla}$, we transform the system of equations for the Majorana bispinor $(\psi_{AB}, \psi_{BA}^*)^T$: +---PAGE_BREAK--- + +$$ \vec{\sigma}_{2D}^{BA} \cdot \vec{p}_{AB} |\psi_{AB}\rangle = i \frac{\partial}{\partial t} |\psi_{BA}^*\rangle, \quad (10) $$ + +$$ \vec{\sigma}_{2D}^{AB} \cdot \vec{p}_{BA}^* |\psi_{BA}^*\rangle = -i \frac{\partial}{\partial t} |\psi_{AB}\rangle, \quad (11) $$ + +into the wave equation of the form: + +$$ (\vec{\sigma}_{2D}^{AB} \cdot \vec{p}_{BA}^*)(\vec{\sigma}_{2D}^{BA} \cdot \vec{p}_{AB}) |\psi_{AB}\rangle = \frac{\partial^2}{\partial t^2} |\psi_{AB}\rangle. \quad (12) $$ + +Equation (12) describes an oscillator with the energy operator $\hat{\omega}(\vec{p})$ + +$$ \hat{\omega}(\vec{p}) = \frac{1}{\sqrt{2}} [(\vec{\sigma}_{2D}^{AB} \cdot \vec{p}_{BA})(\vec{\sigma}_{2D}^{BA} \cdot \vec{p}_{AB}) + (\vec{\sigma}_{2D}^{BA} \cdot \vec{p}_{AB})(\vec{\sigma}_{2D}^{AB} \cdot \vec{p}_{BA})]^{1/2}. \quad (13) $$ + +Now, one can see that the obtained equation is the equation of motion for a Majorana bispinor wave function of the semimetal charged carriers. + +Thus, the Fermi velocity becomes an operator within this approach, and elementary excitations are fermionic excitations described by the massless Majorana-like equation rather than a Dirac-like one. + +**4. Harmonic Analysis of the Problem** + +Equation (13) can be rewritten in the following form: + +$$ \hat{\omega}^2(\vec{p}) = \frac{1}{2} (\hat{H}_{AB}\hat{H}_{BA} + \hat{H}_{BA}\hat{H}_{AB}). \quad (14) $$ + +In order to describe the proposed secondary quantized field by a set of harmonic oscillators, it is necessary to show that the squared Equation (14), obtained by the symmetrization of the product of the Hamiltonians $\hat{H}_{AB}$ and $\hat{H}_{BA}$, is the Klein-Gordon-Fock operator. 
This will be the case if the non-diagonal matrix elements of the operator vanish identically, and therefore the components of the equation are independent. Then, $\hat{\omega}^2(\vec{p})$ can be considered as a "square of energy operator". + +Unfortunately, because of the complex form of the exchange operator, the statement is difficult to prove in the general case. Therefore, we do this for several approximations of the exchange interaction and demonstrate that the Equation (14) is a Klein-Gordon-Fock one. + +As a first particular case, when the proposed Majorana-like field is proven to be a harmonic oscillators set, we consider $\epsilon$-neighborhood ($\epsilon \to 0$) of the Dirac point $K_A(K_B)$. + +Let us designate the momentum of a particle in a valley as $\vec{q}$. The momentum $\vec{q}$ is determined as $\vec{q} = \vec{p} - \hbar\vec{K}_A$. In the case of very small values of $\vec{q}, q \to 0$ the exchange operator $\Sigma_{AB(BA)}$ is approximated by a power series expansion up to the fourth order in $q$. Then, an analytical calculation of non-diagonal elements of the operator $\hat{\omega}^2(\vec{p})$ performed in the Mathematica system proves that they are identically zero. + +Band structures for monolayer graphene and monolayer of atoms of Pb are shown in Figure 2a,b. One can see that the Weyl nodes in graphene are located far enough from the Dirac point. The Weyl nodes are shifted to the Dirac point for the Pb-monolayer. Therefore, Weyl-like character in the behavior of charged carriers may be exhibited for the Pb-monolayer under the condition that the contributions up to 4-th order in $q$ are prevailing in the exchange. In accordance with Figure 1b, the exchange operator matrices transform a circumference in the momentum space into a highly stretched ellipse that allows us to assume the presence of nematicity in the model. 
+ +For a given $\vec{q}$, where the eigenfunction of Equation (9) represents 2D spinor $\Psi$, we choose its normalization in the form $\Psi(\vec{q}) = (\psi(\vec{q}), 1)^\dagger$ with lower component equal to unity. Then, as it can be easily shown for the massless Dirac pseudo-fermion model [47], the absolute value of the upper component $|\psi(\vec{q})|$ does not depend upon the wave vector $\vec{q}$, demonstrating the equivalence of all +---PAGE_BREAK--- + +directions in $\vec{q}$ space. We construct $|\psi(\vec{q})|^2$ for Equation (9) in $q^4$-approximation for the exchange. The results are shown in Figure 2c. The isotropy of $|\psi(\vec{q})|^2$ is broken for our model due to the appearance of the preferable directions in the momentum space. + +As one can see from Figure 2c, the existence of almost one-dimensional regions with sharp jump in $|\psi(\vec{q})|^2$ should probably lead to some anisotropy already in the configuration space for the carriers that we consider as manifestation of nematicity. + +The approximation $q^4$ for the exchange operator expression presents a particular interest for systems with strong damping of quasi-particle excitations. + +**Figure 2.** A splitting of Dirac cone replicas: for graphene (a) and Pb monolayer (b). One of the six pairs of Weyl-like nodes: source and sink are indicated; (c) the square of the absolute value of the upper spinor component $|\psi|^2$ of $\vec{q}$-eigenstate in the 2D semimetal model. $\vec{q} = \vec{p} - \vec{K}_A$. (in color) + +The second approximation of the exchange, for which we can prove the harmonic origin of the proposed Majorana-like field, is the model exchange with full exponential factors taken into account, but with the phase-difference between $\pi(p_z)$-electrons wavefunction chosen to be identically zero (see Ref. [4] for detail). Numeric simulation of $\omega^2(\vec{p})$ with this model exchange has been performed on a discrete lattice in the Brillouin zone. 
It has been demonstrated that the operator $\omega^2(\vec{p})$ is always diagonal in this case. + +Now, we perform the simulations with the exact expression for the exchange term. + +In this general case, the exchange between a $\pi(p_z)$-electron and its three nearest $\pi(p_z)$-electrons has been calculated based on the method proposed in [4]. The band structure of the 2D semimetal has the form of a degenerate Dirac cone in the neighborhood of the Dirac point. Then, the emergence of unfolding leads to replica appearance, and further splitting of these replicas gives the octagonal symmetry of the problem, as one can see in Figure 3. Hyperbolic points (saddle points) are located between nodes and at the apex of the Dirac cone (Van Hove singularities), as one can see in Figure 2a,b [3,48–50]. Therefore, a fractal-like set of Fermi arcs, which is shown in Figure 4, is formed in the absence of damping in the system. Contrary to the graphene case, the splitting of the Dirac bands for the Pb-monolayer occurs at sufficiently small $q$ and, therefore, can be observed experimentally. In addition, for the Pb-monolayer, there exist regions with huge numbers of Fermi arcs and, respectively, regions with strong fluctuations of antiferromagnetic ordering. + +Thus, the secondary quantized field described by Equation (9) represents a field whose quanta manifest themselves as Dirac pseudo-fermions at the apex of the Dirac cone and as Weyl-like particles for sufficiently large $q$ in the presence of damping in the system. For an ideal system ($\Im\, \epsilon(\vec{q}) = 0$), such a behavior is similar to that of the mathematical pendulum in the vicinity of the separatrix [51,52]. +---PAGE_BREAK--- + +**Figure 3.** A band structure in the graphene model with partial unfolding of the Dirac cone: real (a) and imaginary (b) parts of $\epsilon(\vec{q})$; range of high momenta. $\vec{q} = \vec{p} - \vec{K}_A$ (in color). 
+ +**Figure 4.** Density of Fermi arc sets in graphene (a) and Pb-monolayer (b) bands for values of momentum $q$ in the range $0 \le q/|\vec{K}_A| \le 10^{-4}$, $\vec{q} = \vec{p} - \vec{K}_A$. + +## 5. Discussion + +Discussing the obtained results, we have to point out, firstly, that the excitations of the constructed secondary-quantized pseudo-fermionic field are Majorana-like massless quasiparticles. + +The set of Fermi arcs in our model shows that the splitting of Dirac replicas into a huge number of Weyl-like states occurs in the momentum space everywhere except for the Dirac cone apex. + +In contrast to known massless Dirac and Weyl models, in the proposed model, there is a partial removal of the degeneracy of the Dirac cone, and the octagonal symmetry of the bands emerges for sufficiently large $q$. Thus, Majorana particles in our model can be represented as a wave packet of an infinitely large number of Weyl-like states. + +Secondly, the Dirac cone for the proposed 2D-semimetal model is degenerate in a very small neighborhood of the Dirac point $K_A(K_B)$ at $q \to 0$. + +Thirdly, the first approximation with damping demonstrates that sufficiently strong decay leads to a diminishing of the number of the Weyl states and the formation of bands having hexagonal symmetry. In accordance with the obtained results, in the system with strong damping, only six pairs of Weyl nodes survive. In this case, each Dirac hole (electron) cone is surrounded by three electron (hole) bands relating to three Weyl pairs. Provided the lifetime of the Weyl-like states is sufficiently large (small but finite damping) to preserve the octagonal symmetry of the bands, each Dirac hole (electron) cone will be surrounded by four electron (hole) bands relating to four Weyl pairs. + +Important features of the proposed model are that the fractal set of Fermi arcs manifests pseudospin fluctuations and that the phenomenon of nematicity is possible. +---PAGE_BREAK--- + +**6. 
Conclusions** + +In conclusion, contrary to known Dirac and Weyl models, the constructed 2D-semimetal model allows for the description, in a general formalism, of the band structure of a wide class of existing strongly correlated materials. + +**Acknowledgments:** This work has been supported in part by Research grant No. 2.1.01.1 within the Basic Research Program "Microcosm and Universe" of the Republic of Belarus. + +**Author Contributions:** Both authors equally contributed to this work. + +**Conflicts of Interest:** The authors declare no conflict of interest. + +**References** + +1. Grushevskaya, H.V.; Hurski, L.I. Coherent charge transport in strongly correlated electron systems: Negatively charged exciton. *Quantum Matter* **2015**, *4*, 384–386. + +2. Fefferman, C.L.; Weinstein, M.I. Honeycomb lattice potentials and Dirac points. *J. Am. Math. Soc.* **2012**, *25*, 1169–1220. + +3. Grushevskaya, H.V.; Krylov, G. Quantum field theory of graphene with dynamical partial symmetry breaking. *J. Mod. Phys.* **2014**, *5*, 984–994. + +4. Grushevskaya, H.V.; Krylov, G. Semimetals with Fermi Velocity Affected by Exchange Interactions: Two Dimensional Majorana Charge Carriers. *J. Nonlinear Phenom. Complex Syst.* **2015**, *18*, 266–283. + +5. Semenoff, G.W.; Sodano, P. Stretched quantum states emerging from a Majorana medium. *J. Phys. B: At. Mol. Opt. Phys.* **2007**, *40*, 1479–1488. + +6. Nadj-Perge, S.; Drozdov, I.K.; Li, J.; Chen, H.; Jeon, S.; Seo, J.; MacDonald, A.H.; Bernevig, A.; Yazdani, A. Observation of Majorana fermions in ferromagnetic atomic chains on a superconductor. *Science* **2014**, *346*, 602–607. + +7. Gerber, S.; Bartkowiak, M.; Gavilano, J.L.; Ressouche, E.; Egetenmeyer, N.; Niedermayer, C.; Bianchi, A.D.; Movshovich, R.; Bauer, E.D.; Thompson, J.D.; et al. Switching of magnetic domains reveals spatially inhomogeneous superconductivity. *Nat. Phys.* **2014**, *10*, 126–129. + +8. 
Shimojima, T.; Sakaguchi, F.; Ishizaka, K.; Ishida, Y.; Kiss, T.; Okawa, M.; Togashi, T.; Chen, C.-T.; Watanabe, S.; Arita, M.; et al. Orbital-independent superconducting gaps in iron-pnictides. *Science* **2011**, *332*, 564–567. + +9. Davis, J.C.S.; Lee, D.-H. Concepts relating magnetic interactions, intertwined electronic orders, and strongly correlated superconductivity. *Proc. Natl. Acad. Sci. USA* **2013**, *110*, 17623–17630. + +10. Borisenko, S.V.; Evtushinsky, D.V.; Liu, Z.-H.; Morozov, I.; Kappenberger, R.; Wurmehl, S.; Büchner, B.; Yaresko, A.N.; Kim, T.K.; Hoesch, M.; et al. Direct observation of spin-orbit coupling in iron-based superconductors. *Nat. Phys.* **2015**, doi:10.1038/nphys3594. + +11. Hurski, L.I.; Grushevskaya, H.V.; Kalanda, N.A. Non-adiabatic paramagnetic model of pseudo-gap state in high-temperature cuprate superconductors. *Dokl. Nat. Acad. Sci. Belarus* **2010**, *54*, 55–62. (In Russian) + +12. Diop, L.V.B.; Isnard, O.; Rodriguez-Carvajal, J. Ultrasharp magnetization steps in the antiferromagnetic itinerant-electron system $LaFe_{12}B_6$. *Phys. Rev.* **2016**, *B93*, 014440. + +13. Kasahara, S.; Shi, H.J.; Hashimoto, K.; Tonegawa, S.; Mizukami, Y.; Shibauchi, T.; Sugimoto, K.; Fukuda, T.; Terashima, T.; Nevidomskyy, A.H.; et al. Electronic nematicity above the structural and superconducting transition in $BaFe_2(As_{1-x}P_x)_2$. *Nature* **2012**, *486*, 382–385. + +14. Wang, Q.; Shen, Y.; Pan, B.; Hao, Y.; Ma, M.; Zhou, F.; Steffens, P.; Schmalzl, K.; Forrest, T.R.; Abdel-Hafiez, M.; et al. Strong interplay between stripe spin fluctuations, nematicity and superconductivity in FeSe. *Nat. Mater.* **2016**, *15*, 159–163. + +15. Kushwaha, P.; Sunko, V.; Moll, Ph.J.W.; Bawden, L.; Riley, J.M.; Nandi, N.; Rosner, H.; Schmidt, M.P.; Arnold, F.; Hassinger, E.; et al. Nearly free electrons in a 5d delafossite oxide metal. *Sci. Adv.* **2015**, *e1500692*. + +16. Lv, M.; Zhang, S.-C. 
Dielectric function, Friedel oscillation and plasmons in Weyl semimetals. *Int. J. Mod. Phys.* **B** **2013**, *27*, 1350177. + +17. Xu, S.-Y.; Belopolski, I.; Alidoust, N.; Neupane, M.; Bian, G.; Zhang, C.; Sankar, R.; Chang, G.; Yuan, Z.; Lee, C.-C.; et al. Discovery of a Weyl Fermion semimetal and topological Fermi arcs. *Science* **2015**, *349*, 613–617. +---PAGE_BREAK--- + +18. Lv, B.Q.; Xu, N.; Weng, H.M.; Ma, J.Z.; Richard, P.; Huang, X.C.; Zhao, L.X.; Chen, G.F.; Matt, C.E.; Bisti, F.; et al. Observation of Weyl nodes in TaAs. *Nat. Phys.* **2015**, *11*, 724–727. + +19. Huang, S.-M.; Xu, S.-Y.; Belopolski, I.; Lee, C.-C.; Chang, G.; Wang, B.K.; Alidoust, N.; Bian, G.; Neupane, M.; Zhang, C.; et al. A Weyl Fermion semimetal with surface Fermi arcs in the transition metal monopnictide TaAs class. *Nat. Commun.* **2015**, *6*, 7373. + +20. Tan, B.S.; Hsu, Y.-T.; Zeng, B.; Ciomaga Hatnean, M.; Harrison, N.; Zhu, Z.; Hartstein, M.; Kiourlappou, M.; Srivastava, A.; Johannes, M.D.; et al. Unconventional Fermi surface in an insulating state. *Science* **2015**, *349*, 287–290. + +21. Falkovsky, L.A. Optical properties of graphene and IV-VI semiconductors. *Phys.-Uspekhi* **2008**, *51*, 887–897. + +22. Novoselov, K.S.; Jiang, D.; Schedin, F.; Booth, T.J.; Khotkevich, V.V.; Morozov, S.V.; Geim, A.K. Two-dimensional atomic crystals. *Proc. Natl. Acad. Sci. USA* **2005**, *102*, 10451–10453. + +23. Keldysh, L.V. Coulomb interaction in thin semiconductor and semimetal films. *Lett. J. Exper. Theor. Phys.* **1979**, *29*, 716–719. + +24. Dora, B.; Gulacsi, M.; Sodano, P. Majorana zero modes in graphene with trigonal warping. *Phys. Status Solidi RRL* **2009**, *3*, 169–171. + +25. Elias, D.C.; Gorbachev, R.V.; Mayorov, A.S.; Morozov, S.V.; Zhukov, A.A.; Blake, P.; Ponomarenko, L.A.; Grigorieva, I.V.; Novoselov, K.S.; Guinea, F.; et al. Dirac cones reshaped by interaction effects in suspended graphene. *Nat. Phys.* **2012**, *8*, 172. + +26. 
Du, X.; Skachko, I.; Barker, A.; Andrei, E.Y. Approaching ballistic transport in suspended graphene. *Nat. Nanotechnol.* **2008**, *3*, 491–495. + +27. Cooper, D.R.; D'Anjou, B.; Ghattamaneni, N.A.; Harack, B.; Hilke, M.; Horth, A.; Majlis, N.; Massicotte, M.; Vandsburger, L.; Whiteway, E.; et al. Experimental Review of Graphene. *ISRN Condensed Matter Phys.* **2012**, 2012, Article ID 501686. + +28. San-Jose, P.; Lado, J. L.; Aguado, R.; Guinea, F.; Fernandez-Rossier, J. Majorana Zero Modes in Graphene. *Phys. Rev. X* **2015**, *5*, 041042. + +29. Wang, J.R.; Liu, G.Z. Eliashberg theory of excitonic insulating transition in graphene. *J. Phys. Condensed Matter* **2011**, *23*, 155602. + +30. Pessa, E. The Majorana Oscillator. *Electr. J. Theor. Phys.* **2006**, *3*, 285–292. + +31. Majorana, E. Theory of Relativistic Particles with Arbitrary Intrinsic Moment. *Nuovo Cimento* **1932**, *9*, 335. + +32. Peskin, M.E.; Schroeder, D.V. *An Introduction to Quantum Field Theory*; Addison-Wesley Publishing Company: Oxford, UK, 1995. + +33. Simpao, V.A. Exact Solution of Majorana Equation via Heaviside Operational Ansatz. *Electr. J. Theor. Phys.* **2006**, *3*, 239–247. + +34. Hainzl, C.; Lewin, M.; Sparber, C. Ground state properties of graphene in Hartree-Fock theory. *J. Math. Phys.* **2012**, *53*, 095220. + +35. Grushevskaya, H.V.; Krylov, G.G. Charge Carriers Asymmetry and Energy Minigaps in Monolayer Graphene: Dirac-Hartree-Fock approach. *Int. J. Nonliner Phenom. Complex Syst.* **2013**, *16*, 189–208. + +36. Grushevskaya, H.V.; Krylov, G.G. Nanotechnology in the Security Systems, NATO Science for Peace and Security Series C: Environmental Security; Bonča, J., Kruchinin, S., Eds.; Springer: Dordrecht, The Netherlands, 2015; Chapter 3. + +37. Grushevskaya, H.V.; Krylov, G.G. Electronic Structure and Transport in Graphene: QuasiRelativistic Dirac-Hartree-Fock Self-Consistent Field Approximation. In *Graphene Science Handbook*. Vol. 
3: Electrical and Optical Properties; Aliofkhazraei, M., Ali, N., Milne, W.I., Ozkan, C.S., Mitura, S., Gervasoni, J.L., Eds.; CRC Press—Taylor&Francis Group: Boca Raton, FL, USA, 2016. + +38. Gribov, V.N. *Quantum Electrodynamics*; R & C Dynamics: Izhevsk, Russia, 2001. (In Russian) + +39. Fock, V.A. *Principles of Quantum Mechanics*; Science: Moscow, Russia, 1976. (In Russian) + +40. Krylova, H.; Hursky, L. Spin Polarization in Strong-Correlated Nanosystems; LAP LAMBERT Academic Publishing, AV Akademikerverlag GmbH & Co.: Saarbrücken, Germany, 2013. + +41. Semenoff, G.W. Condensed-matter simulation of a three-dimensional anomaly. *Phys. Rev. Lett.* **1984**, *53*, 2449. + +42. Abergel, D.S.L.; Apalkov, V.; Berashevich, J.; Ziegler, K.; Chakraborty, T. Properties of graphene: A theoretical perspective. *Adv. Phys.* **2010**, *59*, 261. + +43. Gusynin, V.P.; Sharapov, S.G.; Carbotte, J.P. AC Conductivity of Graphene: From Tight-binding model to 2 + 1-dimensional quantum electrodynamics. *Int. J. Mod. Phys. B* **2007**, *21*, 4611. +---PAGE_BREAK--- + +44. Wallace, P.R. The band theory of graphite. *Phys. Rev.* **1947**, *71*, 622-634. + +45. Saito, R.; Dresselhaus, G.; Dresselhaus, M.S. *Physical Properties of Carbon Nanotubes*; Imperial: London, UK, 1998. + +46. Reich, S.; Maultzsch, J.; Thomsen, C.; Ordejón, P. Tight-binding description of graphene. *Phys. Rev. B* **2002**, *66*, 035412. + +47. Castro Neto, A.H.; Guinea, F.; Peres, N.M.; Novoselov, K.S.; Geim, A.K. The electronic properties of graphene. *Rev. Mod. Phys.* **2009**, *81*, 109. + +48. Brihuega, I.; Mallet, P.; González-Herrero, H.; Trambly de Laissardière, G.; Ugeda, M.M.; Magaud, L.; Gomez-Rodríguez, J.M.; Ynduráin, F.; Veuillen, J.-Y. Unraveling the Intrinsic and Robust Nature of van Hove Singularities in Twisted Bilayer Graphene by Scanning Tunneling Microscopy and Theoretical Analysis. *Phys. Rev. Lett.* **2012**, *109*, 196802; Erratum in *2012*, *109*, 209905. + +49. 
Andrei, E.Y.; Li, G.; Du, X. Electronic properties of graphene: A perspective from scanning tunneling microscopy and magnetotransport. *Rep. Prog. Phys.* **2012**, *75*, 056501. + +50. Grushevskaya, H.V.; Krylov, G.; Gaisyonok, V.A.; Serow, D.V. Symmetry of Model N = 3 for Graphene with Charged Pseudo-Excitons. *J. Nonliner Phenom. Complex Sys.* **2015**, *18*, 81-98. + +51. Zaslavsky, G. M.; Sagdeev, R.Z.; Usikov, D.A.; Chernikov, A.A. *Weak Chaos and Quasi-Regular Patterns*; Cambridge University Press: New York, NY, USA, 1991. + +52. Guckenheimer, J.; Holmes, P. *Nonlinear Oscillations, Dynamical Systems, and Bifurcations of Vector Fields*; Springer-Verlag: New York, NY, USA, 1990; Volume 42. + +© 2016 by the authors. Licensee MDPI, Basel, Switzerland. This article is an open access +article distributed under the terms and conditions of the Creative Commons Attribution +(CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +Chapter 3: +Papers Published by This Issue Editor in +Symmetry +---PAGE_BREAK--- + + +---PAGE_BREAK--- + +Article + +Lorentz Harmonics, Squeeze Harmonics and Their +Physical Applications + +Young S. Kim ¹,* and Marilyn E. Noz ² + +¹ Center for Fundamental Physics, University of Maryland, College Park, MD 20742, USA + +² Department of Radiology, New York University, New York, NY 10016, USA + +* E-Mail: yskim@umd.edu; Tel.: 301-405-6024. + +Received: 6 January 2011; in revised form: 7 February 2011 / Accepted: 11 February 2011 / +Published: 14 February 2011 + +**Abstract:** Among the symmetries in physics, the rotation symmetry is most familiar to us. It is known that the spherical harmonics serve useful purposes when the world is rotated. Squeeze transformations are also becoming more prominent in physics, particularly in optical sciences and in high-energy physics. As can be seen from Dirac's light-cone coordinate system, Lorentz boosts are squeeze transformations. 
Thus the squeeze transformation is one of the fundamental transformations in Einstein's Lorentz-covariant world. It is possible to define a complete set of orthonormal functions defined for one Lorentz frame. It is shown that the same set can be used for other Lorentz frames. Transformation properties are discussed. Physical applications are discussed in both optics and high-energy physics. It is shown that the Lorentz harmonics provide the mathematical basis for squeezed states of light. It is shown also that the same set of harmonics can be used for understanding Lorentz-boosted hadrons in high-energy physics. It is thus possible to transmit physics from one branch of physics to the other branch using the mathematical basis common to them. + +**Keywords:** Lorentz harmonics; relativistic quantum mechanics; squeeze transformation; Dirac's efforts; hidden variables; Lorentz-covariant bound states; squeezed states of light + +Classification: PACS 03.65.Ge, 03.65.Pm + +# 1. Introduction + +In this paper, we are concerned with symmetry transformations in two dimensions, and we are accustomed to the coordinate system specified by x and y variables. On the xy plane, we know how to make rotations and translations. The rotation in the xy plane is performed by the matrix algebra + +$$ \begin{pmatrix} x' \\ y' \end{pmatrix} = \begin{pmatrix} \cos \theta & -\sin \theta \\ \sin \theta & \cos \theta \end{pmatrix} \begin{pmatrix} x \\ y \end{pmatrix} \qquad (1) $$ + +but we are not yet familiar with + +$$ \begin{pmatrix} z' \\ t' \end{pmatrix} = \begin{pmatrix} \cosh \eta & \sinh \eta \\ \sinh \eta & \cosh \eta \end{pmatrix} \begin{pmatrix} z \\ t \end{pmatrix} \qquad (2) $$ +---PAGE_BREAK--- + +We see this form when we learn Lorentz transformations, but there is a tendency in the literature to avoid this form, especially in high-energy physics. 
Since this transformation can also be written as + +$$ \begin{pmatrix} u' \\ v' \end{pmatrix} = \begin{pmatrix} \exp(\eta) & 0 \\ 0 & \exp(-\eta) \end{pmatrix} \begin{pmatrix} u \\ v \end{pmatrix} \qquad (3) $$ + +with + +$$ u = \frac{z+t}{\sqrt{2}}, \quad v = \frac{z-t}{\sqrt{2}} \qquad (4) $$ + +where the variables *u* and *v* are expanded and contracted respectively, we call Equation (2) or Equation (3) **squeeze transformations** [1]. + +From the mathematical point of view, the symplectic group $Sp(2)$ contains both the rotation and squeeze transformations of Equations (1) and (2), and its mathematical properties have been extensively discussed in the literature [1,2]. This group has been shown to be one of the essential tools in quantum optics. From the mathematical point of view, the squeezed state in quantum optics is a harmonic oscillator representation of this $Sp(2)$ group [1]. + +We are interested in this paper in "squeeze transformations" of localized functions. We are quite familiar with the role of spherical harmonics in three dimensional rotations. We use there the same set of harmonics, but the rotated function has different linear combinations of those harmonics. Likewise, we are interested in a complete set of functions which will serve the same purpose for squeeze transformations. It will be shown that harmonic oscillator wave functions can serve the desired purpose. From the physical point of view, squeezed states define the squeeze or Lorentz harmonics. + +In 2003, Giedke et al. used the Gaussian function to discuss the entanglement problems in information theory [3]. This paper allows us to use the oscillator wave functions to address many interesting current issues in quantum optics and information theory. In 2005, the present authors noted that the formalism of Lorentz-covariant harmonic oscillators leads to a space-time entanglement [4]. 
We developed the oscillator formalism to deal with hadronic phenomena observed in high-energy laboratories [5]. It is remarkable that the mathematical formalism of Giedke et al. is identical with that of our oscillator formalism. + +While quantum optics or information theory is a relatively new branch of physics, the squeeze transformation has been the backbone of Einstein's special relativity. While Lorentz, Poincaré, and Einstein used the transformation of Equation (2) for Lorentz boosts, Dirac observed that the same equation can be written in the form of Equation (3) [6]. Unfortunately, this squeeze aspect of Lorentz boosts has not been fully addressed in high-energy physics dealing with particles moving with relativistic speeds. + +Thus, we can call the same set of functions "squeeze harmonics" and "Lorentz harmonics" in quantum optics and high-energy physics respectively. This allows us to translate the physics of quantum optics or information theory into that of high-energy physics. + +The physics of high-energy hadrons requires a Lorentz-covariant localized quantum system. This description requires one variable which is hidden in the present form of quantum mechanics. It is the time-separation variable between two constituent particles in a quantum bound system like the hydrogen atom, where the Bohr radius measures the separation between the proton and the electron. What happens to this quantity when the hydrogen atom is boosted and the time-separation variable starts playing its role? The Lorentz harmonics will allow us to address this question. + +In Section 2, it is noted that the Lorentz boost of localized wave functions can be described in terms of one-dimensional harmonic oscillators. Thus, those wave functions constitute the Lorentz harmonics. It is also noted that the Lorentz boost is a squeeze transformation. 
+ +In Section 3, we examine Dirac's life-long efforts to make quantum mechanics consistent with special relativity, and present a Lorentz-covariant form of bound-state quantum mechanics. In Section 4, +---PAGE_BREAK--- + +we construct a set of Lorentz-covariant harmonic oscillator wave functions, and show that they can be +given a Lorentz-covariant probability interpretation. + +In Section 5, the formalism is shown to constitute a mathematical basis for squeezed states of light, and for quantum entangled states. In Section 6, this formalism can serve as the language for Feynman's rest of the universe [7]. Finally, in Section 7, we show that the harmonic oscillator formalism can be applied to high-energy hadronic physics, and what we observe there can be interpreted in terms of what we learn from quantum optics. + +## 2. Lorentz or Squeeze Harmonics + +Let us start with the two-dimensional plane. We are quite familiar with rigid transformations such as rotations and translations in two-dimensional space. Things are different for non-rigid transformations such as a circle becoming an ellipse. + +We start with the well-known one-dimensional harmonic oscillator eigenvalue equation + +$$ \frac{1}{2} \left[ -\left(\frac{\partial}{\partial x}\right)^2 + x^2 \right] \chi_n(x) = \left(n + \frac{1}{2}\right) \chi_n(x) \quad (5) $$ + +For a given value of integer $n$, the solution takes the form + +$$ \chi_n(x) = \left[ \frac{1}{\sqrt{\pi 2^n n!}} \right]^{1/2} H_n(x) \exp \left( -\frac{x^2}{2} \right) \quad (6) $$ + +where $H_n(x)$ is the Hermite polynomial of the n-th degree. We can then consider a set of functions with all integer values of $n$. 
They satisfy the orthogonality relation + +$$ \int \chi_n(x) \chi_{n'}(x)\, dx = \delta_{nn'} \quad (7) $$ + +This relation allows us to define $f(x)$ as + +$$ f(x) = \sum_{n} A_{n} \chi_{n}(x) \quad (8) $$ + +with + +$$ A_n = \int f(x)\chi_n(x)dx \quad (9) $$ + +Let us next consider another variable added to Equation (5), and the differential equation + +$$ \frac{1}{2} \left\{ \left[ -\left(\frac{\partial}{\partial x}\right)^2 + x^2 \right] + \left[ -\left(\frac{\partial}{\partial y}\right)^2 + y^2 \right] \right\} \phi(x,y) = \lambda \phi(x,y) \quad (10) $$ + +This equation can be re-arranged to + +$$ \frac{1}{2} \left\{ -\left(\frac{\partial}{\partial x}\right)^2 - \left(\frac{\partial}{\partial y}\right)^2 + x^2 + y^2 \right\} \phi(x,y) = \lambda \phi(x,y) \quad (11) $$ + +This differential equation is invariant under the rotation defined in Equation (1). In terms of the polar coordinate system with + +$$ r = \sqrt{x^2 + y^2}, \qquad \tan \theta = \left(\frac{y}{x}\right) \quad (12) $$ + +this equation can be written: + +$$ \frac{1}{2} \left\{ -\frac{\partial^2}{\partial r^2} - \frac{1}{r} \frac{\partial}{\partial r} - \frac{1}{r^2} \frac{\partial^2}{\partial \theta^2} + r^2 \right\} \phi(r, \theta) = \lambda \phi(r, \theta) \quad (13) $$ +---PAGE_BREAK--- + +and the solution takes the form + +$$
\phi(r, \theta) = e^{-r^2/2} R_{n,m}(r) \{ A_m \cos(m\theta) + B_m \sin(m\theta) \} \quad (14)
$$ + +The radial equation should satisfy + +$$
\frac{1}{2} \left\{ -\frac{\partial^2}{\partial r^2} - \frac{1}{r} \frac{\partial}{\partial r} + \frac{m^2}{r^2} + r^2 \right\} R_{n,m}(r) = (n+m+1)R_{n,m}(r) \quad (15)
$$ + +In the polar form of Equation (14), we can achieve the rotation of this function by changing the angle variable $\theta$. + +On the other hand, the differential equation of Equation (10) is separable in the x and y variables. 
+
The eigen solution takes the form + +$$
\Phi_{n_x, n_y}(x, y) = \chi_{n_x}(x) \chi_{n_y}(y) \tag{16}
$$ + +with + +$$
\lambda = n_x + n_y + 1
\quad
(17)
$$ + +If a function $f(x,y)$ is sufficiently localized around the origin, it can be expanded as + +$$
f(x,y) = \sum_{n_x, n_y} A_{n_x, n_y} \chi_{n_x}(x) \chi_{n_y}(y) \qquad (18)
$$ + +with + +$$
A_{n_x, n_y} = \int f(x,y)\chi_{n_x}(x)\chi_{n_y}(y) dx dy \quad (19)
$$ + +If we rotate $f(x,y)$ according to Equation (1), it becomes $f(x^*, y^*)$, with + +$$
x^* = (\cos \theta)x - (\sin \theta)y, \quad y^* = (\sin \theta)x + (\cos \theta)y \tag{20}
$$ + +This rotated function can also be expanded in terms of $\chi_{n_x}(x)$ and $\chi_{n_y}(y)$: + +$$
f(x^*, y^*) = \sum_{n_x, n_y} A_{n_x, n_y}^* \chi_{n_x}(x) \chi_{n_y}(y) \quad (21)
$$ + +with + +$$
A_{n_x, n_y}^* = \int f(x^*, y^*) \chi_{n_x}(x) \chi_{n_y}(y) dx dy \quad (22)
$$ + +Next, let us consider the differential equation + +$$
\frac{1}{2} \left\{ -\left(\frac{\partial}{\partial z}\right)^2 + \left(\frac{\partial}{\partial t}\right)^2 + z^2 - t^2 \right\} \psi(z,t) = \lambda \psi(z,t) \quad (23)
$$ + +Here we use the variables *z* and *t*, instead of *x* and *y*. Clearly, this equation can be also separated in the
*z* and *t* coordinates, and the eigen solution can be written as + +$$
\psi_{n_z, n_t}(z,t) = \chi_{n_z}(z)\chi_{n_t}(t) \tag{24}
$$ + +with + +$$
\lambda = n_z - n_t. \tag{25}
$$ + +The oscillator equation is not invariant under coordinate rotations of the type given in Equation (1).
It is however invariant under the squeeze transformation given in Equation (2).
---PAGE_BREAK--- + +The differential equation of Equation (23) becomes + +$$
\left\{ -\frac{\partial^2}{\partial u \, \partial v} + uv \right\} \psi(u, v) = \lambda \psi(u, v) \quad (26)
$$ + +Both Equation (11) and Equation (23) are two-dimensional differential equations. 
They are +invariant under rotations and squeeze transformations respectively. They take convenient forms +in the polar and squeeze coordinate systems respectively as shown in Equation (13) and Equation (26). + +The solutions of the rotation-invariant equation are well known, but the solutions of the squeeze-invariant equation are still strange to the physics community. Fortunately, both equations are separable in the Cartesian coordinate system. This allows us to study the latter in terms of the familiar rotation-invariant equation. This means that if the solution is sufficiently localized in the z and t plane, it can be written as + +$$ +\psi(z, t) = \sum_{n_z, n_t} A_{n_z, n_t} \chi_{n_z}(z) \chi_{n_t}(t) \tag{27} +$$ + +with + +$$ +A_{n_z, n_t} = \int \psi(z,t) \chi_{n_z}(z) \chi_{n_t}(t) \, dz \, dt \quad (28) +$$ + +If we squeeze the coordinate according to Equation (2), + +$$ +\psi(z^*, t^*) = \sum_{n_z, n_t} A_{n_z, n_t}^* \chi_{n_z}(z) \chi_{n_t}(t) \quad (29) +$$ + +with + +$$ +A_{n_z, n_t}^* = \int \psi(z^*, t^*) \chi_{n_z}(z) \chi_{n_t}(t) \, dz \, dt \quad (30) +$$ + +Here again both the original and transformed wave functions are linear combinations of the wave +functions for the one-dimensional harmonic oscillator given in Equation (6). + +The wave functions for the one-dimensional oscillator are well known, and they play important +roles in many branches of physics. It is gratifying to note that they could play an essential role in +squeeze transformations and Lorentz boosts, see Table (1). We choose to call them Lorentz harmonics +or squeeze harmonics. + +**Table 1.** Cylindrical and hyperbolic equations. The cylindrical equation is invariant under rotation while the hyperbolic equation is invariant under squeeze transformation + + + + + + + + + + + + + + + + + + + + + +
+ Equation + + Invariant under + + Eigenvalue +
+ Cylindrical + + Rotation + + λ = nx + ny + 1 +
+ Hyperbolic + + Squeeze + + λ = nx - ny +
+ +**3. The Physical Origin of Squeeze Transformations** + +Paul A. M. Dirac made it his life-long effort to combine quantum mechanics with special relativity. +We examine the following four of his papers. + +* In 1927 [8], Dirac pointed out the time-energy uncertainty should be taken into consideration for efforts to combine quantum mechanics and special relativity. + +* In 1945 [9], Dirac considered four-dimensional harmonic oscillator wave functions with + +$$ +\exp\left\{-\frac{1}{2}\left(x^2 + y^2 + z^2 + t^2\right)\right\} \qquad (31) +$$ + +and noted that this form is not Lorentz-covariant. +---PAGE_BREAK--- + +* In 1949 [6], Dirac introduced the light-cone variables of Equation (4). He also noted that the construction of a Lorentz-covariant quantum mechanics is equivalent to the construction of a representation of the Poncaré group. + +* In 1963 [10], Dirac constructed a representation of the (3 + 2) deSitter group using two harmonic oscillators. This deSitter group contains three (3 + 1) Lorentz groups as its subgroups. + +In each of these papers, Dirac presented the original ingredients which can serve as building blocks for making quantum mechanics relativistic. We combine those elements using Wigner's little groups [11] and Feynman's observation of high-energy physics [12–14]. + +First of all, let us combine Dirac’s 1945 paper and his light-cone coordinate system given in his 1949 paper. Since x and y variables are not affected by Lorentz boosts along the z direction in Equation (31), it is sufficient to study the Gaussian form + +$$ \exp\left\{-\frac{1}{2}(z^2 + t^2)\right\} \qquad (32) $$ + +This form is certainly not invariant under Lorentz boost as Dirac noted. On the other hand, it can be written as + +$$ \exp\left\{-\frac{1}{2}(u^2 + v^2)\right\} \qquad (33) $$ + +where *u* and *v* are the light-cone variables defined in Equation (4). 
If we make the Lorentz-boost or Lorentz squeeze according to Equation (3), this Gaussian form becomes + +$$ \exp\left\{-\frac{1}{2}\left(e^{-2\eta}u^2 + e^{2\eta}v^2\right)\right\} \qquad (34) $$ + +If we write the Lorentz boost as + +$$ z' = \frac{z + \beta t}{\sqrt{1 - \beta^2}} \qquad t' = \frac{t + \beta z}{\sqrt{1 - \beta^2}} \qquad (35) $$ + +where $\beta$ is the velocity parameter $v/c$, then $\beta$ is related to $\eta$ by + +$$ \beta = \tanh(\eta) \qquad (36) $$ + +Let us go back to the Gaussian form of Equation (32); this expression is consistent with Dirac’s earlier paper on the time-energy uncertainty relation [8]. According to Dirac, this is a c-number uncertainty relation without excitations. The existence of the time-energy uncertainty is illustrated in the first part of Figure 1. + +In his 1927 paper, Dirac noted the space-time asymmetry in uncertainty relations. While there are no time-like excitations, quantum mechanics allows excitations along the z direction. How can we take care of this problem? + +If we suppress the excitations along the *t* coordinate, the normalized solution of this differential equation, Equation (24), is + +$$ \psi(z,t) = \left( \frac{1}{\pi 2^n n!} \right)^{1/2} H_n(z) \exp \left\{ - \left( \frac{z^2 + t^2}{2} \right) \right\} \qquad (37) $$ +---PAGE_BREAK--- + +**Figure 1.** Space-time picture of quantum mechanics. In his 1927 paper, Dirac noted that there is a c-number time-energy uncertainty relation, in addition to Heisenberg's position-momentum uncertainty relations, with quantum excitations. This idea is illustrated in the first figure (upper left). In his 1949 paper, Dirac produced his light-cone coordinate system as illustrated in the second figure (upper right). It is then not difficult to produce the third figure, for a Lorentz-covariant picture of quantum mechanics. This Lorentz-squeeze property is observed in high-energy laboratories through Feynman's parton picture discussed in Section 7. 
+ +If we boost the coordinate system, the Lorentz-boosted wave functions should take the form + +$$ \begin{aligned} \psi_{\eta}^{n}(z, t) = & \left( \frac{1}{\pi 2^{n} n!} \right)^{1/2} H_n \left( z \cosh \eta - t \sinh \eta \right) \\ & \times \exp \left\{ - \left[ \frac{( \cosh 2\eta )(z^2 + t^2) - 2( \sinh 2\eta )zt }{2} \right] \right\} \end{aligned} \quad (38) $$ + +These are the solutions of the phenomenological equation of Feynman *et al.* [12] for internal motion of the quarks inside a hadron. In 1971, Feynman *et al.* wrote down a Lorentz-invariant differential equation of the form + +$$ \frac{1}{2} \left\{ - \left( \frac{\partial}{\partial x_{\mu}} \right)^2 + x_{\mu}^2 \right\} \psi(x_{\mu}) = (\lambda + 1) \psi(x_{\mu}) \quad (39) $$ + +where $x_\mu$ is for the Lorentz-covariant space-time four vector. This oscillator equation is separable in the Cartesian coordinate system, and the transverse components can be separated out. Thus, the differential equation of Equation (23) contains the essential element of the Lorentz-invariant Equation (39). + +However, the solutions contained in Reference [12] are not normalizable and therefore cannot carry physical interpretations. It was shown later that there are normalizable solutions which constitute a representation of Wigner's O(3)-like little group [5,11,15]. The O(3) group is the three-dimensional rotation group without a time-like direction or time-like excitations. This addresses Dirac's concern about the space-time asymmetry in uncertainty relations [8]. Indeed, the expression of Equation (37) is considered to be the representation of Wigner's little group for quantum bound states [11,15]. We shall return to more physical questions in Section 7. + +## 4. Further Properties of the Lorentz Harmonics + +Let us continue our discussion of quantum bound states using harmonic oscillators. We are interested in this section to see how the oscillator solution of Equation (37) would appear to a moving observer. 
+---PAGE_BREAK--- + +The variables z and *t* are the longitudinal and time-like separations between the two constituent particles. In terms of the light-cone variables defined in Equation (4), the solution of Equation (37) takes the form + +$$
\psi_0^n(z, t) = \left[ \frac{1}{\pi n! 2^n} \right]^{1/2} H_n \left( \frac{u+v}{\sqrt{2}} \right) \exp \left\{ - \left( \frac{u^2 + v^2}{2} \right) \right\} \quad (40)
$$ + +and + +$$
\psi_{\eta}^{n}(z,t) = \left[ \frac{1}{\pi n! 2^n} \right]^{1/2} H_n \left( \frac{e^{-\eta} u + e^{\eta} v}{\sqrt{2}} \right) \exp \left\{ - \left( \frac{e^{-2\eta} u^2 + e^{2\eta} v^2}{2} \right) \right\} \quad (41)
$$ + +for the rest and moving hadrons respectively. + +It is mathematically possible to expand this as [5,16] + +$$
\psi_{\eta}^{n}(z, t) = \left(\frac{1}{\cosh \eta}\right)^{(n+1)} \sum_{k} \left[\frac{(n+k)!}{n!k!}\right]^{1/2} (\tanh \eta)^{k} \chi_{n+k}(z) \chi_{k}(t) \quad (42)
$$ + +where $\chi_n(z)$ is the $n$-th excited state oscillator wave function which takes the familiar form + +$$
\chi_n(z) = \left[ \frac{1}{\sqrt{\pi 2^n n!}} \right]^{1/2} H_n(z) \exp \left( -\frac{z^2}{2} \right) \qquad (43)
$$ + +as given in Equation (6). This is an expansion of the Lorentz-boosted wave function in terms of the Lorentz harmonics. + +If the hadron is at rest, there are no time-like oscillations. There are time-like oscillations for a moving hadron. This is the way in which the space and time variable mix covariantly. This also provides a resolution of the space-time asymmetry pointed out by Dirac in his 1927 paper [8]. We shall return to this question in Section 6. Our next question is whether those oscillator equations can be given a probability interpretation. + +Even though we suppressed the excitations along the *t* direction in the hadronic rest frame, it is an interesting mathematical problem to start with the oscillator wave function with an excited state in the time variable. 
This problem was addressed by Rotbart in 1981 [17]. + +## 4.1. Lorentz-Invariant Orthogonality Relations + +Let us consider two wave functions $\psi_\eta^n(z, t)$. If two covariant wave functions are in the same Lorentz frame and have thus the same value of $\eta$, the orthogonality relation + +$$
(\psi_{\eta}^{n'}, \psi_{\eta}^{n}) = \delta_{nn'} \quad (44)
$$ + +is satisfied. + +If those two wave functions have different values of η, we have to start with + +$$
(\psi_{\eta'}^{n'}, \psi_{\eta}^{n}) = \int (\psi_{\eta'}^{n'}(z,t))^* \psi_{\eta}^{n}(z,t) dz dt \quad (45)
$$ + +Without loss of generality, we can assume $\eta' = 0$ in the system where $\eta = 0$, and evaluate the integration. The result is [18] + +$$
(\psi_0^{n'}, \psi_\eta^n) = \int (\psi_0^{n'}(z,t))^* \psi_\eta^n(z,t) \, dz \, dt = (\sqrt{1-\beta^2})^{(n+1)} \delta_{n,n'} \quad (46)
$$ + +where $\beta = \tanh(\eta)$, as given in Equation (36). This is like the Lorentz-contraction property of a rigid rod. The ground state is like a single rod. Since we obtain the first excited state by applying a step-up operator, this state should behave like a multiplication of two rods, and a similar argument can be given to *n* rigid rods. This is illustrated in Figure 2. +---PAGE_BREAK--- + +**Figure 2.** Orthogonality relations for the covariant harmonic oscillators. The orthogonality remains invariant. For the two wave functions in the orthogonality integral, the result is zero if they have different values of *n*. If both wave functions have the same value of *n*, the integral shows the Lorentz contraction property. + +With these orthogonality properties, it is possible to give quantum probability interpretation in the Lorentz-covariant world, and it was so stated in our 1977 paper [19]. + +## 4.2. Probability Interpretations + +Let us study the probability issue in terms of the one-dimensional oscillator solution of Equation (6) whose probability interpretation is indisputable. 
Let us also go back to the rotationally invariant differential equation of Equation (11). Then the product + +$$ \chi_{n_x}(x) \chi_{n_y}(y) \quad (47) $$ + +also has a probability interpretation with the eigen value $(n_x + n_y + 1)$. Thus the series of the form [1,5] + +$$ \phi_{\eta}^{n}(x, y) = \left( \frac{1}{\cosh \eta} \right)^{(n+1)} \sum_{k} \left[ \frac{(n+k)!}{n!k!} \right]^{1/2} (\tanh \eta)^k \chi_{n+k}(x) \chi_k(y) \quad (48) $$ + +also has its probability interpretation, but it is not in an eigen state. Each term in this series has an eigenvalue $(n + 2k + 1)$. The expectation value of Equation (11) is + +$$ \left(\frac{1}{\cosh \eta}\right)^{2(n+1)} \sum_k \frac{(n+2k+1)(n+k)!}{n!k!} (\tanh \eta)^{2k} \quad (49) $$ + +If we replace the variables *x* and *y* by *z* and *t* respectively in the above expression of Equation (48), it becomes the Lorentz-covariant wave function of Equation (42). Each term $\chi_{n+k}(z)\chi_k(t)$ in the series has the eigenvalue *n*. Thus the series is in the eigen state with the eigenvalue *n*. + +This difference does not prevent us from importing the probability interpretation from that of Equation (48). + +In the present covariant oscillator formalism, the time-separation variable can be separated from the rest of the wave function, and does not require further interpretation. For a moving +---PAGE_BREAK--- + +hadron, time-like excitations are mixed with longitudinal excitations. Is it possible to give a physical interpretation to those time-like excitations? To address this issue, we shall study in Section 5 two-mode squeezed states also based on the mathematics of Equation (48). There, both variables have their physical interpretations. + +**5. Two-Mode Squeezed States** + +Harmonic oscillators play the central role also in quantum optics. There the $n^{th}$ excited oscillator state corresponds to the *n*-photon state $|n\rangle$. The ground state means the zero-photon or vacuum state $|0\rangle$. 
The single-photon coherent state can be written as + +$$|\alpha\rangle = e^{-\alpha\alpha^*/2} \sum_n \frac{\alpha^n}{\sqrt{n!}} |n\rangle \quad (50)$$ + +which can be written as [1] + +$$|\alpha\rangle = e^{-\alpha\alpha^*/2} \sum_n \frac{\alpha^n}{n!} (\hat{a}^\dagger)^n |0\rangle = \left\{e^{-\alpha\alpha^*/2}\right\} \exp\{\alpha \hat{a}^\dagger\} |0\rangle \quad (51)$$ + +This aspect of the single-photon coherent state is well known. Here we are dealing with one kind of photon, namely with a given momentum and polarization. The state $|n\rangle$ means there are $n$ photons of this kind. + +Let us next consider a state of two kinds of photons, and write $|n_1, n_2\rangle$ as the state of $n_1$ photons of the first kind, and $n_2$ photons of the second kind [20]. We can then consider the form + +$$\frac{1}{\cosh \eta} \exp \{(\tanh \eta) \hat{a}_1^\dagger \hat{a}_2^\dagger\} |0, 0\rangle \quad (52)$$ + +The operator $\hat{a}_1^\dagger \hat{a}_2^\dagger$ was studied by Dirac in connection with his representation of the deSitter group, as we mentioned in Section 3. After making a Taylor expansion of Equation (52), we arrive at + +$$\frac{1}{\cosh \eta} \sum_k (\tanh \eta)^k |k, k\rangle \quad (53)$$ + +which is the squeezed vacuum state or two-photon coherent state [1,20]. This expression is the wave function of Equation (48) in a different notation. This form is also called the entangled Gaussian state of two photons [3] or the entangled oscillator state of space and time [4]. + +If we start with the *n*-particle state of the first photon, we obtain + +$$ \begin{aligned} & \left[ \frac{1}{\cosh \eta} \right]^{(n+1)} \exp \left\{ (\tanh \eta) \hat{a}_1^\dagger \hat{a}_2^\dagger \right\} |n, 0\rangle \\ &= \left[ \frac{1}{\cosh \eta} \right]^{(n+1)} \sum_k \left[ \frac{(n+k)!}{n!k!} \right]^{1/2} (\tanh \eta)^k |k+n, k\rangle \end{aligned} \quad (54) $$ + +which is the wave function of Equation (42) in a different notation. This is the *n*-photon squeezed state [1]. 
+ +Since the two-mode squeezed state and the covariant harmonic oscillators share the same set of mathematical formulas, it is possible to transmit physical interpretations from one to the other. For two-mode squeezed state, both photons carry physical interpretations, while the interpretation is yet to be given to the time-separation variable in the covariant oscillator formalism. It is clear from Equation (42) and Equation (54) that the time-like excitations are like the second-photon states. + +What would happen if the second photon is not observed? This interesting problem was addressed by Yurke and Potasek [21] and by Ekert and Knight [22]. They used the density matrix formalism and +---PAGE_BREAK--- + +integrated out the second-photon states. This increases the entropy and temperature of the system. We choose not to reproduce their mathematics, because we will be presenting the same mathematics in Section 6. + +**6. Time-Separation Variable in Feynman's Rest of the Universe** + +As was noted in the previous section, the time-separation variable has an important role in the covariant formulation of the harmonic oscillator wave functions. It should exist wherever the space separation exists. The Bohr radius is the measure of the separation between the proton and electron in the hydrogen atom. If this atom moves, the radius picks up the time separation, according to Einstein [23]. + +On the other hand, the present form of quantum mechanics does not include this time-separation variable. The best way we can interpret it at the present time is to treat this time-separation as a variable in Feynman's rest of the universe [24]. In his book on statistical mechanics [7], Feynman states + +> When we solve a quantum-mechanical problem, what we really do is divide the universe into two parts - the system in which we are interested and the rest of the universe. We then usually act as if the system in which we are interested comprised the entire universe. 
To motivate the use of density matrices, let us see what happens when we include the part of the universe outside the system. + +The failure to include what happens outside the system results in an increase of entropy. The entropy is a measure of our ignorance and is computed from the density matrix [25]. The density matrix is needed when the experimental procedure does not analyze all relevant variables to the maximum extent consistent with quantum mechanics [26]. If we do not take into account the time-separation variable, the result is an increase in entropy [27,28]. + +For the covariant oscillator wave functions defined in Equation (42), the pure-state density matrix is + +$$ \rho_{\eta}^{n}(z, t; z', t') = \psi_{\eta}^{n}(z, t) \left\{\psi_{\eta}^{n}(z', t')\right\}^* \quad (55) $$ + +which satisfies the condition $\rho^2 = \rho$: + +$$ \rho_{\eta}^{n}(z, t; z', t') = \int \rho_{\eta}^{n}(z, t; z'', t'') \rho_{\eta}^{n}(z'', t''; z', t') dz'' dt'' \quad (56) $$ + +However, in the present form of quantum mechanics, it is not possible to take into account the time separation variables. Thus, we have to take the trace of the matrix with respect to the $t$ variable. Then the resulting density matrix is: + +$$ \begin{aligned} \rho_{\eta}^{n}(z, z') &= \int \psi_{\eta}^{n}(z, t) \left\{\psi_{\eta}^{n}(z', t)\right\}^* dt \\ &= \left(\frac{1}{\cosh \eta}\right)^{2(n+1)} \sum_{k} \frac{(n+k)!}{n!k!} (\tanh \eta)^{2k} \psi_{n+k}(z) \psi_{n+k}^{*}(z') \end{aligned} \quad (57) $$ + +The trace of this density matrix is one, but the trace of $\rho^2$ is less than one, as: + +$$ \begin{aligned} \mathrm{Tr}(\rho^2) &= \int \rho_{\eta}^{n}(z,z') \rho_{\eta}^{n}(z',z) dzdz' \\ &= \left(\frac{1}{\cosh \eta}\right)^{4(n+1)} \sum_{k} \left[\frac{(n+k)!}{n!k!}\right]^2 (\tanh \eta)^{4k} \end{aligned} \quad (58) $$ + +which is less than one. This is due to the fact that we do not know how to deal with the time-like separation in the present formulation of quantum mechanics. Our knowledge is less than complete. 
+---PAGE_BREAK--- + +The standard way to measure this ignorance is to calculate the entropy defined as + +$$S = -\mathrm{Tr}(\rho \ln(\rho)) \qquad (59)$$ + +If we pretend to know the distribution along the time-like direction and use the pure-state density matrix given in Equation (55), then the entropy is zero. However, if we do not know how to deal with the distribution along $t$, then we should use the density matrix of Equation (57) to calculate the entropy, and the result is + +$$S = 2(n+1) \left\{ (\cosh \eta)^2 \ln(\cosh \eta) - (\sinh \eta)^2 \ln(\sinh \eta) \right\} \\ - \left( \frac{1}{\cosh \eta} \right)^{2(n+1)} \sum_k \frac{(n+k)!}{n!k!} \ln \left[ \frac{(n+k)!}{n!k!} \right] (\tanh \eta)^{2k} \qquad (60)$$ + +In terms of the velocity $v$ of the hadron, + +$$S = -(n+1) \left\{ \ln \left[ 1 - \left( \frac{v}{c} \right)^2 \right] + \frac{(v/c)^2 \ln(v/c)^2}{1 - (v/c)^2} \right\} \\ - \left[ 1 - \left( \frac{v}{c} \right)^2 \right]^{(n+1)} \sum_k \frac{(n+k)!}{n!k!} \ln \left[ \frac{(n+k)!}{n!k!} \right] \left( \frac{v}{c} \right)^{2k} \qquad (61)$$ + +Let us go back to the wave function given in Equation (41). As is illustrated in Figure 3, its localization property is dictated by the Gaussian factor which corresponds to the ground-state wave function. For this reason, we expect that much of the behavior of the density matrix or the entropy for the $n^{th}$ excited state will be the same as that for the ground state with $n=0$. For this state, the density matrix and the entropy are + +$$\rho(z,z') = \left(\frac{1}{\pi \cosh(2\eta)}\right)^{1/2} \exp\left\{-\frac{1}{4}\left[\frac{(z+z')^2}{\cosh(2\eta)} + (z-z')^2 \cosh(2\eta)\right]\right\} \qquad (62)$$ + +and + +$$S = 2 \left\{ (\cosh \eta)^2 \ln(\cosh \eta) - (\sinh \eta)^2 \ln(\sinh \eta) \right\} \qquad (63)$$ + +respectively. 
The quark distribution $\rho(z, z)$ becomes + +$$\rho(z, z) = \left( \frac{1}{\pi \cosh(2\eta)} \right)^{1/2} \exp \left( \frac{-z^2}{\cosh(2\eta)} \right) \qquad (64)$$ + +The width of the distribution becomes $\sqrt{\cosh(2\eta)}$, and becomes wide-spread as the hadronic speed increases. Likewise, the momentum distribution becomes wide-spread [5,29]. This simultaneous increase in the momentum and position distribution widths is called the parton phenomenon in high-energy physics [13,14]. The position-momentum uncertainty becomes $\cosh(2\eta)$. This increase in uncertainty is due to our ignorance about the physical but unmeasurable time-separation variable. + +Let us next examine how this ignorance will lead to the concept of temperature. For the Lorentz-boosted ground state with $n=0$, the density matrix of Equation (62) becomes that of the harmonic oscillator in a thermal equilibrium state if $(\tanh\eta)^2$ is identified as the Boltzmann factor [29]. For other states, it is very difficult, if not impossible, to describe them as thermal equilibrium states. Unlike the case of temperature, the entropy is clearly defined for all values of $n$. Indeed, the entropy in this case is derivable directly from the hadronic speed. + +The time-separation variable exists in the Lorentz-covariant world, but we pretend not to know about it. It thus is in Feynman's rest of the universe. If we do not measure this time-separation, it becomes translated into the entropy. +---PAGE_BREAK--- + +Figure 3. Localization property in the $zt$ plane. When the hadron is at rest, the Gaussian form is concentrated within a circular region specified by $(z+t)^2 + (z-t)^2 = 1$. As the hadron gains speed, the region becomes deformed to $e^{-2\eta}(z+t)^2 + e^{2\eta}(z-t)^2 = 1$. Since it is not possible to make measurements along the $t$ direction, we have to deal with information that is less than complete. + +Figure 4. The uncertainty from the hidden time-separation coordinate. 
The small circle indicates the minimal uncertainty when the hadron is at rest. More uncertainty is added when the hadron moves. This is illustrated by a larger circle. The radius of this circle increases by $\sqrt{\cosh(2\eta)}$. + +We can see the uncertainty in our measurement process from the Wigner function defined as + +$$W(z,p) = \frac{1}{\pi} \int \rho(z+y,z-y)e^{2ipy} dy \quad (65)$$ + +After integration, this Wigner function becomes + +$$W(z,p) = \frac{1}{\pi \cosh(2\eta)} \exp \left\{ - \left( \frac{z^2 + p^2}{\cosh(2\eta)} \right) \right\} \quad (66)$$ + +This Wigner phase distribution is illustrated in Figure 4. The smaller inner circle corresponds to the minimal uncertainty of the single oscillator. The larger circle is for the total uncertainty including the statistical uncertainty from our failure to observe the time-separation variable. The two-mode squeezed state tells us how this happens. In the two-mode case, both the first and second photons are observable, but we can choose not to observe the second photon. + +## 7. Lorentz-Covariant Quark Model + +The hydrogen atom played the pivotal role while the present form of quantum mechanics was developed. At that time, the proton was in the absolute Galilean frame of reference, and it was unthinkable that the proton could move with a speed close to that of light. + +Also, at that time, both the proton and electron were point particles. However, the discovery of Hofstadter *et al*. changed the picture of the proton in 1955 [30]. The proton charge has its internal +---PAGE_BREAK--- + +distribution. Within the framework of quantum electrodynamics, it is possible to calculate the Rutherford formula for the electron-proton scattering when both electron and proton are point particles. Because the proton is not a point particle, there is a deviation from the Rutherford formula. 
We describe this deviation using the formula called the “proton form factor” which depends on the momentum transfer during the electron-proton scattering. + +Indeed, the study of the proton form factor has been and still is one of the central issues in high-energy physics. The form factor decreases as the momentum transfer increases. Its behavior is called the “dipole cut-off” meaning an inverse-square decrease, and it has been a challenging problem in quantum field theory and other theoretical models [31]. Since the emergence of the quark model in 1964 [32], the hadrons are regarded as quantum bound states of quarks with space-time wave functions. Thus, the quark model is responsible for explaining this form factor. There are indeed many papers written on this subject. We shall return to this problem in Subsection 7.2. + +Another problem in high-energy physics is Feynman's parton picture [13,14]. If the hadron is at rest, we can approach this problem within the framework of bound-state quantum mechanics. If it moves with a speed close to that of light, it appears as a collection of an infinite number of partons, which interact with external signals incoherently. This phenomenon raises the question of whether the Lorentz boost destroys quantum coherence [33]. This leads to the concept of Feynman's decoherence [34]. We shall discuss this problem first. + +## 7.1. Feynman's Parton Picture and Feynman's Decoherence + +In 1969, Feynman observed that a fast-moving hadron can be regarded as a collection of many “partons” whose properties appear to be quite different from those of the quarks [5,14]. For example, the number of quarks inside a static proton is three, while the number of partons in a rapidly moving proton appears to be infinite. The question then is how the proton looking like a bound state of quarks to one observer can appear different to an observer in a different Lorentz frame? Feynman made the following systematic observations. + +a. 
The picture is valid only for hadrons moving with velocity close to that of light. + +b. The interaction time between the quarks becomes dilated, and partons behave as free independent particles. + +c. The momentum distribution of partons becomes widespread as the hadron moves fast. + +d. The number of partons seems to be infinite or much larger than that of quarks. + +Because the hadron is believed to be a bound state of two or three quarks, each of the above phenomena appears as a paradox, particularly (b) and (c) together. How can a free particle have a wide-spread momentum distribution? + +In order to address this question, let us go to Figure 5, which illustrates the Lorentz-squeeze property of the hadron as the hadron gains its speed. If we use the harmonic oscillator wave function, its momentum-energy wave function takes the same form as the space-time wave function. As the hadron gains its speed, both wave functions become squeezed. + +As the wave function becomes squeezed, the distribution becomes wide-spread, the spring constant appears to become weaker. Consequently, the constituent quarks appear to become free particles. + +If the constituent particles are confined in the narrow elliptic region, they become like massless particles. If those massless particles have a wide-spread momentum distribution, it is like a black-body radiation with infinite number of photon distributions. + +We have addressed this question extensively in the literature, and concluded Gell-Mann's quark model and Feynman's parton model are two different manifestations of the same Lorentz-covariant quantity [19,35,36]. Thus coherent quarks and incoherent partons are perfectly consistent within the framework of quantum mechanics and special relativity [33]. Indeed, this defines Feynman's decoherence [34]. +---PAGE_BREAK--- + +**Figure 5.** Lorentz-squeezed space-time and momentum-energy wave functions. 
As the hadron’s speed approaches that of light, both wave functions become concentrated along their respective positive light-cone axes. These light-cone concentrations lead to Feynman’s parton picture. + +More recently, we were able to explain this decoherence problem in terms of the interaction time among the constituent quarks and the time required for each quark to interact with external signals [4]. + +## 7.2. Proton Form Factors and Lorentz Coherence + +As early as in 1970, Fujimura et al. calculated the electromagnetic form factor of the proton using the wave functions given in this paper and obtained the so-called “dipole” cut-off of the form factor [37]. At that time, these authors did not have the benefit of the differential equation of Feynman and his co-authors [12]. Since their wave functions can now be given a bona-fide covariant probability interpretation, their calculation could be placed between the two limiting cases of quarks and partons. + +Even before the 1970 calculation of Fujimura et al., the covariant wave functions were discussed by various authors [38–40]. In 1970, Licht and Pagnamenta also discussed this problem with Lorentz-contracted wave functions [41]. + +In our 1973 paper [42], we attempted to explain the covariant oscillator wave function in terms of the coherence between the incoming signal and the width of the contracted wave function. This aspect was explained in terms of the overlap of the energy-momentum wave function in our book [5]. + +In this paper, we would like to go back to the coherence problem we raised in 1973, and follow up on it. In the Lorentz frame where the momentum of the proton has the opposite signs before and after the collision, the four-momentum transfer is + +$$ (p, E) - (-p, E) = (2p, 0) \qquad (67) $$ + +where the proton comes along the z direction with its momentum $p$, and its energy $\sqrt{p^2 + m^2}$. 
+---PAGE_BREAK--- + +Then the form factor becomes + +$$F(p) = \int e^{2ipz} (\psi_{\eta}(z,t))^* \psi_{-\eta}(z,t) dz dt \quad (68)$$ + +If we use the ground-state oscillator wave function, this integral becomes + +$$\frac{1}{\pi} \int e^{2ipz} \exp \left\{ -\cosh(2\eta) (z^2 + t^2) \right\} dz dt \quad (69)$$ + +After the $t$ integration, this integral becomes + +$$\frac{1}{\sqrt{\pi \cosh(2\eta)}} \int e^{2ipz} \exp \{-z^2 \cosh(2\eta)\} dz \quad (70)$$ + +The integrand is a product of a Gaussian factor and a sinusoidal oscillation. The width of the Gaussian factor shrinks by $1/\sqrt{\cosh(2\eta)}$, which becomes $\exp(-\eta)$ as $\eta$ becomes large. The wave length of the sinusoidal factor is inversely proportional to the momentum $p$. The wave length decreases also at the rate of $\exp(-\eta)$. Thus, the rate of the shrinkage is the same for both the Gaussian and sinusoidal factors. For this reason, the cutoff rate of the form factor of Equation (68) should be less than that for + +$$\int e^{2ipz} (\psi_0(z,t))^* \psi_0(z,t) dz dt = \frac{1}{\sqrt{\pi}} \int e^{2ipz} \exp(-z^2) dz \quad (71)$$ + +which corresponds to the form factor without the squeeze effect on the wave function. The integration of this expression leads to $\exp(-p^2)$, which corresponds to an exponential cut-off as $p^2$ becomes large. Let us go back to the form factor of Equation (68). If we complete the integral, it becomes + +$$F(p) = \frac{1}{\cosh(2\eta)} \exp \left\{ \frac{-p^2}{\cosh(2\eta)} \right\} \quad (72)$$ + +As $p^2$ becomes large, the Gaussian factor becomes a constant. However, the factor $1/\cosh(2\eta)$ leads to the form factor decrease of $1/p^2$, which is a much slower decrease than the exponential cut-off without squeeze effect. + +There still is a gap between this mathematical formula and the observed experimental data. Before looking at the experimental curve, we have to realize that there are three quarks inside the hadron with two oscillator modes. 
This will lead to a $(1/p^2)^2$ cut-off, which is commonly called the dipole cut-off in the literature. + +There is still more work to be done. For instance, the effect of the quark spin should be addressed [43,44]. Also there are reports of deviations from the exact dipole cut-off [45]. There have been attempts to study the form factors based on the four-dimensional rotation group [46], and also on the lattice QCD [47]. + +Yet, it is gratifying to note that the effect of Lorentz squeeze leads to the polynomial decrease in the momentum transfer, thanks to the Lorentz coherence illustrated in Figure 6. We started our logic from the fundamental principles of quantum mechanics and relativity. + +## 8. Conclusions + +In this paper, we presented one mathematical formalism applicable both to the entanglement problems in quantum optics [3] and to high-energy hadronic physics [4]. The formalism is based on harmonic oscillators familiar to us. We have presented a complete orthonormal set with a Lorentz-covariant probability interpretation. + +Since both branches of physics share the same mathematical base, it is possible to translate physics from one branch to the other. In this paper, we have given a physical interpretation to the +---PAGE_BREAK--- + +**Figure 6.** Coherence between the wavelength and the proton size. As the momentum transfer increases, the external signal sees a Lorentz-contracting proton distribution. On the other hand, the wavelength of the signal also decreases. Thus, the cutoff is not as severe as the case where the proton distribution is not contracted. + +time-separation variable as a hidden variable in Feynman's rest of the universe, in terms of the two-mode squeezed state where both photons are observable. + +This paper is largely a review paper with an organization to suit the current interest in physics. For instance, the concepts of entanglement and decoherence did not exist when those original papers were written. 
Furthermore, the probability interpretation given in Subsection 4.2 has not been published before. + +The rotation symmetry plays its role in all branches of physics. We noted that the squeeze symmetry plays active roles in two different subjects of physics. It is possible that the squeeze transformation can serve useful purposes in many other fields, although we are not able to specify them at this time. + +References + +1. Kim, Y.S.; Noz, M.E. *Phase Space Picture of Quantum Mechanics*; World Scientific Publishing Company: Singapore, 1991. + +2. Guillemin, V.; Sternberg, S. *Symplectic Techniques in Physics*; Cambridge University: Cambridge, UK, 1984. + +3. Giedke, G.; Wolf, M.M.; Krger, O.; Werner, R.F.; Cirac, J.J. Entanglement of formation for symmetric Gaussian states. *Phys. Rev. Lett.* **2003**, *91*, 107901-107904. + +4. Kim, Y.S.; Noz, M.E. Coupled oscillators, entangled oscillators, and Lorentz-covariant harmonic oscillators. *J. Opt. B: Quantum Semiclass. Opt.* **2005**, *7*, S458-S467. + +5. Kim, Y.S.; Noz, M.E. *Theory and Applications of the Poincaré Group* D; Reidel Publishing Company: Dordrecht, The Netherlands, 1986. + +6. Dirac, P.A.M. Forms of Relativistic Dynamics. *Rev. Mod. Phys.* **1949**, *21*, 392-399. + +7. Feynman, R.P. *Statistical Mechanics*; Benjamin/Cummings: Reading, MA, USA, 1972. + +8. Dirac, P.A.M. The Quantum Theory of the Emission and Absorption of Radiation. Proc. Roy. Soc. (London) **1927**, A114, 243-265. + +9. Dirac, P.A.M. Unitary Representations of the Lorentz Group. Proc. Roy. Soc. (London) **1945**, A183, 284-295. + +10. Dirac, P.A.M. A Remarkable Representation of the 3 + 2 de Sitter Group. J. Math. Phys. **1963**, *4*, 901-909. + +11. Wigner, E. On Unitary Representations of the Inhomogeneous Lorentz Group. Ann. Math. **1939**, *40*, 149-204. +---PAGE_BREAK--- + +12. Feynman, R.P.; Kislinger, M.; Ravndal F. Current Matrix Elements from a Relativistic Quark Model. Phys. Rev. D **1971**, *3*, 2706-2732. + +13. 
Feynman, R.P. Very High-Energy Collisions of Hadrons. Phys. Rev. Lett. **1969**, *23*, 1415-1417. + +14. Feynman, R.P. The Behavior of Hadron Collisions at Extreme Energies in High-Energy Collisions. In Proceedings of the Third International Conference; Gordon and Breach: New York, NY, USA, 1969; pp. 237-249. + +15. Kim, Y.S.; Noz, M.E.; Oh, S.H. Representations of the Poincaré group for relativistic extended hadrons. J. Math. Phys. **1979**, *20*, 1341-1344. + +16. Kim, Y.S.; Noz, M.E.; Oh, S.H.; A Simple Method for Illustrating the Difference between the Homogeneous and Inhomogeneous Lorentz Groups. Am. J. Phys. **1979**, *47*, 892-897. + +17. Rotbart, F.C. Complete orthogonality relations for the covariant harmonic oscillator. Phys. Rev. D **1981**, *12*, 3078-3090. + +18. Ruiz, M.J. Orthogonality relations for covariant harmonic oscillator wave functions. Phys. Rev. D **1974**, *10*, 4306-4307. + +19. Kim, Y.S.; Noz, M.E. Covariant Harmonic Oscillators and the Parton Picture. Phys. Rev. D **1977**, *15*, 335-338. + +20. Yuen, H.P. Two-photon coherent states of the radiation field. Phys. Rev. A **1976**, *13*, 2226-2243. + +21. Yurke, B.; Potasek, M. Obtainment of Thermal Noise from a Pure State. Phys. Rev. A **1987**, *36*, 3464-3466. + +22. Ekert, A.K.; Knight, P.L. Correlations and squeezing of two-mode oscillations. Am. J. Phys. **1989**, *57*, 692-697. + +23. Kim, Y.S.; Noz, M.E. The Question of Simultaneity in Relativity and Quantum Mechanics. In Quantum Theory: Reconsideration of Foundations-3; Adenier, G., Khrennikov, A., Nieuwenhuizen, T.M., Eds.; AIP Conference Proceedings 180, American Institute of Physics, College Park, MD, USA, 2006; pp. 168-178. + +24. Han, D.; Kim, Y.S.; Noz, M.E. Illustrative Example of Feynman's Rest of the Universe. Am. J. Phys. **1999**, *67*, 61-66. + +25. von Neumann, J. *Die Mathematische Grundlagen der Quanten-mechanik*; Springer: Berlin, Germany, 1932. + +26. Fano, U. 
Description of States in Quantum Mechanics by Density Matrix and Operator Techniques. Rev. Mod. Phys. **1967**, *29*, 74-93. + +27. Kim, Y.S.; Wigner, E.P. Entropy and Lorentz Transformations. Phys. Lett. A **1990**, *147*, 343-347. + +28. Kim, Y.S. Coupled oscillators and Feynman's three papers. J. Phys. Conf. Ser. **2007**, *70*, 012010: 1-19. + +29. Han, D.; Kim, Y.S.; Noz, M.E. Lorentz-Squeezed Hadrons and Hadronic Temperature. Phys. Lett. A **1990**, *144*, 111-115. + +30. Hofstadter, R.; McAllister, R.W. Electron Scattering from the Proton. Phys. Rev. **1955**, *98*, 217-218. + +31. Frazer, W.; Fulco, J. Effect of a Pion-Pion Scattering Resonance on Nucleon Structure. Phys. Rev. Lett. **1960**, *2*, 365-368. + +32. Gell-Mann, M. Nonleptonic Weak Decays and the Eightfold Way. Phys. Lett. **1964**, *12*, 155-156. + +33. Kim, Y.S. Does Lorentz Boost Destroy Coherence? Fortschr. der Physik **1998**, *46*, 713-724. + +34. Kim, Y.S.; Noz, M.E. Feynman's Decoherence. Optics Spectro. **2003**, *47*, 733-740. + +35. Hussar, P.E. Valons and harmonic oscillators. Phys. Rev. D **1981**, *23*, 2781-2783. + +36. Kim, Y.S. Observable gauge transformations in the parton picture. Phys. Rev. Lett. **1989**, *63*, 348-351. + +37. Fujimura, K.; Kobayashi, T.; Namiki, M. Nucleon Electromagnetic Form Factors at High Momentum Transfers in an Extended Particle Model Based on the Quark Model. Prog. Theor. Phys. **1970**, *43*, 73-79. + +38. Yukawa, H. Structure and Mass Spectrum of Elementary Particles. I. General Considerations. Phys. Rev. **1953**, *91*, 415-416. + +39. Markov, M. On Dynamically Deformable Form Factors in the Theory Of Particles. Suppl. Nuovo Cimento **1956**, *3*, 760-772. + +40. Ginzburg, V.L.; Man'ko, V.I. Relativistic oscillator models of elementary particles. Nucl. Phys. **1965**, *74*, 577-588. + +41. Licht, A.L.; Pagnamenta, A. Wave Functions and Form Factors for Relativistic Composite Particles I. Phys. Rev. D **1970**, *2*, 1150-1156. + +42. 
Kim, Y.S.; Noz, M.E. Covariant harmonic oscillators and the quark model. Phys. Rev. D **1973**, *8*, 3521-3627. + +43. Lipes, R. Electromagnetic Excitations of the Nucleon in a Relativistic Quark Model. Phys. Rev. D **1972**, *5*, 2849-2863. + +44. Henriques, A.B.; Keller, B.H.; Moorhouse, R.G. General three-spinor wave functions and the relativistic quark model. Ann. Phys. (NY) **1975**, *93*, 125-151 +---PAGE_BREAK--- + +45. Punjabi, V.; Perdrisat, C.F.; Aniol, K.A.; Baker, F.T.; Berthot, J.; Bertin, P.Y.; Bertozzi, W.; Besson, A.; Bimbot, L.; Boeglin, W.U.; et al. Proton elastic form factor ratios to Q2 = 3.5 GeV2 by polarization transfer. *Phys. Rev. C* **2005**, *71*, 055202-27. + +46. Alkofer, R.; Holl, A.; Kloker, M.; Karssnigg A.; Roberts, C.D. On Nucleon Electromagnetic Form Factors. *Few-Body Sys.* **2005**, *37*, 1-31. + +47. Matevosyan, H.H.; Thomas, A.W.; Miller, G.A. Study of lattice QCD form factors using the extended Gari-Krumpelmann model. *Phys. Rev. C* **2005**, *72*, 065204-5. + +© 2011 by the authors. Licensee MDPI, Basel, Switzerland. This article is an open access +article distributed under the terms and conditions of the Creative Commons Attribution +(CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +Article + +Dirac Matrices and Feynman's Rest of the Universe + +Young S. Kim ¹,* and Marilyn E. Noz ² + +¹ Center for Fundamental Physics, University of Maryland, College Park, MD 20742, USA + +² Department of Radiology, New York University, New York, NY 10016, USA; marilyne.noz@gmail.com + +* Author to whom correspondence should be addressed; yskim@umd.edu; Tel.: +1-301-937-6306. + +Received: 25 June 2012; in revised form: 6 October 2012; Accepted: 23 October 2012; Published: 30 October 2012 + +**Abstract:** There are two sets of four-by-four matrices introduced by Dirac. The first set consists of fifteen Majorana matrices derivable from his four $\gamma$ matrices. 
These fifteen matrices can also serve as the generators of the group $SL(4, r)$. The second set consists of ten generators of the $Sp(4)$ group which Dirac derived from two coupled harmonic oscillators. It is shown possible to extend the symmetry of $Sp(4)$ to that of $SL(4, r)$ if the area of the phase space of one of the oscillators is allowed to become smaller without a lower limit. While there are no restrictions on the size of phase space in classical mechanics, Feynman's rest of the universe makes this $Sp(4)$-to-$SL(4, r)$ transition possible. The ten generators are for the world where quantum mechanics is valid. The remaining five generators belong to the rest of the universe. It is noted that the groups $SL(4, r)$ and $Sp(4)$ are locally isomorphic to the Lorentz groups $O(3, 3)$ and $O(3, 2)$ respectively. This allows us to interpret Feynman's rest of the universe in terms of space-time symmetry. + +**Keywords:** Dirac gamma matrices; Feynman's rest of the universe; two coupled oscillators; Wigner's phase space; non-canonical transformations; group generators; $SL(4, r)$ isomorphic $O(3, 3)$; quantum mechanics interpretation + +# 1. Introduction + +In 1963, Paul A. M. Dirac published an interesting paper on the coupled harmonic oscillators [1]. Using step-up and step-down operators, Dirac was able to construct ten operators satisfying a closed set of commutation relations. He then noted that this set of commutation relations can also be used as the Lie algebra for the $O(3, 2)$ de Sitter group applicable to three space and two time dimensions. He noted further that this is the same as the Lie algebra for the four-dimensional symplectic group $Sp(4)$. + +His algebra later became the fundamental mathematical language for two-mode squeezed states in quantum optics [2–5]. Thus, Dirac’s ten oscillator matrices play a fundamental role in modern physics. 
+ +In the Wigner phase-space representation, it is possible to write the Wigner function in terms of two position and two momentum variables. It was noted that those ten operators of Dirac can be translated into the operators with these four variables [4,6], which then can be written as four-by-four matrices. There are thus ten four-by-four matrices. We shall call them Dirac’s oscillator matrices. They are indeed the generators of the symplectic group $Sp(4)$. + +We are quite familiar with four Dirac matrices for the Dirac equation, namely $\gamma_1, \gamma_2, \gamma_3$, and $\gamma_0$. They all become imaginary in the Majorana representation. From them we can construct fifteen linearly independent four-by-four matrices. It is known that these four-by-four matrices can serve as the generators of the $SL(4, r)$ group [6,7]. It is also known that this $SL(4, r)$ group is locally isomorphic to the Lorentz group $O(3, 3)$ applicable to the three space and three time dimensions [6,7]. + +There are now two sets of the four-by-four matrices constructed by Dirac. The first set consists of his ten oscillator matrices, and there are fifteen $\gamma$ matrices coming from his Dirac equation. There is +---PAGE_BREAK--- + +thus a difference of five matrices. The question is then whether this difference can be explained within +the framework of the oscillator formalism with tangible physics. + +It was noted that his original O(3,2) symmetry can be extended to that of O(3,3) Lorentz group applicable to the six dimensional space consisting of three space and three time dimensions. This requires the inclusion of non-canonical transformations in classical mechanics [6]. These non-canonical transformations cannot be interpreted in terms of the present form of quantum mechanics. + +On the other hand, we can use this non-canonical effect to illustrate the concept of Feynman's rest of the universe. This oscillator system can serve as two different worlds. 
The first oscillator is the world in which we do quantum mechanics, and the second is for the rest of the universe. Our failure to observe the second oscillator results in the increase in the size of the Wigner phase space, thus increasing the entropy [8]. + +Instead of ignoring the second oscillator, it is of interest to see what happens to it. In this paper, +it is shown that the area of the phase space of the second oscillator does not have a lower limit. This is allowed in classical mechanics, +but not in quantum mechanics. + +Indeed, Dirac's ten oscillator matrices explain the quantum world for both oscillators. The set of Dirac's fifteen $\gamma$ matrices contains his ten oscillator matrices as a subset. We discuss in this paper the physics of this difference. + +In Section 2, we start with Dirac’s four $\gamma$ matrices in the Majorana representation and construct all fifteen four-by-four matrices applicable to the Majorana form of the Dirac spinors. Section 3 reproduces Dirac’s derivation of the $O(3,2)$ symmetry with ten generators from two coupled oscillators. This group is locally isomorphic to $Sp(4)$, which allows canonical transformations in classical mechanics. + +In Section 4, we translate Dirac’s formalism into the language of the Wigner phase space. +This allows us to extend the $Sp(4)$ symmetry into the non-canonical region in classical mechanics. +The resulting symmetry is that of $SL(4,r)$, isomorphic to that of the Lorentz group $O(3,3)$ with fifteen +generators. This allows us to establish the correspondence between Dirac’s Majorana matrices and +those $SL(4,r)$ four-by-four matrices applicable to the two oscillator system, as well as the fifteen +six-by-six matrices that serve as the generators of the $O(3,3)$ group. + +Finally, in Section 5, it is shown that the difference between the ten oscillator matrices and the +fifteen Majorana matrices can serve as an illustrative example of Feynman’s rest of the universe [8,9]. + +## 2. 
Dirac Matrices in the Majorana Representation + +Since all the generators for the two coupled oscillator system can be written as four-by-four +matrices with imaginary elements, it is convenient to work with Dirac matrices in the Majorana +representation, where all the elements are imaginary [7,10,11]. In the Majorana representation, +the four Dirac $\gamma$ matrices are + +$$ \gamma_1 = i \begin{pmatrix} \sigma_3 & 0 \\ 0 & \sigma_3 \end{pmatrix}, \quad \gamma_2 = \begin{pmatrix} 0 & -\sigma_2 \\ \sigma_2 & 0 \end{pmatrix} $$ + +$$ \gamma_3 = -i \begin{pmatrix} \sigma_1 & 0 \\ 0 & \sigma_1 \end{pmatrix}, \quad \gamma_0 = \begin{pmatrix} 0 & \sigma_2 \\ \sigma_2 & 0 \end{pmatrix} \qquad (1) $$ + +where + +$$ \sigma_1 = \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}, \sigma_2 = \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix}, \sigma_3 = \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} $$ + +These $\gamma$ matrices are transformed like four-vectors under Lorentz transformations. From these four matrices, we can construct one pseudo-scalar matrix + +$$ \gamma_5 = i\gamma_0\gamma_1\gamma_2\gamma_3 = \begin{pmatrix} \sigma_2 & 0 \\ 0 & -\sigma_2 \end{pmatrix} \qquad (2) $$ +---PAGE_BREAK--- + +and a pseudo vector $i\gamma_5\gamma_\mu$ consisting of + +$$ +\begin{align} +i\gamma_5\gamma_1 &= i \begin{pmatrix} -\sigma_1 & 0 \\ 0 & \sigma_1 \end{pmatrix}, & +i\gamma_5\gamma_2 &= -i \begin{pmatrix} 0 & I \\ I & 0 \end{pmatrix} \\ +i\gamma_5\gamma_0 &= i \begin{pmatrix} 0 & I \\ -I & 0 \end{pmatrix}, & +i\gamma_5\gamma_3 &= i \begin{pmatrix} -\sigma_3 & 0 \\ 0 & +\sigma_3 \end{pmatrix} +\end{align} +$$ + +(3) + +In addition, we can construct the tensor of the $\gamma$ as + +$$ +T_{\mu\nu} = \frac{i}{2} (\gamma_{\mu}\gamma_{\nu} - \gamma_{\nu}\gamma_{\mu}) \quad (4) +$$ + +This antisymmetric tensor has six components. 
They are + +$$ +i\gamma_0\gamma_1 = -i \begin{pmatrix} 0 & \sigma_1 \\ \sigma_1 & 0 \end{pmatrix}, i\gamma_0\gamma_2 = -i \begin{pmatrix} -I & 0 \\ 0 & I \end{pmatrix}, i\gamma_0\gamma_3 = -i \begin{pmatrix} 0 & \sigma_3 \\ \sigma_3 & 0 \end{pmatrix} \quad (5) +$$ + +and + +$$ +i\gamma_1\gamma_2 = i \begin{pmatrix} 0 & -\sigma_1 \\ \sigma_1 & 0 \end{pmatrix}, i\gamma_2\gamma_3 = -i \begin{pmatrix} 0 & -\sigma_3 \\ \sigma_3 & 0 \end{pmatrix}, i\gamma_3\gamma_1 = \begin{pmatrix} \sigma_2 & 0 \\ 0 & \sigma_2 \end{pmatrix} \quad (6) +$$ + +There are now fifteen linearly independent four-by-four matrices. They are all traceless and their components are imaginary [7]. We shall call these Dirac's Majorana matrices. + +In 1963 [1], Dirac constructed another set of four-by-four matrices from two coupled harmonic oscillators, within the framework of quantum mechanics. He ended up with ten four-by-four matrices. It is of interest to compare his oscillator matrices and his fifteen Majorana matrices. + +**3. Dirac’s Coupled Oscillators** + +In his 1963 paper [1], Dirac started with the Hamiltonian for two harmonic oscillators. 
It can be written as + +$$ +H = \frac{1}{2} (p_1^2 + x_1^2) + \frac{1}{2} (p_2^2 + x_2^2) \tag{7} +$$ + +The ground-state wave function for this Hamiltonian is + +$$ +\psi_0(x_1, x_2) = \frac{1}{\sqrt{\pi}} \exp \left\{ -\frac{1}{2} (x_1^2 + x_2^2) \right\} \qquad (8) +$$ + +We can now consider unitary transformations applicable to the ground-state wave function of +Equation (8), and Dirac noted that those unitary transformations are generated by [1] + +$$ +\begin{align*} +L_1 &= \frac{1}{2}(a_1^\dagger a_2 + a_2^\dagger a_1), & L_2 &= \frac{1}{2i}(a_1^\dagger a_2 - a_2^\dagger a_1) \\ +L_3 &= \frac{1}{2}(a_1^\dagger a_1 - a_2^\dagger a_2), & S_3 &= \frac{1}{2}(a_1^\dagger a_1 + a_2^\dagger a_2 + 1) \\ +K_1 &= -\frac{1}{4}(a_1^\dagger a_1^\dagger + a_1 a_1 - a_2^\dagger a_2^\dagger - a_2 a_2) \\ +K_2 &= \frac{i}{4}(a_1^\dagger a_1^\dagger - a_1 a_1 + a_2^\dagger a_2^\dagger - a_2 a_2) \\ +K_3 &= \frac{1}{2}(a_1^\dagger a_2^\dagger + a_1 a_2) \\ +Q_1 &= -\frac{i}{4}(a_1^\dagger a_1^\dagger - a_1 a_1 - a_2^\dagger a_2^\dagger + a_2 a_2) \\ +Q_2 &= -\frac{1}{4}(a_1^\dagger a_1^\dagger + a_1 a_1 + a_2^\dagger a_2^\dagger + a_2 a_2) \\ +Q_3 &= \frac{i}{2}(a_1^\dagger a_2^\dagger - a_1 a_2) +\end{align*} +$$ + +(9) + +where $a^\dagger$ and $a$ are the step-up and step-down operators applicable to harmonic oscillator wave functions. These operators satisfy the following set of commutation relations. +---PAGE_BREAK--- + +$$ +\begin{align} +[L_i, L_j] &= i\epsilon_{ijk}L_k, \quad [L_i, K_j] = i\epsilon_{ijk}K_k, \quad [L_i, Q_j] = i\epsilon_{ijk}Q_k \nonumber \\ +[K_i, K_j] &= [Q_i, Q_j] = -i\epsilon_{ijk}L_k, \quad [L_i, S_3] = 0 \nonumber \\ +[K_i, Q_j] &= -i\delta_{ij}S_3, \quad [K_i, S_3] = -iQ_i, \quad [Q_i, S_3] = iK_i \tag{10} +\end{align} +$$ + +Dirac then determined that these commutation relations constitute the Lie algebra for the O(3,2) de Sitter group with ten generators. 
This de Sitter group is the Lorentz group applicable to three space coordinates and two time coordinates. Let us use the notation (x,y,z,t,s), with (x,y,z) as space coordinates and (t,s) as two time coordinates. Then the rotation around the z axis is generated by + +$$ +L_3 = \begin{pmatrix} +0 & -i & 0 & 0 & 0 \\ +i & 0 & 0 & 0 & 0 \\ +0 & 0 & 0 & 0 & 0 \\ +0 & 0 & 0 & 0 & 0 \\ +0 & 0 & 0 & 0 & 0 +\end{pmatrix} +\qquad (11) +$$ + +The generators $L_1$ and $L_2$ can also be constructed. The $K_3$ and $Q_3$ will take the form + +$$ +K_3 = \begin{pmatrix} 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & i & 0 \\ 0 & 0 & i & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \end{pmatrix}, Q_3 = \begin{pmatrix} 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & i \\ 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & i & 0 & 0 \end{pmatrix} \tag{12} +$$ + +From these two matrices, the generators $K_1, K_2, Q_1, Q_2$ can be constructed. The generator $S_3$ can be written as + +$$ +S_3 = \begin{pmatrix} 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & -i \\ 0 & 0 & 0 & i & 0 \end{pmatrix} \tag{13} +$$ + +The last five-by-five matrix generates rotations in the two-dimensional space of $(t,s)$. + +In his 1963 paper [1], Dirac states that the Lie algebra of Equation (10) can serve as the four-dimensional symplectic group $Sp(4)$. In order to see this point, let us go to the Wigner phase-space picture of the coupled oscillators. + +### **3.1. Wigner Phase-Space Representation** + +For this two-oscillator system, the Wigner function is defined as [4,6] + +$$ +W(x_1, x_2; p_1, p_2) = \left(\frac{1}{\pi}\right)^2 \int \exp\{-2i(p_1 y_1 + p_2 y_2)\} \\ +\times \psi^*(x_1+y_1, x_2+y_2) \psi(x_1-y_1, x_2-y_2) dy_1 dy_2 \tag{14} +$$ + +Indeed, the Wigner function is defined over the four-dimensional phase space of $(x_1, p_1, x_2, p_2)$ just as in the case of classical mechanics. 
The unitary transformations generated by the operators of Equation (9) are translated into linear canonical transformations of the Wigner function [4]. The canonical transformations are generated by the differential operators [4]: + +$$ +L_1 = +i \frac{1}{2} \left\{ \left( x_1 \frac{\partial}{\partial p_2} - p_2 \frac{\partial}{\partial x_1} \right) + \left( x_2 \frac{\partial}{\partial p_1} - p_1 \frac{\partial}{\partial x_2} \right) \right\} +$$ +---PAGE_BREAK--- + +$$ +\begin{align*} +L_2 &= -\frac{i}{2} \left\{ \left(x_1 \frac{\partial}{\partial x_2} - x_2 \frac{\partial}{\partial x_1}\right) + \left(p_1 \frac{\partial}{\partial p_2} - p_2 \frac{\partial}{\partial p_1}\right) \right\} \\ +L_3 &= +\frac{i}{2} \left\{ \left(x_1 \frac{\partial}{\partial p_1} - p_1 \frac{\partial}{\partial x_1}\right) - \left(x_2 \frac{\partial}{\partial p_2} - p_2 \frac{\partial}{\partial x_2}\right) \right\} \\ +S_3 &= -\frac{i}{2} \left\{ \left(x_1 \frac{\partial}{\partial p_1} - p_1 \frac{\partial}{\partial x_1}\right) + \left(x_2 \frac{\partial}{\partial p_2} - p_2 \frac{\partial}{\partial x_2}\right) \right\} +\end{align*} +$$ + +and + +$$ +\begin{align} +K_1 &= -\frac{i}{2} \left\{ \left( x_1 \frac{\partial}{\partial p_1} + p_1 \frac{\partial}{\partial x_1} \right) - \left( x_2 \frac{\partial}{\partial p_2} + p_2 \frac{\partial}{\partial x_2} \right) \right\} \\ +K_2 &= -\frac{i}{2} \left\{ \left( x_1 \frac{\partial}{\partial x_1} - p_1 \frac{\partial}{\partial p_1} \right) + \left( x_2 \frac{\partial}{\partial x_2} - p_2 \frac{\partial}{\partial p_2} \right) \right\} \\ +K_3 &= +\frac{i}{2} \left\{ \left( x_1 \frac{\partial}{\partial p_2} + p_2 \frac{\partial}{\partial x_1} \right) + \left( x_2 \frac{\partial}{\partial p_1} + p_1 \frac{\partial}{\partial x_2} \right) \right\} \\ +Q_1 &= +\frac{i}{2} \left\{ \left( x_1 \frac{\partial}{\partial x_1} - p_1 \frac{\partial}{\partial p_1} \right) - \left( x_2 \frac{\partial}{\partial x_2} - p_2 \frac{\partial}{\partial p_2} 
\right) \right\} \\ +Q_2 &= -\frac{i}{2} \left\{ \left( x_1 \frac{\partial}{\partial p_1} + p_1 \frac{\partial}{\partial x_1} \right) + \left( x_2 \frac{\partial}{\partial p_2} + p_2 \frac{\partial}{\partial x_2} \right) \right\} \\ +Q_3 &= -\frac{i}{2} \left\{ \left( x_2 \frac{\partial}{\partial x_1} + x_1 \frac{\partial}{\partial x_2} \right) - \left( p_2 \frac{\partial}{\partial p_1} + p_1 \frac{\partial}{\partial p_2} \right) \right\} +\end{align} +$$ + +$$ +(15) +$$ + +$$ +\tilde{K}_i = J K_i J +$$ + +$$ +M J \tilde{M} = J +$$ + +where $\tilde{M}$ is the transpose of $M$, and *M* is a four-by-four matrix defined by + +$$ +M_{ij} = \frac{\partial \xi_i}{\partial \eta_j} +$$ + +and + +$$ +J = \begin{pmatrix} +0 & 1 & 0 & 0 \\ +-1 & 0 & 0 & 0 \\ +0 & 0 & 0 & 1 \\ +0 & 0 & -1 & 0 +\end{pmatrix} +$$ + +(19) + +According to this form of the *J* matrix, the area of the phase space for *x*₁ and *p*₁ variables remains invariant, and the story is the same for the phase space of *x*₂ and *p*₂. + +We can then write the generators of the Sp(4) group as + +$$ +L_1 = -\frac{1}{2} \begin{pmatrix} 0 & \sigma_2 \\ \sigma_2 & 0 \end{pmatrix}, L_2 = \frac{i}{2} \begin{pmatrix} 0 & -I \\ I & 0 \end{pmatrix} +$$ + +$$ +L_3 = \frac{1}{2} \begin{pmatrix} -\sigma_2 & 0 \\ 0 & \sigma_2 \end{pmatrix}, S_3 = \frac{1}{2} \begin{pmatrix} \sigma_2 & 0 \\ 0 & \sigma_2 \end{pmatrix} +$$ + +and + +$$ +K_1 = \frac{i}{2} \begin{pmatrix} \sigma_1 & 0 \\ 0 & -\sigma_1 \end{pmatrix}, K_2 = \frac{i}{2} \begin{pmatrix} \sigma_3 & 0 \\ 0 & \sigma_3 \end{pmatrix}, K_3 = -\frac{i}{2} \begin{pmatrix} 0 & \sigma_1 \\ \sigma_1 & 0 \end{pmatrix} +$$ +---PAGE_BREAK--- + +and + +$$Q_1 = \frac{i}{2} \begin{pmatrix} -\sigma_3 & 0 \\ 0 & \sigma_3 \end{pmatrix}, Q_2 = \frac{i}{2} \begin{pmatrix} \sigma_1 & 0 \\ 0 & \sigma_1 \end{pmatrix}, Q_3 = \frac{i}{2} \begin{pmatrix} 0 & \sigma_3 \\ \sigma_3 & 0 \end{pmatrix} \quad (21)$$ + +These four-by-four matrices satisfy the commutation relations given in Equation (10). 
Indeed, the de Sitter group *O*(3,2) is locally isomorphic to the *Sp*(4) group. The remaining question is whether these ten matrices can serve as the fifteen Dirac matrices given in Section 2. The answer is clearly no. How can ten matrices describe fifteen matrices? We should therefore add five more matrices. + +**4. Extension to O(3,3) Symmetry** + +Unlike the case of the Schrödinger picture, it is possible to add five non-canonical generators to the above list. They are + +$$S_1 = +\frac{i}{2} \left\{ \left(x_1 \frac{\partial}{\partial x_2} - x_2 \frac{\partial}{\partial x_1}\right) - \left(p_1 \frac{\partial}{\partial p_2} - p_2 \frac{\partial}{\partial p_1}\right) \right\}$$ + +$$S_2 = -\frac{i}{2} \left\{ \left(x_1 \frac{\partial}{\partial p_2} - p_2 \frac{\partial}{\partial x_1}\right) + \left(x_2 \frac{\partial}{\partial p_1} - p_1 \frac{\partial}{\partial x_2}\right) \right\} \quad (22)$$ + +as well as three additional squeeze operators: + +$$G_1 = -\frac{i}{2} \left\{ \left(x_1 \frac{\partial}{\partial x_2} + x_2 \frac{\partial}{\partial x_1}\right) + \left(p_1 \frac{\partial}{\partial p_2} + p_2 \frac{\partial}{\partial p_1}\right) \right\}$$ + +$$G_2 = \frac{i}{2} \left\{ \left(x_1 \frac{\partial}{\partial p_2} + p_2 \frac{\partial}{\partial x_1}\right) - \left(x_2 \frac{\partial}{\partial p_1} + p_1 \frac{\partial}{\partial x_2}\right) \right\}$$ + +$$G_3 = -\frac{i}{2} \left\{ \left(x_1 \frac{\partial}{\partial x_1} + p_1 \frac{\partial}{\partial p_1}\right) - \left(x_2 \frac{\partial}{\partial x_2} + p_2 \frac{\partial}{\partial p_2}\right) \right\} \quad (23)$$ + +These five generators perform well-defined operations on the Wigner function. However, the question is whether these additional generators are acceptable in the present form of quantum mechanics. 
+ +In order to answer this question, let us note that the uncertainty principle in the phase-space picture of quantum mechanics is stated in terms of the minimum area in phase space for a given pair of conjugate variables. The minimum area is determined by Planck's constant. Thus we are allowed to expand the phase space, but are not allowed to contract it. With this point in mind, let us go back to $G_3$ of Equation (23), which generates transformations that simultaneously expand one phase space and contract the other. Thus, the $G_3$ generator is not acceptable in quantum mechanics even though it generates well-defined mathematical transformations of the Wigner function. + +If the five generators of Equations (22) and (23) are added to the ten generators given in Equations (15) and (16), there are fifteen generators. They satisfy the following set of commutation relations. + +$$ +\begin{align*} +[L_i, L_j] &= i\epsilon_{ijk}L_k, & [S_i, S_j] &= i\epsilon_{ijk}S_k, & [L_i, S_j] &= 0 \\ +[L_i, K_j] &= i\epsilon_{ijk}K_k, & [L_i, Q_j] &= i\epsilon_{ijk}Q_k, & [L_i, G_j] &= i\epsilon_{ijk}G_k \\ +[K_i, K_j] &= [Q_i, Q_j] = [G_i, G_j] = -i\epsilon_{ijk}L_k \\ +[K_i, Q_j] &= -i\delta_{ij}S_3, & [Q_i, G_j] &= -i\delta_{ij}S_1, & [G_i, K_j] &= -i\delta_{ij}S_2 \\ +[K_i, S_3] &= -iQ_i, & [Q_i, S_3] &= iK_i, & [G_i, S_3] &= 0 \\ +[K_i, S_1] &= 0, & [Q_i, S_1] &= -iG_i, & [G_i, S_1] &= iQ_i \\ +[K_i, S_2] &= iG_i, & [Q_i, S_2] &= 0, & [G_i, S_2] &= -iK_i +\end{align*} +\tag{24} +$$ + +As we shall see in Section 4.2, this set of commutation relations serves as the Lie algebra for the group SL(4, r) and also for the *O*(3, 3) Lorentz group. +---PAGE_BREAK--- + +These fifteen four-by-four matrices are written in terms of Dirac's fifteen Majorana matrices, and are tabulated in Table 1. There are six anti-symmetric and nine symmetric matrices. These anti-symmetric matrices can be divided into two sets of three rotation generators in the four-dimensional phase space. 
The nine symmetric matrices can be divided into three sets of three squeeze generators. However, this classification scheme is easier to understand in terms of the group $O(3,3)$, discussed in Section 4.2. + +**Table 1.** SL(4,*r*) and Dirac matrices. Two sets of rotation generators and three sets of boost generators. +There are 15 generators. + +
First componentSecond componentThird component
Rotation$L_1 = \frac{-i}{2}\gamma_0$$L_2 = \frac{i}{2}\gamma_5\gamma_0$$L_3 = \frac{-i}{2}\gamma_5$
Rotation$S_1 = \frac{i}{2}\gamma_2\gamma_3$$S_2 = \frac{i}{2}\gamma_1\gamma_2$$S_3 = \frac{i}{2}\gamma_3\gamma_1$
Boost$K_1 = \frac{-i}{2}\gamma_5\gamma_1$$K_2 = \frac{1}{2}\gamma_1$$K_3 = \frac{1}{2}\gamma_0\gamma_1$
Boost$Q_1 = \frac{i}{2}\gamma_5\gamma_3$$Q_2 = \frac{-i}{2}\gamma_3$$Q_3 = -\frac{i}{2}\gamma_0\gamma_3$
Boost$G_1 = \frac{-i}{2}\gamma_5\gamma_2$$G_2 = \frac{1}{2}\gamma_2$$G_3 = \frac{1}{2}\gamma_0\gamma_2$
+ +## 4.1. Non-Canonical Transformations in Classical Mechanics + +In addition to Dirac's ten oscillator matrices, we can consider the matrix + +$$ G_3 = \frac{i}{2} \begin{pmatrix} I & 0 \\ 0 & -I \end{pmatrix} \qquad (25) $$ + +which will generate a radial expansion of the phase space of the first oscillator, while contracting that of the second phase space [14], as illustrated in Figure 1. What is the physical significance of this operation? The expansion of phase space leads to an increase in uncertainty and entropy [8,14]. + +**Figure 1.** Expanding and contracting phase spaces. Canonical transformations leave the area of each phase space invariant. Non-canonical transformations can change them, yet the product of these two areas remains invariant. + +The contraction of the second phase space has a lower limit in quantum mechanics, namely it cannot become smaller than Planck's constant. However, there is no such lower limit in classical mechanics. We shall go back to this question in Section 5. +---PAGE_BREAK--- + +In the meantime, let us study what happens when the matrix $G_3$ is introduced into the set of matrices given in Equations (20) and (21). It commutes with $S_3$, $L_3$, $K_1$, $K_2$, $Q_1$, and $Q_2$. However, its commutators with the rest of the matrices produce four more generators: + +$$[G_3, L_1] = iG_2, [G_3, L_2] = -iG_1, [G_3, K_3] = iS_2, [G_3, Q_3] = -iS_1 \qquad (26)$$ + +where + +$$G_1 = \frac{i}{2} \begin{pmatrix} 0 & I \\ I & 0 \end{pmatrix}, G_2 = \frac{1}{2} \begin{pmatrix} 0 & -\sigma_2 \\ \sigma_2 & 0 \end{pmatrix}$$ + +$$S_1 = \frac{i}{2} \begin{pmatrix} 0 & \sigma_3 \\ -\sigma_3 & 0 \end{pmatrix}, S_2 = \frac{i}{2} \begin{pmatrix} 0 & -\sigma_1 \\ \sigma_1 & 0 \end{pmatrix} \qquad (27)$$ + +If we take into account the above five generators in addition to the ten generators of $Sp(4)$, there are fifteen generators. These generators satisfy the set of commutation relations given in Equation (24). 
+ +Indeed, the ten $Sp(4)$ generators together with the five new generators form the Lie algebra for the group $SL(4,r)$. There are thus fifteen four-by-four matrices. They can be written in terms of the fifteen Majorana matrices, as given in Table 1. + +## 4.2. Local Isomorphism between O(3,3) and SL(4,r) + +It is now possible to write fifteen six-by-six matrices that generate Lorentz transformations on the three space coordinates and three time coordinates [6]. However, those matrices are difficult to handle and do not show existing regularities. In this section, we write those matrices as two-by-two matrices of three-by-three matrices. + +For this purpose, we construct four sets of three-by-three matrices given in Table 2. There are two sets of rotation generators: + +$$L_i = \begin{pmatrix} A_i & 0 \\ 0 & 0 \end{pmatrix}, S_i = \begin{pmatrix} 0 & 0 \\ 0 & A_i \end{pmatrix} \qquad (28)$$ + +applicable to the space and time coordinates respectively. + +There are also three sets of boost generators. In the two-by-two representation of the matrices given in Table 2, they are: + +$$K_i = \begin{pmatrix} 0 & B_i \\ \tilde{B}_i & 0 \end{pmatrix}, Q_i = \begin{pmatrix} 0 & C_i \\ \tilde{C}_i & 0 \end{pmatrix}, G_i = \begin{pmatrix} 0 & D_i \\ \tilde{D}_i & 0 \end{pmatrix} \qquad (29)$$ + +where the three-by-three matrices $A_i, B_i, C_i$, and $D_i$ are given in Table 2, and $\tilde{A}_i, \tilde{B}_i, \tilde{C}_i, \tilde{D}_i$ are their transposes respectively. +---PAGE_BREAK--- + +**Table 2.** Three-by-three matrices constituting the two-by-two representation of generators of the $O(3,3)$ group. + +
i = 1i = 2i = 3
Ai$\begin{pmatrix} 0 & 0 & 0 \\ 0 & 0 & -i \\ 0 & i & 0 \end{pmatrix}$$\begin{pmatrix} 0 & 0 & i \\ 0 & 0 & 0 \\ -i & 0 & 0 \end{pmatrix}$$\begin{pmatrix} 0 & -i & 0 \\ i & 0 & 0 \\ 0 & 0 & 0 \end{pmatrix}$
Bi$\begin{pmatrix} i & 0 & 0 \\ 0 & 0 & 0 \\ 0 & 0 & 0 \end{pmatrix}$$\begin{pmatrix} 0 & 0 & 0 \\ i & 0 & 0 \\ 0 & 0 & 0 \end{pmatrix}$$\begin{pmatrix} 0 & 0 & 0 \\ 0 & 0 & 0 \\ i & 0 & 0 \end{pmatrix}$
Ci$\begin{pmatrix} 0 & i & 0 \\ 0 & 0 & 0 \\ 0 & 0 & 0 \end{pmatrix}$$\begin{pmatrix} 0 & 0 & 0 \\ 0 & i & 0 \\ 0 & 0 & 0 \end{pmatrix}$$\begin{pmatrix} 0 & 0 & 0 \\ 0 & 0 & 0 \\ 0 & i & 0 \end{pmatrix}$
Di$\begin{pmatrix} 0 & 0 & i \\ 0 & 0 & 0 \\ 0 & 0 & 0 \end{pmatrix}$$\begin{pmatrix} 0 & 0 & 0 \\ 0 & i & 0 \\ 0 & 0 & 0 \end{pmatrix}$$\begin{pmatrix} 0 & 0 & 0 \\ 0 & 0 & 0 \\ 0 & 0 & i \end{pmatrix}$
+ +There is a four-by-four Majorana matrix corresponding to each of these fifteen six-by-six matrices, as given in Table 1. + +There are of course many interesting subgroups. The most interesting case is the $O(3,2)$ subgroup, and there are three of them. Another interesting feature is that there are three time dimensions. Thus, there are also $O(2,3)$ subgroups applicable to two space and three time coordinates. This symmetry between space and time coordinates could be an interesting future investigation. + +## **5. Feynman's Rest of the Universe** + +In his book on statistical mechanics [9], Feynman makes the following statement. When we solve a quantum-mechanical problem, what we really do is divide the universe into two parts - the system in which we are interested and the rest of the universe. We then usually act as if the system in which we are interested comprised the entire universe. To motivate the use of density matrices, let us see what happens when we include the part of the universe outside the system. + +We can use two coupled harmonic oscillators to illustrate what Feynman says about his rest of the universe. One of the oscillators can be used for the world in which we make physical measurements, while the other belongs to the rest of the universe [8]. + +Let us start with a single oscillator in its ground state. In quantum mechanics, there are many kinds of excitations of the oscillator, and three of them are familiar to us. First, it can be excited to a state with a definite energy eigenvalue. We obtain the excited-state wave functions by solving the eigenvalue problem for the Schrödinger equation, and this procedure is well known. + +Second, the oscillator can go through coherent excitations. The ground-state oscillator can be excited to a coherent or squeezed state. During this process, the minimum uncertainty of the ground state is preserved. The coherent or squeezed state is not in an energy eigenstate. 
This kind of excited state plays a central role in coherent and squeezed states of light, which have recently become a standard item in quantum mechanics. + +Third, the oscillator can go through thermal excitations. This is not a quantum excitation but a statistical ensemble. We cannot express a thermally excited state by making linear combinations of wave functions. We should treat this as a canonical ensemble. In order to deal with this thermal state, we need a density matrix. + +For the thermally excited single-oscillator state, the density matrix takes the form [9,15,16]. + +$$ \rho(x,y) = (1 - e^{-1/T}) \sum_k e^{-k/T} \phi_k(x) \phi_k^*(y) \quad (30) $$ + +where the absolute temperature T is measured in the scale of Boltzmann's constant, and $\phi_k(x)$ is the k-th excited-state oscillator wave function. The index ranges from 0 to $\infty$. +---PAGE_BREAK--- + +We also use Wigner functions to deal with statistical problems in quantum mechanics. The Wigner function for this thermally excited state is [4,9,15] + +$$W_T(x, p) = \frac{1}{\pi} \int e^{-2ipz} \rho(x-z, x+z) dz \quad (31)$$ + +which becomes + +$$W_T = \left[ \frac{\tanh(1/2T)}{\pi} \right] \exp \left[ - (x^2 + p^2) \tanh(1/2T) \right] \quad (32)$$ + +This Wigner function becomes + +$$W_0 = \frac{1}{\pi} \exp[-(x^2 + p^2)] \quad (33)$$ + +when $T=0$. As the temperature increases, the radius of this Gaussian form increases from one to [14]. + +$$\frac{1}{\sqrt{\tanh(1/2T)}} \qquad (34)$$ + +The question is whether we can derive this expanding Wigner function from the concept of Feynman's rest of the universe. In their 1999 paper [8], Han et al. used two coupled harmonic oscillators to illustrate what Feynman said about his rest of the universe. One of their two oscillators is for the world in which we do quantum mechanics and the other is for the rest of the universe. However, these authors did not use canonical transformations. 
In Section 5.1, we summarize the main point of their paper using the language of canonical transformations developed in the present paper. + +Their work was motivated by the papers by Yurke et al. [17] and by Ekert et al. [18], and the Barnett-Phoenix version of information theory [19]. These authors asked the question of what happens when one of the photons is not observed in the two-mode squeezed state. + +In Section 5.2, we introduce another form of Feynman's rest of the universe, based on non-canonical transformations discussed in the present paper. For a two-oscillator system, we can define a single-oscillator Wigner function for each oscillator. Then non-canonical transformations allow one Wigner function to expand while forcing the other to shrink. The shrinking Wigner function has a lower limit in quantum mechanics, while there is none in classical mechanics. Thus, Feynman's rest of the universe consists of classical mechanics where Planck's constant has no lower limit. + +In Section 5.3, we translate the mathematics of the expanding Wigner function into the physical language of entropy. + +## 5.1. Canonical Approach + +Let us start with the ground-state wave function for the uncoupled system. Its Hamiltonian is given in Equation (7), and its wave function is + +$$\psi_0(x_1, x_2) = \frac{1}{\sqrt{\pi}} \exp \left[ -\frac{1}{2} (x_1^2 + x_2^2) \right] \quad (35)$$ + +We can couple these two oscillators by making the following canonical transformations. 
First, let us rotate the coordinate system by 45° to get + +$$\frac{1}{\sqrt{2}}(x_1+x_2), \frac{1}{\sqrt{2}}(x_1-x_2) \qquad (36)$$ + +Let us then squeeze the coordinate system: + +$$\frac{e^{\eta}}{\sqrt{2}}(x_1 + x_2), \frac{e^{-\eta}}{\sqrt{2}}(x_1 - x_2) \qquad (37)$$ +---PAGE_BREAK--- + +Likewise, we can transform the momentum coordinates to + +$$ \frac{e^{-\eta}}{\sqrt{2}}(p_1 + p_2), \quad \frac{e^{\eta}}{\sqrt{2}}(p_1 - p_2) \qquad (38) $$ + +Equations (37) and (38) constitute a very familiar canonical transformation. The resulting wave function for this coupled system becomes + +$$ \psi_{\eta}(x_1, x_2) = \frac{1}{\sqrt{\pi}} \exp \left\{ -\frac{1}{4} [e^{\eta}(x_1 - x_2)^2 + e^{-\eta}(x_1 + x_2)^2] \right\} \quad (39) $$ + +This transformed wave function is illustrated in Figure 2. + +As was discussed in the literature for several different purposes [4,20–22], this wave function can be expanded as + +$$ \psi_{\eta}(x_1, x_2) = \frac{1}{\cosh(\eta/2)} \sum_k \left( \tanh \frac{\eta}{2} \right)^k \phi_k(x_1) \phi_k(x_2) \quad (40) $$ + +where the wave function $\phi_k(x)$ and the range of summation are defined in Equation (30). From this wave function, we can construct the pure-state density matrix + +$$ \rho(x_1, x_2; x'_1, x'_2) = \psi_\eta(x_1, x_2) \psi_\eta(x'_1, x'_2) \quad (41) $$ + +which satisfies the condition $\rho^2 = \rho$: + +$$ \rho(x_1, x_2; x'_1, x'_2) = \int \rho(x_1, x_2; x''_1, x''_2) \rho(x''_1, x''_2; x'_1, x'_2) dx''_1 dx''_2 \quad (42) $$ + +**Figure 2.** Two-dimensional Gaussian form for two-coupled oscillators. One of the variables is observable while the second variable is not observed. It belongs to Feynman's rest of the universe. + +If we are not able to make observations on the $x_2$, we should take the trace of the $\rho$ matrix with respect to the $x_2$ variable. 
Then the resulting density matrix is + +$$ \rho(x, x') = \int \psi_{\eta}(x, x_2) \{\psi_{\eta}(x', x_2)\}^* dx_2 \quad (43) $$ +---PAGE_BREAK--- + +Here, we have replaced $x_1$ and $x'_1$ by $x$ and $x'$ respectively. If we complete the integration over the $x_2$ variable, + +$$ \rho(x,x') = \left(\frac{1}{\pi \cosh \eta}\right)^{1/2} \exp\left\{-\frac{(x+x')^2 + (x-x')^2 \cosh^2 \eta}{4 \cosh \eta}\right\} \quad (44) $$ + +The diagonal elements of the above density matrix are + +$$ \rho(x,x) = \left( \frac{1}{\pi \cosh \eta} \right)^{1/2} \exp(-x^2 / \cosh \eta) \quad (45) $$ + +With this expression, we can confirm the property of the density matrix: $\text{Tr}(\rho) = 1$. As for the trace of $\rho^2$, we can perform the integration + +$$ \mathrm{Tr}(\rho^2) = \int \rho(x,x')\rho(x',x)dx'dx = \frac{1}{\cosh\eta} \quad (46) $$ + +which is less than one for nonzero values of $\eta$. + +The density matrix can also be calculated from the expansion of the wave function given in Equation (40). If we perform the integral of Equation (43), the result is + +$$ \rho(x,x') = \left( \frac{1}{\cosh(\eta/2)} \right)^2 \sum_k \left( \tanh \frac{\eta}{2} \right)^{2k} \phi_k(x) \phi_k^*(x') \quad (47) $$ + +which leads to $\text{Tr}(\rho) = 1$. It is also straightforward to compute the integral for $\text{Tr}(\rho^2)$. The calculation leads to + +$$ \mathrm{Tr}(\rho^2) = \left(\frac{1}{\cosh(\eta/2)}\right)^4 \sum_k \left(\tanh \frac{\eta}{2}\right)^{4k} \quad (48) $$ + +The sum of this series becomes to $(1/\cosh\eta)$, as given in Equation (46). + +We can approach this problem using the Wigner function. The Wigner function for the two oscillator system is [4] + +$$ W_0(x_1, p_1; x_2, p_2) = \left(\frac{1}{\pi}\right)^2 \exp\left[-(x_1^2 + p_1^2 + x_2^2 + p_2^2)\right] \quad (49) $$ + +If we pretend not to make measurement on the second oscillator coordinate, the $x_2$ and $p_2$ variables have to be integrated out [8]. 
The net result becomes the Wigner function for the first oscillator. + +The canonical transformation of Equations (37) and (38) changes this Wigner function to + +$$ W(x_1, x_2; p_1, p_2) = \left(\frac{1}{\pi}\right)^2 \exp \left\{ -\frac{1}{2} [e^\eta (x_1 - x_2)^2 + e^{-\eta} (x_1 + x_2)^2 + e^{-\eta}(p_1 - p_2)^2 + e^\eta (p_1 + p_2)^2] \right\} \quad (50) $$ + +If we do not observe the second pair of variables, we have to integrate this function over $x_2$ and $p_2$: + +$$ W_{\eta}(x_1, p_1) = \int W(x_1, x_2; p_1, p_2) dx_2 dp_2 \quad (51) $$ + +and the evaluation of this integration leads to [8] + +$$ W_{\eta}(x,p) = \frac{1}{\pi \cosh \eta} \exp\left[-\left(\frac{x^2 + p^2}{\cosh \eta}\right)\right] \quad (52) $$ + +where we use $x$ and $p$ for $x_1$ and $p_1$ respectively. +---PAGE_BREAK--- + +This Wigner function is of the form given in Equation (32) for the thermal excitation, if we identify +the squeeze parameter $\eta$ as [23] + +$$ \cosh \eta = \frac{1}{\tanh(1/2T)} \quad (53) $$ + +The failure to make a measurement on the second oscillator leads to the radial expansion of the Wigner phase space as in the case of the thermal excitation. + +## 5.2. Non-Canonical Approach + +As we noted before, among the fifteen Dirac matrices, ten of them can be used for canonical transformations in classical mechanics, and thus in quantum mechanics. They play a special role in quantum optics [2–5]. + +The remaining five of them can have their roles if the change in the phase space area is allowed. In quantum mechanics, the area can be increased, but it has a lower limit called Planck’s constant. In classical mechanics, this constraint does not exist. The mathematical formalism given in this paper allows us to study this aspect of the system of coupled oscillators. + +Let us choose the following three matrices from those in Equations (20) and (21). 
+ +$$ S_3 = \frac{1}{2} \begin{pmatrix} \sigma_2 & 0 \\ 0 & \sigma_2 \end{pmatrix}, K_2 = \frac{i}{2} \begin{pmatrix} \sigma_3 & 0 \\ 0 & \sigma_3 \end{pmatrix}, Q_2 = \frac{i}{2} \begin{pmatrix} \sigma_1 & 0 \\ 0 & \sigma_1 \end{pmatrix} \quad (54) $$ + +They satisfy the closed set of commutation relations: + +$$ [S_3, K_2] = iQ_2, [S_3, Q_2] = -iK_2, [K_2, Q_2] = -iS_3 \quad (55) $$ + +This is the Lie algebra for the $Sp(2)$ group. This is the symmetry group applicable to the single-oscillator phase space [4], with one rotation and two squeezes. These matrices generate the same transformation for the first and second oscillators. + +We can choose three other sets with similar properties. They are: + +$$ S_3 = \frac{1}{2} \begin{pmatrix} \sigma_2 & 0 \\ 0 & \sigma_2 \end{pmatrix}, Q_1 = \frac{i}{2} \begin{pmatrix} \sigma_3 & 0 \\ 0 & -\sigma_3 \end{pmatrix}, K_1 = \frac{i}{2} \begin{pmatrix} \sigma_1 & 0 \\ 0 & -\sigma_1 \end{pmatrix} \quad (56) $$ + +$$ L_3 = \frac{1}{2} \begin{pmatrix} -\sigma_2 & 0 \\ 0 & \sigma_2 \end{pmatrix}, K_2 = \frac{i}{2} \begin{pmatrix} \sigma_3 & 0 \\ 0 & \sigma_3 \end{pmatrix}, K_1 = \frac{i}{2} \begin{pmatrix} -\sigma_1 & 0 \\ 0 & \sigma_1 \end{pmatrix} \quad (57) $$ + +and + +$$ L_3 = \frac{1}{2} \begin{pmatrix} -\sigma_2 & 0 \\ 0 & \sigma_2 \end{pmatrix}, Q_1 = \frac{i}{2} \begin{pmatrix} -\sigma_3 & 0 \\ 0 & \sigma_3 \end{pmatrix}, Q_2 = \frac{i}{2} \begin{pmatrix} \sigma_1 & 0 \\ 0 & \sigma_1 \end{pmatrix} \quad (58) $$ + +These matrices also satisfy the commutation relations given in Equation (55). In this case, the squeeze transformations take opposite directions in the second phase space. + +Since all these transformations are canonical, they leave the area of each phase space invariant. However, let us look at the non-canonical generator $G_3$ of Equation (25). 
It generates the transformation matrix of the form: + +$$ \begin{pmatrix} e^{\eta} & 0 \\ 0 & e^{-\eta} \end{pmatrix} \quad (59) $$ + +If $\eta$ is positive, this matrix expands the first phase space while contracting the second. This contraction of the second phase space is allowed in classical mechanics, but it has a lower limit in quantum mechanics. + +The expansion of the first phase space is exactly like the thermal expansion resulting from our failure to observe the second oscillator that belongs to the rest of the universe. If we expand the system of Dirac's ten oscillator matrices to the world of his fifteen Majorana matrices, we can expand and +---PAGE_BREAK--- + +contract the first and second phase spaces without mixing them up. We can thus construct a model where the observed world and the rest of the universe remain separated. In the observable world, quantum mechanics remains valid with thermal excitations. In the rest of the universe, since the area of the phase space can decrease without lower limit, only classical mechanics is valid. + +During the expansion/contraction process, the product of the areas of the two phase spaces remains constant. This may or may not be an extended interpretation of the uncertainty principle, but we choose not to speculate further on this issue. + +Let us turn our attention to the fact that the groups $SL(4,r)$ and $Sp(4)$ are locally isomorphic to $O(3,3)$ and $O(3,2)$ respectively. This means that we can do quantum mechanics in one of the $O(3,2)$ subgroups of $O(3,3)$, as Dirac noted in his 1963 paper [1]. The remaining generators belong to Feynman's rest of the universe. + +### 5.3. Entropy and the Expanding Wigner Phase Space + +We have seen how Feynman's rest of the universe increases the radius of the Wigner function. It is important to note that the entropy of the system also increases. + +Let us go back to the density matrix. 
The standard way to measure this ignorance is to calculate the entropy defined as [16,24–27]. + +$$S = -\operatorname{Tr}(\rho \ln(\rho)) \qquad (60)$$ + +where S is measured in units of Boltzmann's constant. If we use the density matrix given in Equation (44), the entropy becomes + +$$S = 2\left\{\cosh^2\left(\frac{\eta}{2}\right) \ln\left(\cosh\frac{\eta}{2}\right) - \sinh^2\left(\frac{\eta}{2}\right) \ln\left(\sinh\frac{\eta}{2}\right)\right\} \quad (61)$$ + +In order to express this equation in terms of the temperature variable $T$, we write Equation (53) as + +$$\cosh \eta = \frac{1 + e^{-1/T}}{1 - e^{-1/T}} \qquad (62)$$ + +which leads to + +$$\cosh^2\left(\frac{\eta}{2}\right) = \frac{1}{1+e^{-1/T}}, \quad \sinh^2\left(\frac{\eta}{2}\right) = \frac{e^{-1/T}}{1+e^{-1/T}} \qquad (63)$$ + +Then the entropy of Equation (61) takes the form [8] + +$$S = \left(\frac{1}{T}\right) \left\{ \frac{1}{\exp\left(\frac{1}{T}\right) - 1} \right\} - \ln\left(1 - e^{-1/T}\right) \qquad (64)$$ + +This familiar expression is for the entropy of an oscillator state in thermal equilibrium. Thus, for this oscillator system, we can relate our ignorance of the Feynman's rest of the universe, measured by the coupling parameter $\eta$, to the temperature. + +## 6. Concluding Remarks + +In this paper, we started with the fifteen four-by-four matrices for the Majorana representation of the Dirac matrices, and the ten generators of the $Sp(4)$ group corresponding to Dirac's oscillator matrices. Their explicit forms are given in the literature [6,7], and their roles in modern physics are well-known [3,4,11]. We re-organized them into tables. + +The difference between these two representations consists of five matrices. The physics of this difference is discussed in terms of Feynman's rest of the universe [9]. According to Feynman, this universe consists of the world in which we do quantum mechanics, and the rest of the universe. 
In the rest of the universe, our physical laws may or may not be respected. In the case of coupled oscillators, without the lower limit on Planck's constant, we can do classical mechanics but not quantum mechanics in the rest of the universe. +---PAGE_BREAK--- + +In 1971, Feynman et al. [28] published a paper on the oscillator model of hadrons, where the proton consists of three quarks linked up by oscillator springs. In order to treat this problem, they use a three-particle symmetry group formulated by Dirac in his book on quantum mechanics [29,30]. An interesting problem could be to see what happens to the two quarks when one of them is not observed. Another interesting question could be to see what happens to one of the quarks when two of them are not observed. + +Finally, we note here that group theory is a very powerful tool in approaching problems in modern physics. Different groups can share the same set of commutation relations for their generators. Recently, the group SL(2, c) through its correspondence with the SO(3,1) has been shown to be the underlying language for classical and modern optics [4,31]. In this paper, we exploited the correspondence between SL(4, r) and O(3,3), as well as the correspondence between Sp(4) and O(3,2), which was first noted by Paul A. M. Dirac [1]. + +There could be more applications of group isomorphisms in the future. A comprehensive list of those correspondences is given in Gilmore's book on Lie groups [32]. + +**Acknowledgments:** We would like to thank Christian Baumgarten for telling us about the *Sp*(2) symmetry in classical mechanics. + +References + +1. Dirac, P.A.M. A remarkable representation of the 3 + 2 de Sitter Group. J. Math. Phys. **1963**, *4*, 901-909. + [CrossRef] + +2. Yuen, H.P. Two-photon coherent states of the radiation field. Phys. Rev. A **1976**, *13*, 2226-2243. [CrossRef] + +3. Yurke, B.S.; McCall, S.L.; Klauder, J.R. SU(2) and SU(1,1) interferometers. Phys. Rev. A **1986**, *33*, 4033-4054. 
+ [CrossRef] [PubMed] + +4. Kim, Y.S.; Noz, M.E. Phase Space Picture of Quantum Mechanics; World Scientific Publishing Company: Singapore, 1991. + +5. Han, D.; Kim, Y.S.; Noz, M.E.; Yeh, L. Symmetries of two-mode squeezed states. J. Math. Phys. **1993**, *34*, 5493-5508. [CrossRef] + +6. Han, D.; Kim, Y.S.; Noz, M.E. O(3,3)-like symmetries of coupled harmonic oscillators. J. Math. Phys. **1995**, *36*, 3940-3954. [CrossRef] + +7. Lee, D.-G. The Dirac gamma matrices as "relics" of a hidden symmetry?: As fundamental representation of the algebra Sp(4,r). J. Math. Phys. **1995**, *36*, 524-530. [CrossRef] + +8. Han, D.; Kim, Y.S.; Noz, M.E. Illustrative example of Feynman's rest of the universe. Am. J. Phys. **1999**, *67*, 61-66. [CrossRef] + +9. Feynman, R.P. Statistical Mechanics; Benjamin/Cummings: Reading, MA, USA, 1972. + +10. Majorana, E. Relativistic theory of particles with arbitrary intrinsic angular momentum. Nuovo Cimento **1932**, *9*, 335-341. [CrossRef] + +11. Itzykson, C.; Zuber, J.B. Quantum Field Theory; MaGraw-Hill: New York, NY, USA, 1980. + +12. Goldstein, H. *Classical Mechanics*, 2nd ed.; Addison-Wesley: Reading, MA, USA, 1980. + +13. Abraham, R.; Marsden, J.E. *Foundations of Mechanics*, 2nd ed.; Benjamin/Cummings: Reading, MA, USA, 1978. + +14. Kim, Y.S.; Li, M. Squeezed states and thermally excited states in the Wigner phase-space picture of quantum mechanics. Phys. Lett. A **1989**, *139*, 445-448. [CrossRef] + +15. Davies, R.W.; Davies, K.T.R. On the Wigner distribution function for an oscillator. Ann. Phys. **1975**, *89*, 261-273. [CrossRef] + +16. Landau, L.D.; Lifshitz, E.M. Statistical Physics; Pergamon Press: London, UK, 1958. + +17. Yurke, B.; Potasek, M. Obtainment of thermal noise from a pure state. Phys. Rev. A **1987**, *36*, 3464-3466. + [CrossRef] [PubMed] + +18. Ekert, A.K.; Knight, P.L. Correlations and squeezing of two-mode oscillations. Am. J. Phys. **1989**, *57*, 692-697. + [CrossRef] + +19. Barnett, S.M.; Phoenix, S.J.D. 
Information theory, squeezing and quantum correlations. Phys. Rev. A **1991**, *44*, 535-545. [CrossRef] [PubMed] +---PAGE_BREAK--- + +20. Kim, Y.S.; Noz, M.E.; Oh, S.H. A simple method for illustrating the difference between the homogeneous and inhomogeneous Lorentz Groups. Am. J. Phys. **1979**, *47*, 892–897. [CrossRef] + +21. Kim, Y.S.; Noz, M.E. *Theory and Applications of the Poincaré Group*; Reidel: Dordrecht, the Netherlands, 1986. + +22. Giedke, G.; Wolf, M.M.; Krueger, O.; Werner, R.F.; Cirac, J.J. Entanglement of formation for symmetric Gaussian states. Phys. Rev. Lett. **2003**, *91*, 107901–107904. [CrossRef] [PubMed] + +23. Han, D.; Kim, Y.S.; Noz, M.E. Lorentz-squeezed hadrons and hadronic temperature. Phys. Lett. A **1990**, *144*, 111–115. [CrossRef] + +24. von Neumann, J. *Mathematical Foundation of Quantum Mechanics*; Princeton University: Princeton, NJ, USA, 1955. + +25. Fano, U. Description of states in quantum mechanics by density matrix and operator techniques. Rev. Mod. Phys. **1957**, *29*, 74–93. [CrossRef] + +26. Blum, K. *Density Matrix Theory and Applications*; Plenum: New York, NY, USA, 1981. + +27. Kim, Y.S.; Wigner, E.P. Entropy and Lorentz transformations. Phys. Lett. A **1990**, *147*, 343–347. [CrossRef] + +28. Feynman, R.P.; Kislinger, M.; Ravndal, F. Current matrix elements from a relativistic Quark Model. Phys. Rev. D **1971**, *3*, 2706–2732. [CrossRef] + +29. Dirac, P.A.M. *Principles of Quantum Mechanics*, 4th ed.; Oxford University: London, UK, 1958. + +30. Hussar, P.E.; Kim, Y.S.; Noz, M.E. Three-particle symmetry classifications according to the method of Dirac. Am. J. Phys. **1980**, *48*, 1038–1042. [CrossRef] + +31. Başkal, S.; Kim, Y.S. Lorentz Group in ray and polarization optics. In *Mathematical Optics: Classical, Quantum and Imaging Methods*; Lakshminarayanan, V., Calvo, M.L., Alieva, T., Eds.; CRC Press: New York, NY, USA, 2012. + +32. Gilmore, R. 
*Lie Groups, Lie Algebras, and Some of Their Applications*; Wiley: New York, NY, USA, 1974. + +© 2012 by the authors. Licensee MDPI, Basel, Switzerland. This article is an open access +article distributed under the terms and conditions of the Creative Commons Attribution +(CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +Article + +# Symmetries Shared by the Poincaré Group and the Poincaré Sphere + +Young S. Kim ¹,* and Marilyn E. Noz ² + +¹ Center for Fundamental Physics, University of Maryland, College Park, MD 20742, USA + +² Department of Radiology, New York University, New York, NY 10016, USA; marilyne.noz@gmail.com + +* Author to whom correspondence should be addressed; yskim@umd.edu; Tel.: +1-301-937-1306. + +Received: 29 May 2013; in revised form: 9 June 2013; Accepted: 9 June 2013; Published: 27 June 2013 + +**Abstract:** Henri Poincaré formulated the mathematics of Lorentz transformations, known as the Poincaré group. He also formulated the Poincaré sphere for polarization optics. It is shown that these two mathematical instruments can be derived from the two-by-two representations of the Lorentz group. Wigner's little groups for internal space-time symmetries are studied in detail. While the particle mass is a Lorentz-invariant quantity, it is shown to be possible to address its variations in terms of the decoherence mechanism in polarization optics. + +**Keywords:** Poincaré group; Poincaré sphere; Wigner's little groups; particle mass; decoherence mechanism; two-by-two representations; Lorentz group + +## 1. Introduction + +It was Henri Poincaré who worked out the mathematics of Lorentz transformations before Einstein and Minkowski, and the Poincaré group is the underlying language for special relativity. In order to analyze the polarization of light, Poincaré also constructed a graphic illustration known as the Poincaré sphere [1–3]. 
+ +It is of interest to see whether the Poincaré sphere can also speak the language of special relativity. In that case, we can study the physics of relativity in terms of what we observe in optical laboratories. For that purpose, we note first that the Lorentz group starts as a group of four-by-four matrices, while the Poincaré sphere is based on the two-by-two matrix consisting of four Stokes parameters. Thus, it is essential to find a two-by-two representation of the Lorentz group. Fortunately, this representation exists in the literature [4,5], and we shall use it in this paper. + +As for the problems in relativity, we shall discuss here Wigner’s little groups dictating the internal space-time symmetries of relativistic particles [6]. In his original paper of 1939 [7], Wigner considered the subgroups of the Lorentz group, whose transformations leave the four-momentum of a given particle invariant. While this problem has been extensively discussed in the literature, we propose here to study it using Naimark’s two-by-two representation of the Lorentz group [4,5]. + +This two-by-two representation is useful for communicating with the symmetries of the Poincaré sphere based on the four Stokes parameters, which can take the form of two-by-two matrices. We shall prove here that the Poincaré sphere shares the same symmetry property as that of the Lorentz group, particularly in approaching Wigner’s little groups. By doing this, we can study the Lorentz symmetries of elementary particles from what we observe in optical laboratories. + +The present paper starts from an unpublished note based on an invited paper presented by one of the authors (YSK) at the Fedorov Memorial Symposium: Spins and Photonic Beams at Interface held in Minsk (2011) [8]. To this, we have added a detailed discussion of how the decoherence mechanism in polarization optics is mathematically equivalent to a massless particle gaining mass to become a massive particle. 
We are particularly interested in how the variation of mass can be accommodated in the study of internal space-time symmetries. +---PAGE_BREAK--- + +In Section 2, we define the symmetry problem we propose to study in this paper. We are interested in the subgroups of the Lorentz group, whose transformations leave the four-momentum of a given particle invariant. This is an old problem and has been repeatedly discussed in the literature [6,7,9]. In this paper, we discuss this problem using the two-by-two formulation of the Lorentz group. This two-by-two language is directly applicable to polarization optics and the Poincaré sphere. + +While Wigner formulated his little groups for particles in their given Lorentz frames, we give a formalism applicable to all Lorentz frames. In his 1939 paper, Wigner pointed out that his little groups are different for massive, massless and imaginary-particles. In Section 3, we discuss the possibility of deriving the symmetry properties for massive and imaginary-mass particles from that of the massless particle. + +In Section 4, we assemble the variables in polarization optics, and define the matrix operators corresponding to transformations applicable to those variables. We write the Stokes parameters in the form of a two-by-two matrix. The Poincaré sphere can be constructed from this two-by-two Stokes matrix. In Section 5, we note that there can be two radii for the Poincaré sphere. Poincaré's original sphere has one fixed radius, but this radius can change, depending on the degree of coherence. Based on what we studied in Section 3, we can associate this change of the radius to the change in mass of the particle. + +## 2. Poincaré Group and Wigner's Little Groups + +Poincaré formulated the group theory of Lorentz transformations applicable to four-dimensional space consisting of three space coordinates and one time variable. There are six generators for this group consisting of three rotation and three boost generators. 
+ +In addition, Poincaré considered translations applicable to those four space-time variables, with four generators. If we add these four generators to the six generators for the homogeneous Lorentz group, the result is the inhomogeneous Lorentz group [7] with ten generators. This larger group is called the Poincaré group in the literature. + +The four translation generators produce space-time four-vectors consisting of the energy and momentum. Thus, within the framework of the Poincaré group, we can consider the subgroup of the Lorentz group for a fixed value of momentum [7]. This subgroup defines the internal space-time symmetry of the particle. Let us consider a particle at rest. Its momentum consists of its mass as its time-like variable and zero for the three momentum components. + +$$ (m, 0, 0, 0) \qquad (1) $$ + +For convenience, we use the four-vector convention, $(t, z, x, y)$ and $(E, p_x, p_y)$. + +This four-momentum of Equation (1) is invariant under three-dimensional rotations applicable only to the $z, x, y$ coordinates. The dynamical variable associated with this rotational degree of freedom is called the spin of the particle. + +We are then interested in what happens when the particle moves with a non-zero momentum. If it moves along the z direction, the four-momentum takes the value: + +$$ m(\cosh \eta, \sinh \eta, 0, 0) \qquad (2) $$ + +which means: + +$$ p_0 = m(\cosh \eta)p_z = m(\sinh \eta)e^{\eta} = \sqrt{\frac{p_0 + p_z}{p_0 - p_z}} \qquad (3) $$ + +Accordingly, the little group consists of Lorentz-boosted rotation matrices. This aspect of the little group has been discussed in the literature [6,9]. The question then is whether we could carry out the same logic using two-by-two matrices +---PAGE_BREAK--- + +Of particular interest is what happens when the transformation parameter, $\eta$, becomes very large and the four-momentum becomes that of a massless particle. 
This problem has also been discussed in the literature within the framework of four-dimensional Minkowski space. The $\eta$ parameter becomes large when the momentum becomes large, but it can also become large when the mass becomes very small. The two-by-two formulation allows us to study these two cases separately, as we will do in Section 3. + +If the particle has an imaginary mass, it moves faster than light and is not observable. Yet, particles of this kind play important roles in Feynman diagrams, and their space-time symmetry should also be studied. In his original paper [7], Wigner studied the little group as the subgroup of the Lorentz group whose transformations leave the four-momentum invariant of the form: + +$$ (0, k, 0, 0) \tag{4} $$ + +Wigner observed that this four-momentum remains invariant under the Lorentz boost along the x or y direction. + +If we boost this four-momentum along the z direction, the four-momentum becomes: + +$$ k(\sinh\eta, \cosh\eta, 0, 0) \tag{5} $$ + +with: + +$$ e^{\eta} = \sqrt{\frac{p_0 + p_z}{p_z - p_0}} \tag{6} $$ + +The two-by-two formalism also allows us to study this problem. + +In Section 2.1, we shall present the two-by-two representation of the Lorentz group. In Section 2.2, we shall present Wigner's little groups in this two-by-two representation. While Wigner's analysis was based on particles in their fixed Lorentz frames, we are interested in what happens when they start moving. We shall deal with this problem in Section 3. + +## 2.1. Two-by-Two Representation of the Lorentz Groups + +The Lorentz group starts with a group of four-by-four matrices performing Lorentz transformations on the Minkowskian vector space of $(t, z, x, y)$, leaving the quantity: + +$$ t^2 - z^2 - x^2 - y^2 \tag{7} $$ + +invariant. It is possible to perform this transformation using two-by-two representations [4,5]. This mathematical aspect is known as SL(2, c), the universal covering group for the Lorentz group. 
+ +In this two-by-two representation, we write the four-vector as a matrix: + +$$ X = \begin{pmatrix} t+z&x-iy \\ x+iy&t-z \end{pmatrix} \tag{8} $$ + +Then, its determinant is precisely the quantity given in Equation (7). Thus, the Lorentz transformation on this matrix is a determinant-preserving transformation. Let us consider the transformation matrix as: + +$$ G = \begin{pmatrix} a & b \\ c & d \end{pmatrix} G^\dagger = \begin{pmatrix} a^* & c^* \\ b^* & d^* \end{pmatrix} \tag{9} $$ + +with: + +$$ \det(G) = 1 \tag{10} $$ + +The $G$ matrix starts with four complex numbers. Due to the above condition on its determinant, it has six independent parameters. The group of these $G$ matrices is known to be locally isomorphic to +---PAGE_BREAK--- + +the group of four-by-four matrices performing Lorentz transformations on the four-vector (t,z,x,y). +In other words, for each G matrix, there is a corresponding four-by-four Lorentz-transform matrix, as +is illustrated in the Appendix A. + +The matrix, $G$, is not a unitary matrix, because its Hermitian conjugate is not always its inverse. +The group can have a unitary subgroup, called $SU(2)$, performing rotations on electron spins. As far +as we can see, this $G$-matrix formalism was first presented by Naimark in 1954 [4]. Thus, we call this +formalism the Naimark representation of the Lorentz group. We shall see first that this representation +is convenient for studying space-time symmetries of particles. We shall then note that this Naimark +representation is the natural language for the Stokes parameters in polarization optics. + +With this point in mind, we can now consider the transformation: + +$$X' = GXG^{\dagger} \qquad (11)$$ + +Since $G$ is not a unitary matrix, it is not a unitary transformation. In order to tell this difference, we call +this the "Naimark transformation". 
This expression can be written explicitly as: + +$$\begin{pmatrix} t' + z' & x' - iy' \\ x + iy & t' - z' \end{pmatrix} = \begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix} \begin{pmatrix} t + z & x - iy \\ x + iy & t - z \end{pmatrix} \begin{pmatrix} \alpha^* & \gamma^* \\ \beta^* & \delta^* \end{pmatrix} \quad (12)$$ + +For this transformation, we have to deal with four complex numbers. However, for all practical +purposes, we may work with two Hermitian matrices: + +$$Z(\delta) = \begin{pmatrix} e^{i\delta/2} & 0 \\ 0 & e^{-i\delta/2} \end{pmatrix} R(\delta) = \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \quad (13)$$ + +and two symmetric matrices: + +$$B(\eta) = \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} S(\lambda) = \begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix} \quad (14)$$ + +whose Hermitian conjugates are not their inverses. The two Hermitian matrices in Equation (13) lead +to rotations around the *z* and *y* axes, respectively. The symmetric matrices in Equation (14) perform +Lorentz boosts along the *z* and *x* directions, respectively. + +Repeated applications of these four matrices will lead to the most general form of the $G$ matrix of +Equation (9) with six independent parameters. For each two-by-two Naimark transformation, there is +a four-by-four matrix performing the corresponding Lorentz transformation on the four-component +four-vector. In the Appendix A, the four-by-four equivalents are given for the matrices of Equations (13) +and (14). + +It was Einstein who defined the energy-momentum four-vector and showed that it also has the +same Lorentz-transformation law as the space-time four-vector. 
We write the energy-momentum +four-vector as: + +$$P = \begin{pmatrix} E + p_z & p_x - ip_y \\ p_x + ip_y & E - p_z \end{pmatrix} \qquad (15)$$ + +with: + +$$\det(P) = E^2 - p_x^2 - p_y^2 - p_z^2 \qquad (16)$$ + +which means: + +$$\det(P) = m^2 \qquad (17)$$ + +where *m* is the particle mass. +---PAGE_BREAK--- + +Now, Einstein's transformation law can be written as: + +$$P' = GPC^+ \quad (18)$$ + +or explicitly: + +$$\begin{pmatrix} E' + p_z' & p_x' - ip_y' \\ p_x' + ip_y' & E' - p_z' \end{pmatrix} = \begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix} \begin{pmatrix} E + p_z & p_x - ip_y \\ p_x + ip_y & E - p_z \end{pmatrix} \begin{pmatrix} \alpha^* & \gamma^* \beta^* \\ \delta^* & \end{pmatrix} \quad (19)$$ + +## 2.2. Wigner's Little Groups + +Later in 1939 [7], Wigner was interested in constructing subgroups of the Lorentz group whose transformations leave a given four-momentum invariant. He called these subsets "little groups". Thus, Wigner's little group consists of two-by-two matrices satisfying: + +$$P = WPW^+ \quad (20)$$ + +This two-by-two W matrix is not an identity matrix, but tells about the internal space-time symmetry of a particle with a given energy-momentum four-vector. This aspect was not known when Einstein formulated his special relativity in 1905. The internal space-time symmetry was not an issue at that time. + +If its determinant is a positive number, the P matrix can be brought to a form proportional to: + +$$P = \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix} \quad (21)$$ + +corresponding to a massive particle at rest. + +If the determinant is negative, it can be brought to a form proportional to: + +$$P = \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} \quad (22)$$ + +corresponding to an imaginary-mass particle moving faster than light along the z direction, with its vanishing energy component. 
+ +If the determinant is zero, we may write P as: + +$$P = \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \quad (23)$$ + +which is proportional to the four-momentum matrix for a massless particle moving along the z direction. + +For all three of the above cases, the matrix of the form: + +$$Z(\delta) = \begin{pmatrix} e^{i\delta/2} & 0 \\ 0 & e^{-i\delta/2} \end{pmatrix} \quad (24)$$ + +will satisfy the Wigner condition of Equation (20). This matrix corresponds to rotations around the z axis, as is shown in the Appendix A. + +For the massive particle with the four-momentum of Equation (21), the Naimark transformations with the rotation matrix of the form: + +$$R(\theta) = \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \quad (25)$$ +---PAGE_BREAK--- + +also leave the *P* matrix of Equation (21) invariant. Together with the *Z*(*δ*) matrix, this rotation matrix +leads to the subgroup consisting of the unitary subset of the *G* matrices. The unitary subset of *G* is +*SU*(2), corresponding to the three-dimensional rotation group dictating the spin of the particle [9]. + +For the massless case, the transformations with the triangular matrix of the form: + +$$ +\begin{pmatrix} +1 & \gamma \\ +0 & 1 +\end{pmatrix} +\qquad (26) +$$ + +leave the momentum matrix of Equation (23) invariant. The physics of this matrix has a stormy history, +and the variable, $\gamma$, leads to gauge transformation applicable to massless particles [6,10]. + +For a particle with its imaginary mass, the W matrix of the form: + +$$ +S(\lambda) = \begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix} \tag{27} +$$ + +will leave the four-momentum of Equation (22) invariant. This unobservable particle does not appear +to have observable internal space-time degrees of freedom. + +Table 1 summarizes the transformation matrices for Wigner’s subgroups for massive, massless and imaginary-mass particles. 
Of course, it is a challenging problem to have one expression for all those three cases, and this problem has been addressed in the literature [11]. + +**Table 1.** Wigner’s Little Groups. The little groups are the subgroups of the Lorentz group, whose transformations leave the four-momentum of a given particle invariant. Thus, the little groups define the internal space-time symmetries of particles. The four-momentum remains invariant under the rotation around it. In addition, the four-momentum remains invariant under the following transformations. These transformations are different for massive, massless and imaginary-mass particles. + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Particle mass + + Four-momentum + + Transform matrices +
+ Massive + + ( + + + + + + + + + +
+ 1 + + 0 +
+ 0 + + 1 +
+ ) +
+ ( + + + + + + + + + +
cos(θ/2)
+ sin(θ/2) + + cos(θ/2) +
+ ) +
+ Massless + + ( + + + + + + + + + +
+ 1 + + 0 +
+ 0 + + 0 +
+ ) +
+ ( + + + + + + + + + +
+ 1 + + γ +
+ 0 + + 1 +
+ ) +
+ Imaginary mass + + ( + + + + + + + + + +
+ 1 + + 0 +
+ 0 + + -1 +
+ ) +
+ ( + + + + + + + + + +
+ cosh(λ/2) + + sinh(λ/2) +
+ sinh(λ/2) + + cosh(λ/2) +
+ ) +
+ +**3. Lorentz Completion of Wigner's Little Groups** + +In his original paper [7], Wigner worked out his little groups for specific Lorentz frames. For the massive particle, he constructed his little group in the frame where the particle is at rest. For the imaginary-mass particle, the energy-component of his frame is zero. + +For the massless particle, it moves along the *z* direction with a nonzero momentum. There are no specific frames particularly convenient for us. Thus, the specific frame can be chosen for an arbitrary value of the momentum, and the triangular matrix of Equation (26) should remain invariant under Lorentz boosts along the *z* direction. + +For the massive particle, let us Lorentz-boost the four-momentum matrix of Equation (21) by performing a Naimark transformation: + +$$ +\begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix} \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \quad (28) +$$ + +which leads to: + +$$ +\left( \begin{array}{cc} e^{\eta} & 0 \\ 0 & e^{-\eta} \end{array} \right) \qquad (29) +$$ +---PAGE_BREAK--- + +This resulting matrix corresponds to the Lorentz-boosted four-momentum given in Equation (2). For simplicity, we let $m = 1$ hereafter in this paper. The Lorentz transformation applicable to the four-momentum matrix is not a similarity transformation, but it is a Naimark transformation, as defined in Equation (11). 
+ +On the other hand, the rotation matrix of Equation (25) is Lorentz-boosted as a similarity transformation: + +$$ \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \begin{pmatrix} e^{-\eta/2} & 0 \\ 0 & e^{\eta/2} \end{pmatrix} \quad (30) $$ + +and it becomes: + +$$ \begin{pmatrix} \cos(\theta/2) & -e^{\eta} \sin(\theta/2) \\ e^{-\eta} \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \quad (31) $$ + +If we perform the Naimark transformation of the four-momentum matrix of Equation (29) with this Lorentz-boosted rotation matrix: + +$$ \begin{pmatrix} \cos(\theta/2) & -e^{\eta} \sin(\theta/2) \\ e^{-\eta/2} \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \begin{pmatrix} e^{\eta} & 0 \\ 0 & e^{-\eta} \end{pmatrix} \begin{pmatrix} \cos(\theta/2) & e^{\eta} \sin(\theta/2) \\ -e^{-\eta} \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \quad (32) $$ + +the result is the four-momentum matrix of Equation (29). This means that the Lorentz-boosted rotation matrix of Equation (31) represents the little group, whose transformations leave the four-momentum matrix of Equation (29) invariant. + +For the imaginary-mass case, the Lorentz boosted four-momentum matrix becomes: + +$$ \begin{pmatrix} e^\eta & 0 \\ 0 & -e^{-\eta} \end{pmatrix} \quad (33) $$ + +The little group matrix is: + +$$ \begin{pmatrix} \cosh(\lambda/2) & e^\eta \sinh(\lambda/2) \\ e^{-\eta} \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix} \quad (34) $$ + +where $\eta$ is given in Equation (6). + +For the massless case, if we boost the four-momentum matrix of Equation (23), the result is: + +$$ e^{\eta} \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \quad (35) $$ + +Here, the $\eta$ parameter is an independent variable and cannot be defined in terms of the momentum or energy. + +The remaining problem is to see whether the massive and imaginary-mass cases collapse to the massless case in the large $\eta$ limit. 
This variable becomes large when the momentum becomes large or the mass becomes small. We shall discuss these two cases separately. + +### 3.1. Large-Momentum Limit + +While Wigner defined his little group for the massive particle in its rest frame in his original paper [7], the little group represented by Equation (31) is applicable to the moving particle, whose four-momentum is given in Equation (29). This matrix can also be written as: + +$$ e^{\eta} \begin{pmatrix} 1 & 0 \\ 0 & e^{-2\eta} \end{pmatrix} \quad (36) $$ +---PAGE_BREAK--- + +In the limit of large η, we can change the above expression into: + +$$e^{\eta} \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \qquad (37)$$ + +This process is continuous, but not necessarily analytic [11]. After making this transition, we can come back to the original frame to obtain the four momentum matrix of Equation (23). + +The remaining problem is the Lorentz-boosted rotation matrix of Equation (31). If this matrix is going to remain finite as $\eta$ approaches infinity, the upper-right element should be finite for large values of $\eta$. Let it be $\gamma$. Then: + +$$-\varepsilon^{\eta} \sin(\theta/2) = \gamma \qquad (38)$$ + +This means that angle $\theta$ has to become zero. As a consequence, the little group matrix of Equation (31) becomes the triangular matrix given in Equation (26) for massless particles. + +Imaginary-mass particles move faster than light, and they are not observable. On the other hand, the mathematics applicable to Wigner's little group for this particle has been useful in the two-by-two beam transfer matrix in ray and polarization optics [12]. + +Let us go back to the four-momentum matrix of Equation (22). 
If we boost this matrix, it becomes: + +$$\begin{pmatrix} e^{\eta} & 0 \\ 0 & -e^{-\eta} \end{pmatrix} \qquad (39)$$ + +which can be written as: + +$$e^{\eta} \begin{pmatrix} 1 & 0 \\ 0 & -e^{-2\eta} \end{pmatrix} \qquad (40)$$ + +This matrix can be changed to form Equation (37) in the limit of large $\eta$. + +Indeed, the little groups for massive, massless and imaginary cases coincide in the large-$\eta$ limit. Thus, it is possible to jump from one little group to another, and it is a continuous process, but not necessarily analytic [12]. + +The $\eta$ parameter can become large as the momentum becomes large or the mass becomes small. In this subsection, we considered the case for large momentum. However, it is of interest to see the limiting process when the mass becomes small, especially in view of the fact that neutrinos have small masses. + +## 3.2. Small-Mass Limit + +Let us start with a massive particle with fixed energy, $E$. Then, $p_0 = E$, and $p_z = E \cos \chi$. The four-momentum matrix is: + +$$E \begin{pmatrix} 1 + \cos \chi & 0 \\ 0 & 1 - \cos \chi \end{pmatrix} \qquad (41)$$ + +The determinant of this matrix is $E^2 (\sin \chi)^2$. In the regime of the Lorentz group, this is the $(mass)^2$ and is a Lorentz-invariant quantity. There are no Lorentz transformations that change the angle, $\chi$. Thus, with this extra variable, it is possible to study the little groups for variable masses, including the small-mass limit and the zero-mass case. + +If $\chi = 0$, the matrix of Equation (41) becomes that of the four-momentum matrix for a massless particle. As it becomes a positive small number, the matrix of Equation (41) can be written as: + +$$E(\sin\chi) \begin{pmatrix} e^\eta & 0 \\ 0 & e^{-\eta} \end{pmatrix} \qquad (42)$$ +---PAGE_BREAK--- + +with + +$$e^{\eta} = \sqrt{\frac{1 + \cos \chi}{1 - \cos \chi}} \qquad (43)$$ + +Here, again, the determinant of Equation (42) is $E^2(\sin \chi)^2$. 
With this matrix, we can construct Wigner's little group for each value of the angle, $\chi$. If $\chi$ is not zero, even if it is very small, the little group is $O(3)$-like, as in the case of all massive particles. As the angle, $\chi$, varies continuously from zero to 90°, the mass increases from zero to its maximum value. + +It is important to note that the little groups are different for the small-mass limit and for the zero-mass case. In this section, we studied the internal space-time symmetries dictated by Wigner's little groups, and we are able to present their Lorentz-covariant picture in Table 2. + +**Table 2.** Covariance of the energy-momentum relation and covariance of the internal space-time symmetry groups. The $\gamma$ parameter for the massless case has been studied in earlier papers in the four-by-four matrix formulation [6]. It corresponds to a gauge transformation. Among the three spin components, $S_3$ is along the direction of the momentum and remains invariant. It is called the "helicity". + +
| Massive, Slow | Covariance | Massless, Fast |
|---|---|---|
| $E = p^2/2m$ | Einstein's $E = mc^2$ | $E = cp$ |
| $S_3$ | | Helicity |
| $S_1, S_2$ | Wigner's Little Group | Gauge Transformation |
+ +## 4. Jones Vectors and Stokes Parameters + +In studying polarized light propagating along the z direction, the traditional approach is to consider the x and y components of the electric fields. Their amplitude ratio and the phase difference determine the state of polarization. Thus, we can change the polarization either by adjusting the amplitudes, by changing the relative phase or both. For convenience, we call the optical device that changes amplitudes an "attenuator" and the device that changes the relative phase a "phase shifter". + +The traditional language for this two-component light is the Jones-vector formalism, which is discussed in standard optics textbooks [13]. In this formalism, the above two components are combined into one column matrix, with the exponential form for the sinusoidal function: + +$$\begin{pmatrix} \psi_1(z,t) \\ \psi_2(z,t) \end{pmatrix} = \begin{pmatrix} a \exp\{i(kz - \omega t + \phi_1)\} \\ b \exp\{i(kz - \omega t + \phi_2)\} \end{pmatrix} \qquad (44)$$ + +This column matrix is called the Jones vector. + +When the beam goes through a medium with different values of indexes of refraction for the x and y directions, we have to apply the matrix: + +$$\begin{pmatrix} e^{i\delta_1} & 0 \\ 0 & e^{i\delta_2} \end{pmatrix} = e^{i(\delta_1+\delta_2)/2} \begin{pmatrix} e^{-i\delta/2} & 0 \\ 0 & e^{i\delta/2} \end{pmatrix} \qquad (45)$$ + +with $\delta = \delta_1 - \delta_2$. In measurement processes, the overall phase factor, $e^{i(\delta_1+\delta_2)/2}$, cannot be detected and can therefore be deleted. The polarization effect of the filter is solely determined by the matrix: + +$$Z(\delta) = \begin{pmatrix} e^{i\delta/2} & 0 \\ 0 & e^{-i\delta/2} \end{pmatrix} \qquad (46)$$ + +which leads to a phase difference of $\delta$ between the x and y components. The form of this matrix is given in Equation (13), which serves as the rotation around the z axis in the Minkowski space and time. 
+---PAGE_BREAK--- + +Also along the x and y directions, the attenuation coefficients could be different. This will lead to +the matrix [14]: + +$$ +\begin{pmatrix} +e^{-\eta_1} & 0 \\ +0 & e^{-\eta_2} +\end{pmatrix} += +e^{-(\eta_1+\eta_2)/2} +\begin{pmatrix} +e^{\eta/2} & 0 \\ +0 & e^{-\eta/2} +\end{pmatrix} +\quad (47) +$$ + +with $\eta = \eta_2 - \eta_1$. If $\eta_1 = 0$ and $\eta_2 = \infty$, the above matrix becomes: + +$$ +\begin{pmatrix} +1 & 0 \\ +0 & 0 +\end{pmatrix} +\qquad (48) +$$ + +which eliminates the y component. This matrix is known as a polarizer in the textbooks [13] and is a +special case of the attenuation matrix of Equation (47). + +This attenuation matrix tells us that the electric fields are attenuated at two different rates. +The exponential factor, $e^{-(\eta_1+\eta_2)/2}$, reduces both components at the same rate and does not affect the +state of polarization. The effect of polarization is solely determined by the squeeze matrix [14]: + +$$ +B(\eta) = \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \tag{49} +$$ + +This diagonal matrix is given in Equation (14). In the language of space-time symmetries, this matrix performs a Lorentz boost along the z direction. + +The polarization axes are not always the x and y axes. For this reason, we need the rotation matrix: + +$$ +R(\theta) = \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \quad (50) +$$ + +which, according to Equation (13), corresponds to the rotation around the *y* axis in the space-time symmetry. + +Among the rotation angles, the angle of 45° plays an important role in polarization optics. +Indeed, if we rotate the squeeze matrix of Equation (49) by 45°, we end up with the squeeze matrix: + +$$ +R(\theta) = \begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix} \quad (51) +$$ + +which is also given in Equation (14). 
In the language of space-time physics, this matrix leads to a +Lorentz boost along the x axis. + +Indeed, the *G* matrix of Equation (9) is the most general form of the transformation matrix applicable to the Jones vector. Each of the above four matrices plays its important role in special relativity, as we discussed in Section 2. Their respective roles in optics and particle physics are given in Table 3. + +However, the Jones vector alone cannot tell us whether the two components are coherent with each other. In order to address this important degree of freedom, we use the coherency matrix [1,2]: + +$$ +C = \begin{pmatrix} S_{11} & S_{12} \\ S_{21} & S_{22} \end{pmatrix} \tag{52} +$$ + +with: + +$$ +\langle \psi_i^* \psi_j \rangle = \frac{1}{T} \int_0^T \psi_i^*(t + \tau) \psi_j(t) dt \quad (53) +$$ +---PAGE_BREAK--- + +where $T$, for a sufficiently long time interval, is much larger than $\tau$. Then, those four elements become [15]: + +$$ +\begin{aligned} +S_{11} &= \langle \psi_1^\dagger \psi_1 \rangle = a^2 & S_{12} &= \langle \psi_1^\dagger \psi_2 \rangle = abe^{-(\sigma+i\delta)} \\ +S_{21} &= \langle \psi_2^\dagger \psi_1 \rangle = abe^{-(\sigma-i\delta)} & S_{22} &= \langle \psi_2^\dagger \psi_2 \rangle = b^2 +\end{aligned} +\quad (54) $$ + +The diagonal elements are the absolute values of $\psi_1$ and $\psi_2$, respectively. The off-diagonal elements could be smaller than the product of $\psi_1$ and $\psi_2$, if the two beams are not completely coherent. The $\sigma$ parameter specifies the degree of coherency. + +This coherency matrix is not always real, but it is Hermitian. Thus, it can be diagonalized by a unitary transformation. If this matrix is normalized so that its trace is one, it becomes a density matrix [16,17]. + +**Table 3.** Polarization optics and special relativity sharing the same mathematics. Each matrix has its clear role in both optics and relativity. 
The determinant of the Stokes or the four-momentum matrix remains invariant under Lorentz transformations. It is interesting to note that the decoherence parameter (least fundamental) in optics corresponds to the mass (most fundamental) in particle physics. + +
| Polarization Optics | Transformation Matrix | Particle Symmetry |
|---|---|---|
| Phase shift by $\delta$ | $\begin{pmatrix} e^{i\delta/2} & 0 \\ 0 & e^{-i\delta/2} \end{pmatrix}$ | Rotation around $z$ |
| Rotation around $z$ | $\begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix}$ | Rotation around $y$ |
| Squeeze along $x$ and $y$ | $\begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix}$ | Boost along $z$ |
| Squeeze along 45° | $\begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix}$ | Boost along $x$ |
| $(ab)^2 \sin^2 \chi$ | Determinant | $(\text{mass})^2$ |
+ +If we start with the Jones vector of the form of Equation (44), the coherency matrix becomes: + +$$ C = \begin{pmatrix} a^2 & ab e^{-(\sigma+i\delta)} \\ ab e^{-(\sigma-i\delta)} & b^2 \end{pmatrix} \qquad (55) $$ + +We are interested in the symmetry properties of this matrix. Since the transformation matrix applicable to the Jones vector is the two-by-two representation of the Lorentz group, we are particularly interested in the transformation matrices applicable to this coherency matrix. + +The trace and the determinant of the above coherency matrix are: + +$$ +\begin{aligned} +\det(C) &= (ab)^2 (1 - e^{-2\sigma}) \\ +\operatorname{tr}(C) &= a^2 + b^2 +\end{aligned} +\quad (56) $$ + +Since $e^{-\sigma}$ is always smaller than one, we can introduce an angle, $\chi$, defined as: + +$$ \cos \chi = e^{-\sigma} \quad (57) $$ + +and call it the "decoherence angle". If $\chi = 0$, the decoherence is minimum, and it becomes maximum when $\chi = 90^\circ$. We can then write the coherency matrix of Equation (55) as: + +$$ C = \begin{pmatrix} a^2 & ab(\cos \chi)e^{-i\delta} \\ ab(\cos \chi)e^{i\delta} & b^2 \end{pmatrix} \quad (58) $$ +---PAGE_BREAK--- + +The degree of polarization is defined as [13]: + +$$f = \sqrt{1 - \frac{4 \det(C)}{(tr(C))^2}} = \sqrt{1 - \frac{4(ab)^2 \sin^2 \chi}{(a^2 + b^2)^2}} \quad (59)$$ + +This degree is one if $\chi = 0$. When $\chi = 90^\circ$, it becomes: + +$$\frac{a^2 - b^2}{a^2 + b^2} \qquad (60)$$ + +Without loss of generality, we can assume that *a* is greater than *b*. If they are equal, this minimum degree of polarization is zero. 
+
+Under the influence of the Naimark transformation given in Equation (11), this coherency matrix is transformed as:
+
+$$ C' = G \, C \, G^{\dagger} \qquad (61) $$
+
+It is more convenient to make the following linear combinations:
+
+$$
+\begin{aligned}
+S_0 &= \frac{S_{11} + S_{22}}{2}, \qquad S_3 = \frac{S_{11} - S_{22}}{2} \\
+S_1 &= \frac{S_{12} - S_{21}}{2}, \qquad S_2 = \frac{S_{12} + S_{21}}{2}
+\end{aligned}
+\qquad (62) $$
+
+These four parameters are called Stokes parameters, and four-by-four transformations applicable to these parameters are widely known as Mueller matrices [1,3]. However, if the Naimark transformation given in Equation (61) is translated into the four-by-four Lorentz transformations according to the correspondence given in the Appendix A, the Mueller matrices constitute a representation of the Lorentz group.
+
+Another interesting aspect of the two-by-two matrix formalism is that the coherency matrix can be formulated in terms of quaternions [18–20]. The quaternion representation can be translated into rotations in four-dimensional space. There is a long history between the Lorentz group and the four-dimensional rotation group. It would be interesting to see what the quaternion representation of polarization optics will add to this history between those two similar, but different, groups.
+
+As for earlier applications of the two-by-two representation of the Lorentz group, we note the vector representation by Fedorov [21,22]. Fedorov showed that it is easier to carry out kinematical calculations using his two-by-two representation. For instance, the computation of the Wigner rotation angle is possible in the two-by-two representation [23]. Earlier papers on group theoretical approaches to polarization optics include also those on Mueller matrices [24] and on relativistic kinematics and polarization optics [25].
+
+**5. 
Geometry of the Poincaré Sphere** + +We now have the four-vector, ($S_0, S_3, S_1, S_2$), which is Lorentz-transformed like the space-time four-vector, $(t, z, x, y)$, or the energy-momentum four-vector of Equation (15). This Stokes four-vector has a three-component subspace, ($S_3, S_1, S_2$), which is like the three-dimensional Euclidean subspace +---PAGE_BREAK--- + +in the four-dimensional Minkowski space. In this three-dimensional subspace, we can introduce the +spherical coordinate system with: + +$$ +\begin{align} +&R = \sqrt{S_3^2 + S_1^2 + S_2^2} \notag \\ +&S_3 = R \cos \zeta \tag{63} \\ +&S_1 = R(\sin \zeta) \cos \delta S_2 = R(\sin \zeta) \sin \delta \notag +\end{align} +$$ + +The radius, *R*, is the radius of this sphere, and is: + +$$ +R = \frac{1}{2} \sqrt{(a^2 - b^2)^2 + 4(ab)^2 \cos^2 \chi} \quad (64) +$$ + +with: + +$$ +S_3 = \frac{a^2 - b^2}{2} \tag{65} +$$ + +This spherical picture is traditionally known as the Poincaré sphere [1–3]. Without loss of generality, we assume *a* is greater than *b*, and *S*₃ is non-negative. In addition, we can consider another sphere with its radius: + +$$ +S_0 = \frac{a^2 + b^2}{2} \tag{66} +$$ + +according to Equation (62). + +The radius, *R*, takes its maximum value, $S_0$, when $\chi = 0^\circ$. It decreases and reaches its minimum value, $S_3$, when $\chi = 90^\circ$. In terms of *R*, the degree of polarization given in Equation (59) is: + +$$ +f = \frac{R}{S_0} \tag{67} +$$ + +This aspect of the radius *R* is illustrated in Figure 1a. The minimum value of *R* is *S*3 of Equation (64). + +**Figure 1.** Radius of the Poincaré sphere. The radius, *R*, takes its maximum value, $S_0$, when the decoherence angle, $\chi$, is zero. It becomes smaller as $\chi$ increases. It becomes minimum when the angle reaches 90°. Its minimum value is $S_3$, as is illustrated in Figure 1a. The degree of polarization is maximum when $R = S_0$ and is minimum when $R = S_3$. 
According to Equation (65), $S_3$ becomes zero when $a = b$, and the minimum value of $R$ becomes zero, as is indicated in Figure 1b. Its maximum value is still $S_0$. This maximum radius can become larger because $b$ becomes larger to make $a = b$. +---PAGE_BREAK--- + +Let us go back to the four-momentum matrix of Equation (15). Its determinant is $m^2$ and remains invariant. Likewise, the determinant of the coherency matrix of Equation (58) should also remain invariant. The determinant in this case is: + +$$S_0^2 - R^2 = (ab)^2 \sin^2 \chi \quad (68)$$ + +This quantity remains invariant. This aspect is shown on the last row of Table 3. + +Let us go back to Equation (49). This matrix changes the relative magnitude of the amplitudes, *a* and *b*. Thus, without loss of generality, we can study the Stokes parameters with *a* = *b*. The coherency matrix then becomes: + +$$C = a^2 \begin{pmatrix} 1 & (\cos \chi)e^{-i\delta} \\ (\cos \chi)e^{i\delta} & 1 \end{pmatrix} \quad (69)$$ + +Since the angle, $\delta$, does not play any essential roles, we can let $\delta = 0$ and write the coherency matrix as: + +$$C = a^2 \begin{pmatrix} 1 & \cos \chi \\ \cos \chi & 1 \end{pmatrix} \quad (70)$$ + +Then, the minimum radius, $S_3 = 0$, and $S_0$ of Equation (62) and *R* of Equation (64) become: + +$$S_0 = a^2 R = a^2(\cos \chi) \quad (71)$$ + +respectively. The Poincaré sphere becomes simplified to that of Figure 1b. This Poincaré sphere allows *R* to decrease to zero. + +The determinant of the above two-by-two matrix is: + +$$a^4 (1 - \cos^2 \chi) = a^4 \sin^2 \chi \quad (72)$$ + +Since the Lorentz transformation leaves the determinant invariant, the change in this $\chi$ variable is not a Lorentz transformation. It is of course possible to construct a larger group in which this variable plays a role in a group transformation [23], but in this paper, we are more interested in its role in a particle gaining a mass. 
With this point in mind, let us diagonalize the coherency matrix of Equation (69). Then it takes the form: + +$$a^2 \begin{pmatrix} 1 + \cos \chi & 0 \\ 0 & 1 - \cos \chi \end{pmatrix} \quad (73)$$ + +This form is the same as the four-momentum matrix given in Equation (41). There, we were not able to associate the variable, $\chi$, with any known physical process or symmetry operations of the Lorentz group. Fortunately, in this section, we noted that this variable comes from the degree of decoherence in polarization optics. + +## 6. Concluding Remarks + +In this paper, we noted first that the group of Lorentz transformations can be formulated in terms of two-by-two matrices. This two-by-two formalism can also be used for transformations of the coherency matrix in polarization optics consisting of four Stokes parameters. + +Thus, this set of the four parameters is like a Minkowskian four-vector under four-by-four Lorentz transformations. In order to accommodate all four Stokes parameters, we noted that the radius of the Poincaré sphere should be allowed to vary from its maximum value to its minimum, corresponding to the fully and minimal coherent cases. + +As in the case of the particle mass, the decoherence parameter in the Stokes formalism is invariant under Lorentz transformations. However, the Poincaré sphere, with a variable radius, provides the +---PAGE_BREAK--- + +mechanism for the variations of the decoherence parameter. It was noted that this variation gives a +physical process whose mathematics correspond to that of the mass variable in particle physics. + +As for polarization optics, the traditional approach has been to work with two polarizer matrices, like: + +$$ +\begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \begin{pmatrix} 0 & 0 \\ 0 & 1 \end{pmatrix} \qquad (74) +$$ + +We have replaced these two matrices by one attenuation matrix of Equation (47). This replacement enables us to formulate the Lorentz group for the Stokes parameters [15]. 
Furthermore, this attenuation matrix makes it possible to make a continuous transformation from one matrix to another by adjusting the attenuation parameters in optical media. It could be interesting to design optical experiments along this direction.
+
+**Acknowledgments:** This paper is in part based on an invited paper presented by one of the authors (YSK) at the Fedorov Memorial Symposium: International Conference "Spins and Photonic Beams at Interface", dedicated to the 100th anniversary of F.I. Fedorov (1911–1994) (Minsk, Belarus, 2011). He would like to thank Sergei Kilin for inviting him to the conference.
+
+In addition to numerous original contributions in optics, Fedorov wrote a book on two-by-two representations of the Lorentz group based on his own research on this subject. It was, therefore, quite appropriate for him (YSK) to present a paper on applications of the Lorentz group to optical science. He would like to thank V. A. Dlugunovich and M. Glaynskii for bringing the papers and the book written by Academician Fedorov, as well as their own papers to his attention.
+
+**Conflicts of Interest:** The authors declare no conflict of interest.
+
+Appendix A
+
+In Section 2, we listed four two-by-two matrices whose repeated applications lead to the most general form of the two-by-two matrix, *G*. 
It is known that every *G* matrix can be translated into a four-by-four Lorentz transformation matrix through [4,9,15]:
+
+$$
+\begin{pmatrix}
+t' + z' \\
+x' - iy' \\
+x' + iy' \\
+t' - z'
+\end{pmatrix}
+=
+\begin{pmatrix}
+\alpha\alpha^* & \alpha\beta^* & \beta\alpha^* & \beta\beta^* \\
+\alpha\gamma^* & \alpha\delta^* & \beta\gamma^* & \beta\delta^* \\
+\gamma\alpha^* & \gamma\beta^* & \delta\alpha^* & \delta\beta^* \\
+\gamma\gamma^* & \gamma\delta^* & \delta\gamma^* & \delta\delta^*
+\end{pmatrix}
+\begin{pmatrix}
+t+z \\
+x-iy \\
+x+iy \\
+t-z
+\end{pmatrix}
+\tag{75}
+$$
+
+and:
+
+$$
+\begin{pmatrix} t \\ z \\ x \\ y \end{pmatrix} = \frac{1}{2} \begin{pmatrix} 1 & 0 & 0 & 1 \\ 1 & 0 & 0 & -1 \\ 0 & 1 & 1 & 0 \\ 0 & i & -i & 0 \end{pmatrix} \begin{pmatrix} t+z \\ x-iy \\ x+iy \\ t-z \end{pmatrix} \quad (76)
+$$
+
+These matrices appear to be complicated, but it is enough to study the matrices of Equation (13) and Equation (14) to cover all the matrices in this group. Thus, we give their four-by-four equivalents in this Appendix A:
+
+$$
+Z(\delta) = \begin{pmatrix} e^{i\delta/2} & 0 \\ 0 & e^{-i\delta/2} \end{pmatrix} \tag{77}
+$$
+
+leads to the four-by-four matrix:
+
+$$
+\begin{pmatrix}
+1 & 0 & 0 & 0 \\
+0 & 1 & 0 & 0 \\
+0 & 0 & \cos \delta & -\sin \delta \\
+0 & 0 & \sin \delta & \cos \delta
+\end{pmatrix}
+\qquad (78)
+$$
+---PAGE_BREAK---
+
+Likewise:
+
+$$
+B(\eta) = \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \rightarrow \begin{pmatrix} \cosh \eta & \sinh \eta & 0 & 0 \\ \sinh \eta & \cosh \eta & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix} \qquad (79)
+$$
+
+$$
+R(\theta) = \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \rightarrow \begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & \cos\theta & -\sin\theta & 0 \\ 0 & \sin\theta & \cos\theta & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix} \quad (80)
+$$
+
+and:
+
+$$
+S(\lambda) = \begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ 
\sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix} \rightarrow \begin{pmatrix} \cosh\lambda & 0 & \sinh\lambda & 0 \\ 0 & 1 & 0 & 0 \\ \sinh\lambda & 0 & \cosh\lambda & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix} \quad (81)
+$$
+
+References
+
+1. Azzam, R.A.M.; Bashara, I. *Ellipsometry and Polarized Light*; North-Holland: Amsterdam, The Netherlands, 1977.
+
+2. Born, M.; Wolf, E. *Principles of Optics*, 6th ed.; Pergamon: Oxford, NY, USA, 1980.
+
+3. Brosseau, C. *Fundamentals of Polarized Light: A Statistical Optics Approach*; John Wiley: New York, NY, USA, 1998.
+
+4. Naimark, M.A. Linear representation of the Lorentz group. *Uspekhi Mater. Nauk* **1954**, *9*, 19–93, Translated by Atkinson, F.V., American Mathematical Society Translations, Series 2, **1957**, *6*, 379–458.
+
+5. Naimark, M.A. *Linear Representations of the Lorentz Group*; Pergamon Press: Oxford, NY, USA, 1958; Translated by Swinfen, A.; Marstrand, O.J., 1964.
+
+6. Kim, Y.S.; Wigner, E.P. Space-time geometry of relativistic particles. *J. Math. Phys.* **1990**, *31*, 55–60. [CrossRef]
+
+7. Wigner, E. On unitary representations of the inhomogeneous Lorentz group. *Ann. Math.* **1939**, *40*, 149–204. [CrossRef]
+
+8. Kim, Y.S. Poincaré Sphere and Decoherence Problems. Available online: http://arxiv.org/abs/1203.4539 (accessed on 17 June 2013).
+
+9. Kim, Y.S.; Noz, M.E. *Theory and Applications of the Poincaré Group*; Reidel: Dordrecht, The Netherlands, 1986.
+
+10. Han, D.; Kim, Y.S.; Son, D. E(2)-like little group for massless particles and polarization of neutrinos. *Phys. Rev. D* **1982**, *26*, 3717–3725.
+
+11. Başkal, S.; Kim, Y.S. One analytic form for four branches of the ABCD matrix. *J. Mod. Opt.* **2010**, *57*, 1251–1259.
+[CrossRef]
+
+12. Başkal, S.; Kim, Y.S. Lorentz Group in Ray and Polarization Optics. In *Mathematical Optics: Classical, Quantum and Computational Methods*; Lakshminarayanan, V., Calvo, M.L., Alieva, T., Eds.; CRC Taylor and Francis: New York, NY, USA, 2013; Chapter 9; pp. 
303–349. + +13. Saleh, B.E.A.; Teich, M.C. *Fundamentals of Photonics*, 2nd ed.; John Wiley: Hoboken, NJ, USA, 2007. + +14. Han, D.; Kim, Y.S.; Noz, M.E. Jones-vector formalism as a representation of the Lorentz group. *J. Opt. Soc. Am. A* **1997**, *14*, 2290–2298. + +15. Han, D.; Kim, Y.S.; Noz, M.E. Stokes parameters as a Minkowskian four-vector. *Phys. Rev. E* **1997**, *56*, 6065–6076. + +16. Feynman, R.P. *Statistical Mechanics*; Benjamin/Cummings: Reading, MA, USA, 1972. + +17. Han, D.; Kim, Y.S.; Noz, M.E. Illustrative example of Feynman's rest of the universe. *Am. J. Phys.* **1999**, *67*, 61–66. [CrossRef] + +18. Pellat-Finet, P. Geometric approach to polarization optics. II. Quarternionic representation of polarized light. *Optik* **1991**, *87*, 68–76. + +19. Dlugunovich, V.A.; Kurochkin, Y.A. Vector parameterization of the Lorentz group transformations and polar decomposition of Mueller matrices. *Opt. Spectrosc.* **2009**, *107*, 312–317. [CrossRef] +---PAGE_BREAK--- + +20. Tudor, T. Vectorial Pauli algebraic approach in polarization optics. I. Device and state operators. *Optik* **2010**, *121*, 1226–1235. [CrossRef] + +21. Fedorov, F.I. Vector parametrization of the Lorentz group and relativistic kinematics. *Theor. Math. Phys.* **1970**, *2*, 248–252. [CrossRef] + +22. Fedorov, F.I. *Lorentz Group*; [in Russian]; Global Science, Physical-Mathematical Literature: Moscow, Russia, 1979. + +23. Başkal, S.; Kim, Y.S. De Sitter group as a symmetry for optical decoherence. *J. Phys. A* **2006**, *39*, 7775–7788. + +24. Dargys, A. Optical Mueller matrices in terms of geometric algebra. *Opt. Commun.* **2012**, *285*, 4785–4792. +[CrossRef] + +25. Pellat-Finet, P.; Basset, M. What is common to both polarization optics and relativistic kinematics? *Optik* **1992**, *90*, 101–106. + +© 2013 by the authors. Licensee MDPI, Basel, Switzerland. 
This article is an open access +article distributed under the terms and conditions of the Creative Commons Attribution +(CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +Article + +Wigner's Space-Time Symmetries Based on the Two-by-Two Matrices of the Damped Harmonic Oscillators and the Poincaré Sphere + +Sibel Başkal ¹, Young S. Kim ²,* and Marilyn E. Noz ³ + +¹ Department of Physics, Middle East Technical University, Ankara 06800, Turkey; E-Mail: baskal@newton.physics.metu.edu.tr + +² Center for Fundamental Physics, University of Maryland, College Park, MD 20742, USA + +³ Department of Radiology, New York University, New York, NY 10016, USA; E-Mail: marilyne.noz@gmail.com + +* E-Mail: yskim@umd.edu; Tel.: +1-301-937-1306. + +Received: 28 February 2014; in revised form: 28 May 2014 / Accepted: 9 June 2014 / Published: 25 June 2014 + +**Abstract:** The second-order differential equation for a damped harmonic oscillator can be converted to two coupled first-order equations, with two two-by-two matrices leading to the group $Sp(2)$. It is shown that this oscillator system contains the essential features of Wigner's little groups dictating the internal space-time symmetries of particles in the Lorentz-covariant world. The little groups are the subgroups of the Lorentz group whose transformations leave the four-momentum of a given particle invariant. It is shown that the damping modes of the oscillator correspond to the little groups for massive and imaginary-mass particles respectively. When the system makes the transition from the oscillation to damping mode, it corresponds to the little group for massless particles. Rotations around the momentum leave the four-momentum invariant. This degree of freedom extends the $Sp(2)$ symmetry to that of $SL(2, c)$ corresponding to the Lorentz group applicable to the four-dimensional Minkowski space. The Poincaré sphere contains the $SL(2, c)$ symmetry. 
In addition, it has a non-Lorentzian parameter allowing us to reduce the mass continuously to zero. It is thus possible to construct the little group for massless particles from that of the massive particle by reducing its mass to zero. Spin-1/2 particles and spin-1 particles are discussed in detail. + +**Keywords:** damped harmonic oscillators; coupled first-order equations; unimodular matrices; Wigner's little groups; Poincaré sphere; $Sp(2)$ group; $SL(2, c)$ group; gauge invariance; neutrinos; photons + +**PACS:** 03.65.Fd, 03.67.-a, 05.30.-d + +# 1. Introduction + +We are quite familiar with the second-order differential equation + +$$m \frac{d^2 y}{dt^2} + b \frac{dy}{dt} + Ky = 0 \quad (1)$$ + +for a damped harmonic oscillator. This equation has the same mathematical form as + +$$L \frac{d^2 Q}{dt^2} + R \frac{dQ}{dt} + \frac{1}{C} Q = 0 \quad (2)$$ + +for electrical circuits, where L, R, and C are the inductance, resistance, and capacitance respectively. These two equations play fundamental roles in physical and engineering sciences. Since they start from the same set of mathematical equations, one set of problems can be studied in terms of the other. For instance, many mechanical phenomena can be studied in terms of electrical circuits. +---PAGE_BREAK--- + +In Equation (1), when $b = 0$, the equation is that of a simple harmonic oscillator with the frequency $\omega = \sqrt{K/m}$. As $b$ increases, the oscillation becomes damped. When $b$ is larger than $2\sqrt{Km}$, the oscillation disappears, as the solution is a damping mode. + +Consider that increasing *b* continuously, while difficult mechanically, can be done electrically using Equation (2) by adjusting the resistance *R*. The transition from the oscillation mode to the damping mode is a continuous physical process. + +This *b* term leads to energy dissipation, but is not regarded as a fundamental force. 
It is inconvenient in the Hamiltonian formulation of mechanics and troublesome in transition to quantum mechanics, yet, plays an important role in classical mechanics. In this paper this term will help us understand the fundamental space-time symmetries of elementary particles. + +We are interested in constructing the fundamental symmetry group for particles in the Lorentz-covariant world. For this purpose, we transform the second-order differential equation of Equation (1) to two coupled first-order equations using two-by-two matrices. Only two linearly independent matrices are needed. They are the anti-symmetric and symmetric matrices + +$$A = \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix}, \quad \text{and} \quad S = \begin{pmatrix} 0 & i \\ i & 0 \end{pmatrix} \qquad (3)$$ + +respectively. The anti-symmetric matrix *A* is Hermitian and corresponds to the oscillation part, while the symmetric *S* matrix corresponds to the damping. + +These two matrices lead to the *Sp*(2) group consisting of two-by-two unimodular matrices with real elements. This group is isomorphic to the three-dimensional Lorentz group applicable to two space-like and one time-like coordinates. This group is commonly called the *O*(2, 1) group. + +This *O*(2, 1) group can explain all the essential features of Wigner's little groups dictating internal space-time symmetries of particles [1]. Wigner defined his little groups as the subgroups of the Lorentz group whose transformations leave the four-momentum of a given particle invariant. He observed that the little groups are different for massive, massless, and imaginary-mass particles. It has been a challenge to design a mathematical model which will combine those three into one formalism, but we show that the damped harmonic oscillator provides the desired mathematical framework. + +For the two space-like coordinates, we can assign one of them to the direction of the momentum, and the other to the direction perpendicular to the momentum. 
Let the direction of the momentum be along the z axis, and let the perpendicular direction be along the x axis. We therefore study the kinematics of the group within the zx plane, then see what happens when we rotate the system around the z axis without changing the momentum [2]. + +The Poincaré sphere for polarization optics contains the *SL*(2, *c*) symmetry isomorphic to the four-dimensional Lorentz group applicable to the Minkowski space [3–7]. Thus, the Poincaré sphere extends Wigner’s picture into the three space-like and one time-like coordinates. Specifically, this extension adds rotations around the given momentum which leaves the four-momentum invariant [2]. + +While the particle mass is a Lorentz-invariant variable, the Poincaré sphere contains an extra variable which allows the mass to change. This variable allows us to take the mass-limit of the symmetry operations. The transverse rotational degrees of freedom collapse into one gauge degree of freedom and polarization of neutrinos is a consequence of the requirement of gauge invariance [8,9]. + +The *SL*(2,*c*) group contains symmetries not seen in the three-dimensional rotation group. While we are familiar with two spinors for a spin-1/2 particle in nonrelativistic quantum mechanics, there are two additional spinors due to the reflection properties of the Lorentz group. There are thus 16 bilinear combinations of those four spinors. This leads to two scalars, two four-vectors, and one antisymmetric four-by-four tensor. The Maxwell-type electromagnetic field tensor can be obtained as a massless limit of this tensor [10]. + +In Section 2, we review the damped harmonic oscillator in classical mechanics, and note that the solution can be either in the oscillation mode or damping mode depending on the magnitude of +---PAGE_BREAK--- + +the damping parameter. The translation of the second order equation into a first order differential equation with two-by-two matrices is possible. 
This first-order equation is similar to the Schrödinger equation for a spin-1/2 particle in a magnetic field. + +Section 3 shows that the two-by-two matrices of Section 2 can be formulated in terms of the $Sp(2)$ group. These matrices can be decomposed into the Bargmann and Wigner decompositions. Furthermore, this group is isomorphic to the three-dimensional Lorentz group with two space and one time-like coordinates. + +In Section 4, it is noted that this three-dimensional Lorentz group has all the essential features of Wigner's little groups which dictate the internal space-time symmetries of the particles in the Lorentz-covariant world. Wigner's little groups are the subgroups of the Lorentz group whose transformations leave the four-momentum of a given particle invariant. The Bargmann Wigner decompositions are shown to be useful tools for studying the little groups. + +In Section 5, we note that the given momentum is invariant under rotations around it. The addition of this rotational degree of freedom extends the $Sp(2)$ symmetry to the six-parameter $SL(2, c)$ symmetry. In the space-time language, this extends the three dimensional group to the Lorentz group applicable to three space and one time dimensions. + +Section 6 shows that the Poincaré sphere contains the symmetries of $SL(2, c)$ group. In addition, it contains an extra variable which allows us to change the mass of the particle, which is not allowed in the Lorentz group. + +In Section 7, the symmetries of massless particles are studied in detail. In addition to rotation around the momentum, Wigner's little group generates gauge transformations. While gauge transformations on spin-1 photons are well known, the gauge invariance leads to the polarization of massless spin-1/2 particles, as observed in neutrino polarizations. + +In Section 8, it is noted that there are four spinors for spin-1/2 particles in the Lorentz-covariant world. 
It is thus possible to construct 16 bilinear forms, applicable to two scalars, and two vectors, and one antisymmetric second-rank tensor. The electromagnetic field tensor is derived as the massless limit. This tensor is shown to be gauge-invariant. + +## 2. Classical Damped Oscillators + +For convenience, we write Equation (1) as + +$$ \frac{d^2 y}{dt^2} + 2\mu \frac{dy}{dt} + \omega^2 y = 0 \quad (4) $$ + +with + +$$ \omega = \sqrt{\frac{K}{m}}, \quad \text{and} \quad \mu = \frac{b}{2m} \qquad (5) $$ + +The damping parameter $\mu$ is positive when there are no external forces. When $\omega$ is greater than $\mu$, the solution takes the form + +$$ y = e^{-\mu t} [C_1 \cos(\omega't) + C_2 \sin(\omega't)] \quad (6) $$ + +where + +$$ \omega' = \sqrt{\omega^2 - \mu^2} \qquad (7) $$ + +and $C_1$ and $C_2$ are the constants to be determined by the initial conditions. This expression is for a damped harmonic oscillator. Conversely, when $\mu$ is greater than $\omega$, the quantity inside the square-root sign is negative, then the solution becomes + +$$ y = e^{-\mu t} [C_3 \cosh(\mu't) + C_4 \sinh(\mu't)] \quad (8) $$ + +with + +$$ \mu' = \sqrt{\mu^2 - \omega^2} \qquad (9) $$ +---PAGE_BREAK--- + +If $\omega = \mu$, both Equations (6) and (8) collapse into one solution + +$$y(t) = e^{-\mu t} [C_5 + C_6 t] \quad (10)$$ + +These three different cases are treated separately in textbooks. Here we are interested in the transition from Equation (6) to Equation (8), via Equation (10). For convenience, we start from $\mu$ greater than $\omega$ with $\mu'$ given by Equation (9). + +For a given value of $\mu$, the square root becomes zero when $\omega$ equals $\mu$. If $\omega$ becomes larger, the square root becomes imaginary and divides into two branches. + +$$\pm i \sqrt{\omega^2 - \mu^2} \quad (11)$$ + +This is a continuous transition, but not an analytic continuation. 
To study this in detail, we translate the second order differential equation of Equation (4) into the first-order equation with two-by-two matrices. + +Given the solutions of Equations (6) and (10), it is convenient to use $\psi(t)$ defined as + +$$\psi(t) = e^{\mu t} y(t), \quad \text{and} \quad y = e^{-\mu t} \psi(t) \quad (12)$$ + +Then $\psi(t)$ satisfies the differential equation + +$$\frac{d^2 \psi(t)}{dt^2} + (\omega^2 - \mu^2)\psi(t) = 0 \quad (13)$$ + +## 2.1. Two-by-Two Matrix Formulation + +In order to convert this second-order equation to a first-order system, we introduce $\psi_1(t)$ and $\psi_2(t)$ satisfying two coupled differential equations + +$$\begin{align} +\frac{d\psi_1(t)}{dt} &= (\mu - \omega)\psi_2(t) \tag{14} \\ +\frac{d\psi_2(t)}{dt} &= (\mu + \omega)\psi_1(t) \tag{15} +\end{align}$$ + +which can be written in matrix form as + +$$\frac{d}{dt} \begin{pmatrix} \psi_1 \\ \psi_2 \end{pmatrix} = \begin{pmatrix} 0 & \mu - \omega \\ \mu + \omega & 0 \end{pmatrix} \begin{pmatrix} \psi_1 \\ \psi_2 \end{pmatrix} \quad (16)$$ + +Using the Hermitian and anti-Hermitian matrices of Equation (3) in Section 1, we construct the linear combination + +$$H = \omega \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix} + \mu \begin{pmatrix} 0 & i \\ i & 0 \end{pmatrix} \quad (17)$$ + +We can then consider the first-order differential equation + +$$i \frac{\partial}{\partial t} \psi(t) = H \psi(t) \quad (18)$$ + +While this equation is like the Schrödinger equation for an electron in a magnetic field, the two-by-two matrix is not Hermitian. Its first matrix is Hermitian, but the second matrix is anti-Hermitian. It is of course an interesting problem to give a physical interpretation to this non-Hermitian matrix +---PAGE_BREAK--- + +in connection with quantum dissipation [11], but this is beyond the scope of the present paper. 
+
The solution of Equation (18) is

$$
\psi(t) = \exp \left\{ \begin{pmatrix} 0 & -\omega + \mu \\ \omega + \mu & 0 \end{pmatrix} t \right\} \begin{pmatrix} C_7 \\ C_8 \end{pmatrix} \quad (19)
$$

where $C_7 = \psi_1(0)$ and $C_8 = \psi_2(0)$ respectively.

2.2. Transition from the Oscillation Mode to Damping Mode

It appears straight-forward to compute this expression by a Taylor expansion, but it is not.
This issue was extensively discussed in the earlier papers by two of us [12,13]. The key idea is to write
the matrix

$$
\begin{pmatrix}
0 & -\omega + \mu \\
\omega + \mu & 0
\end{pmatrix}
\qquad (20)
$$

as a similarity transformation of

$$
\omega' \begin{pmatrix} 0 & -1 \\ 1 & 0 \end{pmatrix} \quad (\omega > \mu) \tag{21}
$$

and as that of

$$
\mu' \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix} \quad (\mu > \omega) \tag{22}
$$

with $\omega'$ and $\mu'$ defined in Equations (7) and (9), respectively.
Then the Taylor expansion leads to

$$
\begin{pmatrix} \cos(\omega't) & -\sqrt{(\omega - \mu)/(\omega + \mu)} \sin(\omega't) \\ \sqrt{(\omega + \mu)/(\omega - \mu)} \sin(\omega't) & \cos(\omega't) \end{pmatrix} \quad (23)
$$

when $\omega$ is greater than $\mu$. 
The solution $\psi(t)$ takes the form

$$
\begin{pmatrix}
C_7 \cos(\omega't) - C_8 \sqrt{(\omega - \mu)/( \omega + \mu)} \sin(\omega't) \\
C_7 \sqrt{(\omega + \mu)/( \omega - \mu)} \sin(\omega't) + C_8 \cos(\omega't)
\end{pmatrix}
\quad (24)
$$

If $\mu$ is greater than $\omega$, the Taylor expansion becomes

$$
\begin{pmatrix} \cosh(\mu't) & \sqrt{(\mu - \omega)/(\mu + \omega)} \sinh(\mu't) \\ \sqrt{(\mu + \omega)/(\mu - \omega)} \sinh(\mu't) & \cosh(\mu't) \end{pmatrix} \quad (25)
$$

When $\omega$ is equal to $\mu$, both Equations (23) and (25) become

$$
\begin{pmatrix} 1 & 0 \\ 2\omega t & 1 \end{pmatrix} \tag{26}
$$

If $\omega$ is sufficiently close to but smaller than $\mu$, the matrix of Equation (25) becomes

$$
\begin{pmatrix}
1 + (\epsilon/2)(2\omega t)^2 & +\epsilon(2\omega t) \\
(2\omega t) & 1 + (\epsilon/2)(2\omega t)^2
\end{pmatrix}
\quad (27)
$$

with

$$
\epsilon = \frac{\mu - \omega}{\mu + \omega} \tag{28}
$$
---PAGE_BREAK---

If $\omega$ is sufficiently close to $\mu$, we can let

$$ \mu + \omega = 2\omega, \quad \text{and} \quad \mu - \omega = 2\mu\epsilon \tag{29} $$

If $\omega$ is greater than $\mu$, $\epsilon$ defined in Equation (28) becomes negative, the matrix of Equation (23) becomes

$$ \begin{pmatrix} 1 - (-\epsilon/2)(2\omega t)^2 & -(\epsilon)(2\omega t) \\ 2\omega t & 1 - (-\epsilon/2)(2\omega t)^2 \end{pmatrix} \tag{30} $$

We can rewrite this matrix as

$$ \begin{pmatrix} 1 - (1/2) \left[ (2\omega\sqrt{-\epsilon})t \right]^2 & -\sqrt{-\epsilon} \left[ (2\omega\sqrt{-\epsilon})t \right] \\ 2\omega t & 1 - (1/2) \left[ (2\omega\sqrt{-\epsilon})t \right]^2 \end{pmatrix} \tag{31} $$

If $\epsilon$ becomes positive, Equation (27) can be written as

$$ \begin{pmatrix} 1 + (1/2) \left[ (2\omega\sqrt{\epsilon})t \right]^2 & \sqrt{\epsilon} \left[ (2\omega\sqrt{\epsilon})t \right] \\ 2\omega t & 1 + (1/2) \left[ (2\omega\sqrt{\epsilon})t \right]^2 \end{pmatrix} \tag{32} $$

The transition from Equation (31) to 
Equation (32) is continuous as they become identical when $\epsilon = 0$. As $\epsilon$ changes its sign, the diagonal elements of the above matrices tell us how cos($\omega't$) becomes cosh($\mu't$). As for the upper-right element, $-\sin(\omega't)$ becomes sinh($\mu't$). This non-analytic continuity is discussed in detail in one of the earlier papers by two of us on lens optics [13]. This type of continuity was called there "tangential continuity." There, the function and its first derivative are continuous while the second derivative is not.

## 2.3. Mathematical Forms of the Solutions

In this section, we use the Heisenberg approach to the problem, and obtain the solutions in the form of two-by-two matrices. We note that

1. For the oscillation mode, the trace of the matrix is smaller than 2. The solution takes the form of

$$ \begin{pmatrix} \cos(x) & -e^{-\eta} \sin(x) \\ e^{\eta} \sin(x) & \cos(x) \end{pmatrix} \tag{33} $$

with trace $2\cos(x)$. The trace is independent of $\eta$.

2. For the damping mode, the trace of the matrix is greater than 2.

$$ \begin{pmatrix} \cosh(x) & e^{-\eta} \sinh(x) \\ e^{\eta} \sinh(x) & \cosh(x) \end{pmatrix} \tag{34} $$

with trace $2\cosh(x)$. Again, the trace is independent of $\eta$.

3. For the transition mode, the trace is equal to 2, and the matrix is triangular and takes the form of

$$ \begin{pmatrix} 1 & 0 \\ \gamma & 1 \end{pmatrix} \tag{35} $$

When $x$ approaches zero, Equations (33) and (34) take the form

$$ \begin{pmatrix} 1 - x^2/2 & -xe^{-\eta} \\ xe^{\eta} & 1 - x^2/2 \end{pmatrix}, \quad \text{and} \quad \begin{pmatrix} 1 + x^2/2 & xe^{-\eta} \\ xe^{\eta} & 1 + x^2/2 \end{pmatrix} \tag{36} $$
---PAGE_BREAK---

respectively. These two matrices have the same lower-left element. Let us fix this element to be a
positive number $\gamma$. 
Then

$$
x = \gamma e^{-\eta} \tag{37}
$$

Then the matrices of Equation (36) become

$$
\begin{pmatrix}
1 - \gamma^2 e^{-2\eta} / 2 & -\gamma e^{-2\eta} \\
\gamma & 1 - \gamma^2 e^{-2\eta} / 2
\end{pmatrix},
\quad
\text{and}
\quad
\begin{pmatrix}
1 + \gamma^2 e^{-2\eta} / 2 & \gamma e^{-2\eta} \\
\gamma & 1 + \gamma^2 e^{-2\eta} / 2
\end{pmatrix}
\qquad (38)
$$

If we introduce a small number $\epsilon$ defined as

$$
\epsilon = \sqrt{\gamma} e^{-\eta} \tag{39}
$$

the matrices of Equation (38) become

$$
\begin{pmatrix} e^{-\eta/2} & 0 \\ 0 & e^{\eta/2} \end{pmatrix} \begin{pmatrix} 1 - \gamma \epsilon^2/2 & -\sqrt{\gamma} \epsilon \\ \sqrt{\gamma} \epsilon & 1 - \gamma \epsilon^2/2 \end{pmatrix} \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \tag{40}
$$

$$
\begin{pmatrix} e^{-\eta/2} & 0 \\ 0 & e^{\eta/2} \end{pmatrix} \begin{pmatrix} 1 + \gamma \epsilon^2/2 & \sqrt{\gamma} \epsilon \\ \sqrt{\gamma} \epsilon & 1 + \gamma \epsilon^2/2 \end{pmatrix} \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix}
$$

respectively, with $e^{-\eta} = \epsilon / \sqrt{\gamma}$.

**3. Groups of Two-by-Two Matrices**

If a two-by-two matrix has four complex elements, it has eight independent parameters. If the determinant of this matrix is one, it is known as an unimodular matrix and the number of independent parameters is reduced to six. The group of two-by-two unimodular matrices is called SL(2, c). This six-parameter group is isomorphic to the Lorentz group applicable to the Minkowski space of three space-like and one time-like dimensions [14].

We can start with two subgroups of SL(2, c).

1. While the matrices of SL(2, c) are not unitary, we can consider the subset consisting of unitary matrices. This subgroup is called SU(2), and is isomorphic to the three-dimensional rotation group. This three-parameter group is the basic scientific language for spin-1/2 particles.

2. 
We can also consider the subset of matrices with real elements. This three-parameter group is called Sp(2) and is isomorphic to the three-dimensional Lorentz group applicable to two space-like and one time-like coordinates. + +In the Lorentz group, there are three space-like dimensions with x, y, and z coordinates. +However, for many physical problems, it is more convenient to study the problem in the +two-dimensional (x,z) plane first and generalize it to three-dimensional space by rotating the system +around the z axis. This process can be called Euler decomposition and Euler generalization [2]. + +First, we study *Sp*(2) symmetry in detail, and achieve the generalization by augmenting the +two-by-two matrix corresponding to the rotation around the *z* axis. In this section, we study in detail +properties of *Sp*(2) matrices, then generalize them to *SL*(2, *c*) in Section 5. + +There are three classes of Sp(2) matrices. Their traces can be smaller or greater than two, or equal to two. While these subjects are already discussed in the literature [15–17] our main interest is what happens as the trace goes from less than two to greater than two. Here we are guided by the model we have discussed in Section 2, which accounts for the transition from the oscillation mode to the damping mode. +---PAGE_BREAK--- + +### 3.1. Lie Algebra of Sp(2) + +The two linearly independent matrices of Equation (3) can be written as + +$$ K_1 = \frac{1}{2} \begin{pmatrix} 0 & i \\ i & 0 \end{pmatrix}, \quad \text{and} \quad J_2 = \frac{1}{2} \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix} \qquad (41) $$ + +However, the Taylor series expansion of the exponential form of Equation (23) or Equation (25) requires an additional matrix + +$$ K_3 = \frac{1}{2} \begin{pmatrix} i & 0 \\ 0 & -i \end{pmatrix} \qquad (42) $$ + +These matrices satisfy the following closed set of commutation relations. 
+

$$ [K_1, J_2] = iK_3, \quad [J_2, K_3] = iK_1, \quad [K_3, K_1] = -iJ_2 \qquad (43) $$

These commutation relations remain invariant under Hermitian conjugation, even though $K_1$ and $K_3$ are anti-Hermitian. The algebra generated by these three matrices is known in the literature as the group $Sp(2)$ [17]. Furthermore, the closed set of commutation relations is commonly called the Lie algebra. Indeed, Equation (43) is the Lie algebra of the $Sp(2)$ group.

The Hermitian matrix $J_2$ generates the rotation matrix

$$ R(\theta) = \exp(-i\theta J_2) = \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \qquad (44) $$

and the anti-Hermitian matrices $K_1$ and $K_3$ generate the following squeeze matrices.

$$ S(\lambda) = \exp(-i\lambda K_1) = \begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix} \qquad (45) $$

and

$$ B(\eta) = \exp(-i\eta K_3) = \begin{pmatrix} \exp(\eta/2) & 0 \\ 0 & \exp(-\eta/2) \end{pmatrix} \qquad (46) $$

respectively.

Returning to the Lie algebra of Equation (43), since $K_1$ and $K_3$ are anti-Hermitian, and $J_2$ is Hermitian, the set of commutation relations is invariant under the Hermitian conjugation. In other words, the commutation relations remain invariant, even if we change the sign of $K_1$ and $K_3$, while keeping that of $J_2$ invariant. Next, let us take the complex conjugate of the entire system. Then both the $J$ and $K$ matrices change their signs.

### 3.2. 
Bargmann and Wigner Decompositions + +Since the $Sp(2)$ matrix has three independent parameters, it can be written as [15] + +$$ \begin{pmatrix} \cos(\alpha_1/2) & -\sin(\alpha_1/2) \\ \sin(\alpha_1/2) & \cos(\alpha_1/2) \end{pmatrix} \begin{pmatrix} \cosh\chi & \sinh\chi \\ \sinh\chi & \cosh\chi \end{pmatrix} \begin{pmatrix} \cos(\alpha_2/2) & -\sin(\alpha_2/2) \\ \sin(\alpha_2/2) & \cos(\alpha_2/2) \end{pmatrix} \qquad (47) $$ + +This matrix can be written as + +$$ \begin{pmatrix} \cos(\delta/2) & -\sin(\delta/2) \\ \sin(\delta/2) & \cos(\delta/2) \end{pmatrix} \begin{pmatrix} a & b \\ c & d \end{pmatrix} \begin{pmatrix} \cos(\delta/2) & \sin(\delta/2) \\ -\sin(\delta/2) & \cos(\delta/2) \end{pmatrix} \qquad (48) $$ +---PAGE_BREAK--- + +where + +$$ +\begin{pmatrix} a & b \\ c & d \end{pmatrix} = \begin{pmatrix} \cos(\alpha/2) & -\sin(\alpha/2) \\ \sin(\alpha/2) & \cos(\alpha/2) \end{pmatrix} \begin{pmatrix} \cosh \chi & \sinh \chi \\ \sinh \chi & \cosh \chi \end{pmatrix} \begin{pmatrix} \cos(\alpha/2) & -\sin(\alpha/2) \\ \sin(\alpha/2) & \cos(\alpha/2) \end{pmatrix} \quad (49) +$$ + +with + +$$ +\delta = \frac{1}{2}(\alpha_1 - \alpha_2), \quad \text{and} \quad \alpha = \frac{1}{2}(\alpha_1 + \alpha_2) \tag{50} +$$ + +If we complete the matrix multiplication of Equation (49), the result is + +$$ +\left( +\begin{array}{cc} + (\cosh \chi) \cos \alpha & \sinh \chi - (\cosh \chi) \sin \alpha \\ + \sinh \chi + (\cosh \chi) \sin \alpha & (\cosh \chi) \cos \alpha +\end{array} +\right) +\qquad (51) +$$ + +We shall call hereafter the decomposition of Equation (49) the Bargmann decomposition. This means that every matrix in the Sp(2) group can be brought to the Bargmann decomposition by a similarity transformation of rotation, as given in Equation (48). This decomposition leads to an equidiagonal matrix with two independent parameters. + +For the matrix of Equation (49), we can now consider the following three cases. 
Let us assume that $\chi$ is positive, and the angle $\alpha$ is less than 90°. Let us look at the upper-right element.

1. If it is negative with $[\sinh\chi < (\cosh\chi)\sin\alpha]$, then the trace of the matrix is smaller than 2, and the matrix can be written as

$$
\begin{pmatrix}
\cos(\theta/2) & -e^{-\eta}\sin(\theta/2) \\
e^{\eta}\sin(\theta/2) & \cos(\theta/2)
\end{pmatrix}
\qquad (52)
$$

with

$$
\cos(\theta/2) = (\cosh\chi)\cos\alpha, \quad \text{and} \quad e^{-2\eta} = \frac{(\cosh\chi)\sin\alpha - \sinh\chi}{(\cosh\chi)\sin\alpha + \sinh\chi} \tag{53}
$$

2. If it is positive with $[\sinh \chi > (\cosh \chi) \sin \alpha]$, then the trace is greater than 2, and the matrix can be written as

$$
\begin{pmatrix}
\cosh(\lambda/2) & e^{-\eta} \sinh(\lambda/2) \\
e^{\eta} \sinh(\lambda/2) & \cosh(\lambda/2)
\end{pmatrix}
\qquad (54)
$$

with

$$
\cosh(\lambda/2) = (\cosh\chi)\cos\alpha, \quad \text{and} \quad e^{-2\eta} = \frac{\sinh\chi - (\cosh\chi)\sin\alpha}{(\cosh\chi)\sin\alpha + \sinh\chi} \tag{55}
$$

3. If it is zero with $[\sinh \chi = (\cosh \chi) \sin \alpha]$, then the trace is equal to 2, and the matrix takes the form

$$
\begin{pmatrix}
1 & 0 \\
2 \sinh \chi & 1
\end{pmatrix}
\qquad (56)
$$

The above repeats the mathematics given in Section 2.3.

Returning to Equations (52) and (54), they can be decomposed into

$$
M(\theta, \eta) = \begin{pmatrix} e^{-\eta/2} & 0 \\ 0 & e^{\eta/2} \end{pmatrix} \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \quad (57)
$$

and

$$
M(\lambda, \eta) = \begin{pmatrix} e^{-\eta/2} & 0 \\ 0 & e^{\eta/2} \end{pmatrix} \begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix} \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \quad (58)
$$

respectively. 
In view of the physical examples given in Section 6, we shall call this the “Wigner decomposition.” Unlike the Bargmann decomposition, the Wigner decomposition is in the form of a similarity transformation.
---PAGE_BREAK---

We note that both Equations (57) and (58) are written as similarity transformations. Thus

$$[M(\theta, \eta)]^n = \begin{pmatrix} \cos(n\theta/2) & -e^{-\eta} \sin(n\theta/2) \\ e^{\eta} \sin(n\theta/2) & \cos(n\theta/2) \end{pmatrix} \quad (59)$$

$$[M(\lambda, \eta)]^n = \begin{pmatrix} \cosh(n\lambda/2) & e^{-\eta} \sinh(n\lambda/2) \\ e^{\eta} \sinh(n\lambda/2) & \cosh(n\lambda/2) \end{pmatrix} \quad (60)$$

$$[M(\gamma)]^n = \begin{pmatrix} 1 & 0 \\ n\gamma & 1 \end{pmatrix} \quad (61)$$

These expressions are useful for studying periodic systems [18].

The question is what physics these decompositions describe in the real world. To address this, we study what the Lorentz group does in the real world, and study isomorphism between the $Sp(2)$ group and the Lorentz group applicable to the three-dimensional space consisting of one time and two space coordinates.

### 3.3. Isomorphism with the Lorentz Group

The purpose of this section is to give physical interpretations of the mathematical formulas given in Section 3.2. We will interpret these formulae in terms of the Lorentz transformations which are normally described by four-by-four matrices. For this purpose, it is necessary to establish a correspondence between the two-by-two representation of Section 3.2 and the four-by-four representations of the Lorentz group.

Let us consider the Minkowskian space-time four-vector

$$ (t, z, x, y) \qquad (62) $$

where $(t^2 - z^2 - x^2 - y^2)$ remains invariant under Lorentz transformations. The Lorentz group consists of four-by-four matrices performing Lorentz transformations in the Minkowski space. 
+ +In order to give physical interpretations to the three two-by-two matrices given in Equations (44)–(46), we consider rotations around the *y* axis, boosts along the *x* axis, and boosts along the *z* axis. The transformation is restricted in the three-dimensional subspace of $(t,z,x)$. It is then straight-forward to construct those four-by-four transformation matrices where the *y* coordinate remains invariant. They are given in Table 1. Their generators also given. Those four-by-four generators satisfy the Lie algebra given in Equation (43). + +**Table 1.** Matrices in the two-by-two representation, and their corresponding four-by-four generators and transformation matrices. + +
MatricesGeneratorsFour-by-FourTransform matrices
R(θ)J2 = 12 (0
i
−i
0)
0    0    0
0    0    −i
0    i    0
0    0    0
1    0    0
0    cos θ    − sin θ
0    sin θ    cos θ
0    0    0
B(η)K3 = 12(i
0
−i
0))
0    i    0
i    0    0
0    0    0
0    0    0
cosh ηsinh η00
sinh ηcosh η00
0010
0001
S(λ)K1 = 12(0
i
i
0))
0    0    i
i    0    0
0    0    0
cosh λ0sinh λ0
0100
sinh λ0cosh λ0
0001
+ + +---PAGE_BREAK--- + +**4. Internal Space-Time Symmetries** + +We have seen that there corresponds a two-by-two matrix for each four-by-four Lorentz transformation matrix. It is possible to give physical interpretations to those four-by-four matrices. It must thus be possible to attach a physical interpretation to each two-by-two matrix. + +Since 1939 [1] when Wigner introduced the concept of the little groups many papers have been published on this subject, but most of them were based on the four-by-four representation. In this section, we shall give the formalism of little groups in the language of two-by-two matrices. In so doing, we provide physical interpretations to the Bargmann and Wigner decompositions introduced in Section 3.2. + +**4.1. Wigner's Little Groups** + +In [1], Wigner started with a free relativistic particle with momentum, then constructed subgroups of the Lorentz group whose transformations leave the four-momentum invariant. These subgroups thus define the internal space-time symmetry of the given particle. Without loss of generality, we assume that the particle momentum is along the z direction. Thus rotations around the momentum leave the momentum invariant, and this degree of freedom defines the helicity, or the spin parallel to the momentum. + +We shall use the word "Wigner transformation" for the transformation which leaves the four-momentum invariant: + +1. For a massive particle, it is possible to find a Lorentz frame where it is at rest with zero momentum. The four-momentum can be written as $m(1,0,0,0)$, where $m$ is the mass. This four-momentum is invariant under rotations in the three-dimensional $(z,x,y)$ space. + +2. For an imaginary-mass particle, there is the Lorentz frame where the energy component vanishes. The momentum four-vector can be written as $p(0,1,0,0)$, where $p$ is the magnitude of the momentum. + +3. If the particle is massless, its four-momentum becomes $p(1,1,0,0)$. 
Here the first and second components are equal in magnitude. + +The constant factors in these four-momenta do not play any significant roles. Thus we write them as $(1,0,0,0)$, $(0,1,0,0)$, and $(1,1,0,0)$ respectively. Since Wigner worked with these three specific four-momenta [1], we call them Wigner four-vectors. + +All of these four-vectors are invariant under rotations around the z axis. The rotation matrix is + +$$Z(\phi) = \begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & \cos\phi & -\sin\phi \\ 0 & 0 & \sin\phi & \cos\phi \end{pmatrix} \quad (63)$$ + +In addition, the four-momentum of a massive particle is invariant under the rotation around the y axis, whose four-by-four matrix was given in Table 1. The four-momentum of an imaginary particle is invariant under the boost matrix $S(\lambda)$ given in Table 1. The problem for the massless particle is more complicated, but will be discussed in detail in Section 7. See Table 2. +---PAGE_BREAK--- + +**Table 2.** Wigner four-vectors and Wigner transformation matrices applicable to two space-like and one time-like dimensions. Each Wigner four-vector remains invariant under the application of its Wigner matrix. + +
MassWigner Four-VectorWigner Transformation
Massive(1, 0, 0, 0)(1 0 0 0)
(0 cos θ - sinθ 0)
(0 sin θ cos θ 0)
(0 0 0 1)
Massless(1, 1, 0, 0)(1 + γ2/2 - γ2/2 γ 0)
2/2 1 - γ2/2 γ 0)
-γ γ 1 0
(0 0 0 1)
Imaginary mass(0, 1, 0, 0)(cosh λ 0 sinh λ 0)
(0 1 0 0)
(sinh λ 0 cosh λ 0)
(0 0 0 1)
+ +## 4.2. Two-by-Two Formulation of Lorentz Transformations + +The Lorentz group is a group of four-by-four matrices performing Lorentz transformations on the Minkowskian vector space of $(t,z,x,y)$, leaving the quantity + +$$t^2 - z^2 - x^2 - y^2 \quad (64)$$ + +invariant. It is possible to perform the same transformation using two-by-two matrices [7,14,19]. + +In this two-by-two representation, the four-vector is written as + +$$X = \begin{pmatrix} t+z & x-iy \\ x+iy & t-z \end{pmatrix} \quad (65)$$ + +where its determinant is precisely the quantity given in Equation (64) and the Lorentz transformation on this matrix is a determinant-preserving, or unimodular transformation. Let us consider the transformation matrix as [7,19] + +$$G = \begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix}, \quad \text{and} \quad G^{\dagger} = \begin{pmatrix} \alpha^{*} & \gamma^{*} \\ \beta^{*} & \delta^{*} \end{pmatrix} \quad (66)$$ + +with + +$$\det(G) = 1 \quad (67)$$ + +and the transformation + +$$X' = GXG^{\dagger} \quad (68)$$ + +Since $G$ is not a unitary matrix, Equation (68) not a unitary transformation, but rather we call this the “Hermitian transformation”. Equation (68) can be written as + +$$\begin{pmatrix} t' + z' & x' - iy' \\ x + iy & t' - z' \end{pmatrix} = \begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix} \begin{pmatrix} t + z & x - iy \\ x + iy & t - z \end{pmatrix} \begin{pmatrix} \alpha^* & \gamma^* \\ \beta^* & \delta^* \end{pmatrix} \quad (69)$$ + +It is still a determinant-preserving unimodular transformation, thus it is possible to write this as a four-by-four transformation matrix applicable to the four-vector $(t,z,x,y)$ [7,14]. + +Since the $G$ matrix starts with four complex numbers and its determinant is one by Equation (67), it has six independent parameters. 
The group of these $G$ matrices is known to be locally isomorphic
---PAGE_BREAK---

to the group of four-by-four matrices performing Lorentz transformations on the four-vector $(t, z, x, y)$. In other words, for each $G$ matrix there is a corresponding four-by-four Lorentz-transform matrix [7].

The matrix $G$ is not a unitary matrix, because its Hermitian conjugate is not always its inverse. This group has a unitary subgroup called $SU(2)$ and another consisting only of real matrices called $Sp(2)$. For this latter subgroup, it is sufficient to work with the three matrices $R(\theta), S(\lambda)$, and $B(\eta)$ given in Equations (44)–(46) respectively. Each of these matrices has its corresponding four-by-four matrix applicable to the $(t, z, x, y)$. These matrices with their four-by-four counterparts are tabulated in Table 1.

The energy-momentum four-vector can also be written as a two-by-two matrix. It can be written as

$$P = \begin{pmatrix} p_0 + p_z & p_x - ip_y \\ p_x + ip_y & p_0 - p_z \end{pmatrix} \qquad (70)$$

with

$$\det(P) = p_0^2 - p_x^2 - p_y^2 - p_z^2 \qquad (71)$$

which means

$$\det(P) = m^2 \qquad (72)$$

where *m* is the particle mass.

The Lorentz transformation can be written explicitly as

$$P' = GPG^+ \qquad (73)$$

or

$$\begin{pmatrix} p'_0 + p'_z & p'_x - ip'_y \\ p'_x + ip'_y & p'_0 - p'_z \end{pmatrix} = \begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix} \begin{pmatrix} p_0 + p_z & p_x - ip_y \\ p_x + ip_y & p_0 - p_z \end{pmatrix} \begin{pmatrix} \alpha^* & \gamma^* \\ \beta^* & \delta^* \end{pmatrix} \qquad (74)$$

This is an unimodular transformation, and the mass is a Lorentz-invariant variable. Furthermore, it was shown in [7] that Wigner's little groups for massive, massless, and imaginary-mass particles can be explicitly defined in terms of two-by-two matrices. 
+

Wigner's little group consists of two-by-two matrices satisfying

$$P = WPW^{+} \qquad (75)$$

The two-by-two $W$ matrix is not an identity matrix, but tells about the internal space-time symmetry of a particle with a given energy-momentum four-vector. This aspect was not known when Einstein formulated his special relativity in 1905, hence the internal space-time symmetry was not an issue at that time. We call the two-by-two matrix $W$ the Wigner matrix, and call the condition of Equation (75) the Wigner condition.

If the determinant of $P$ is a positive number, then $P$ is proportional to

$$P = \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix} \qquad (76)$$

corresponding to a massive particle at rest, while if the determinant is negative, it is proportional to

$$P = \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} \qquad (77)$$
+ +For the massless case, the transformations with the triangular matrix of the form + +$$ +\begin{pmatrix} 1 & \gamma \\ 0 & 1 \end{pmatrix} \qquad (80) +$$ + +leave the momentum matrix of Equation (78) invariant. The physics of this matrix has a stormy history, +and the variable $\gamma$ leads to a gauge transformation applicable to massless particles [8,9,20,21]. + +For a particle with an imaginary mass, a W matrix of the form of Equation (45) leaves the +four-momentum of Equation (77) invariant. + +Table 3 summarizes the transformation matrices for Wigner's little groups for massive, massless, +and imaginary-mass particles. Furthermore, in terms of their traces, the matrices given in this +subsection can be compared with those given in Section 2.3 for the damped oscillator. The comparisons +are given in Table 4. + +Of course, it is a challenging problem to have one expression for all three classes. This problem +has been discussed in the literature [12], and the damped oscillator case of Section 2 addresses the +continuity problem. + +**Table 3.** Wigner vectors and Wigner matrices in the two-by-two representation. The trace of the matrix tells whether the particle $m^2$ is positive, zero, or negative. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Particle Mass + + Four-Momentum + + Transform Matrix + + Trace +
+ Massive + + ( + + 1 + + 0) +
+ (0 1) +
+ ( + + cos(θ/2) + + − sin(θ/2)) +
+ ( + + sin(θ/2) + + cos(θ/2)) +
+ less than 2 +
+ Massless + + ( + + 1 + + 0) +
+ (0 0) +
+ ( + + 1 + + γ) +
+ (0 1) +
+ equal to 2 +
+ Imaginary mass + + ( + + 1 + + 0) +
+ (0 −1) +
+ ( + + cosh(λ/2) + + sinh(λ/2)) +
+ ( + + sinh(λ/2) + + cosh(λ/2)) +
+ greater than 2 +
+---PAGE_BREAK--- + +**Table 4.** Damped Oscillators and Space-time Symmetries. Both share Sp(2) as their symmetry group. + +
TraceDamped OscillatorParticle Symmetry
Smaller than 2Oscillation ModeMassive Particles
Equal to 2Transition ModeMassless Particles
Larger than 2Damping ModeImaginary-mass Particles
+ +## 5. Lorentz Completion of Wigner's Little Groups + +So far we have considered transformations applicable only to (t, z, x) space. In order to study the full symmetry, we have to consider rotations around the z axis. As previously stated, when a particle moves along this axis, this rotation defines the helicity of the particle. + +In [1], Wigner worked out the little group of a massive particle at rest. When the particle gains a momentum along the z direction, the single particle can reverse the direction of momentum, the spin, or both. What happens to the internal space-time symmetries is discussed in this section. + +### 5.1. Rotation around the z Axis + +In Section 3, our kinematics was restricted to the two-dimensional space of z and x, and thus includes rotations around the y axis. We now introduce the four-by-four matrix of Equation (63) performing rotations around the z axis. Its corresponding two-by-two matrix was given in Equation (79). Its generator is + +$$J_3 = \frac{1}{2} \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} \qquad (81)$$ + +If we introduce this additional matrix for the three generators we used in Sections 3 and 3.2, we end up the closed set of commutation relations + +$$[J_i, J_j] = i\epsilon_{ijk}J_k, \quad [J_i, K_j] = i\epsilon_{ijk}K_k, \quad [K_i, K_j] = -i\epsilon_{ijk}J_k \qquad (82)$$ + +with + +$$J_i = \frac{1}{2}\sigma_i, \quad \text{and} \quad K_i = \frac{i}{2}\sigma_i \qquad (83)$$ + +where $\sigma_i$ are the two-by-two Pauli spin matrices. + +For each of these two-by-two matrices there is a corresponding four-by-four matrix generating Lorentz transformations on the four-dimensional Lorentz group. When these two-by-two matrices are imaginary, the corresponding four-by-four matrices were given in Table 1. If they are real, the corresponding four-by-four matrices were given in Table 5. +---PAGE_BREAK--- + +**Table 5.** Two-by-two and four-by-four generators not included in Table 1. 
The generators given there and given here constitute the set of six generators for SL(2, c) or of the Lorentz group given in Equation (82). + +
GeneratorTwo-by-TwoFour-by-Four
J312(
10
0-1
)
0000
000-i
00i0
J112(
01
10
)
0000
00i0
0000
0-i00
K212(
01
-10
)
000i
0000
0000
i000
+ +This set of commutation relations is known as the Lie algebra for the SL(2, c), namely the group of two-by-two elements with unit determinants. Their elements are complex. This set is also the Lorentz group performing Lorentz transformations on the four-dimensional Minkowski space. + +This set has many useful subgroups. For the group SL(2, c), there is a subgroup consisting only of real matrices, generated by the two-by-two matrices given in Table 1. This three-parameter subgroup is precisely the Sp(2) group we used in Sections 3 and 3.2. Their generators satisfy the Lie algebra given in Equation (43). + +In addition, this group has the following Wigner subgroups governing the internal space-time symmetries of particles in the Lorentz-covariant world [1]: + +1. The $J_i$ matrices form a closed set of commutation relations. The subgroup generated by these Hermitian matrices is SU(2) for electron spins. The corresponding rotation group does not change the four-momentum of the particle at rest. This is Wigner's little group for massive particles. If the particle is at rest, the two-by-two form of the four-vector is given by Equation (76). The Lorentz transformation generated by $J_3$ takes the form + +$$ \begin{pmatrix} e^{i\phi/2} & 0 \\ 0 & e^{-i\phi/2} \end{pmatrix} \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix} \begin{pmatrix} e^{-i\phi/2} & 0 \\ 0 & e^{i\phi/2} \end{pmatrix} = \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix} \quad (84) $$ + +Similar computations can be carried out for $J_1$ and $J_2$. + +2. There is another Sp(2) subgroup, generated by $K_1$, $K_2$, and $J_3$. They satisfy the commutation relations + +$$ [K_1, K_2] = -iJ_3, \quad [J_3, K_1] = iK_2, \quad [K_2, J_3] = iK_1. \quad (85) $$ + +The Wigner transformation generated by these two-by-two matrices leave the momentum four-vector of Equation (77) invariant. 
For instance, the transformation matrix generated by $K_2$ takes the form + +$$ \exp(-i\xi K_2) = \begin{pmatrix} \cosh(\xi/2) & i\sinh(\xi/2) \\ -i\sinh(\xi/2) & \cosh(\xi/2) \end{pmatrix} \quad (86) $$ + +and the Wigner transformation takes the form + +$$ \begin{pmatrix} \cosh(\xi/2) & i\sinh(\xi/2) \\ -i\sinh(\xi/2) & \cosh(\xi/2) \end{pmatrix} \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} \begin{pmatrix} \cosh(\xi/2) & i\sinh(\xi/2) \\ -i\sinh(\xi/2) & \cosh(\xi/2) \end{pmatrix} = \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} \quad (87) $$ + +Computations with $K_1$ and $J_3$ lead to the same result. +---PAGE_BREAK--- + +Since the determinant of the four-momentum matrix is negative, the particle has an imaginary mass. In the language of the four-by-four matrix, the transformation matrices leave the four-momentum of the form (0, 1, 0, 0) invariant. + +3. Furthermore, we can consider the following combinations of the generators: + +$$N_1 = K_1 - J_2 = \begin{pmatrix} 0 & i \\ 0 & 0 \end{pmatrix}, \quad \text{and} \quad N_2 = K_2 + J_1 = \begin{pmatrix} 0 & 1 \\ 0 & 0 \end{pmatrix} \qquad (88)$$ + +Together with $J_3$, they satisfy the following commutation relations. + +$$[N_1, N_2] = 0, \quad [N_1, J_3] = -iN_2, \quad [N_2, J_3] = iN_1 \qquad (89)$$ + +In order to understand this set of commutation relations, we can consider an x y coordinate system in a two-dimensional space. Then rotation around the origin is generated by + +$$J_3 = -i \left( x \frac{\partial}{\partial y} - y \frac{\partial}{\partial x} \right) \qquad (90)$$ + +and the two translations are generated by + +$$N_1 = -i \frac{\partial}{\partial x}, \quad \text{and} \quad N_2 = -i \frac{\partial}{\partial y} \qquad (91)$$ + +for the x and y directions respectively. These operators satisfy the commutation relations given in Equation (89). + +The two-by-two matrices of Equation (88) generate the following transformation matrix. 
+ +$$G(\gamma, \phi) = \exp[-i\gamma(N_1 \cos\phi + N_2 \sin\phi)] = \begin{pmatrix} 1 & \gamma e^{-i\phi} \\ 0 & 1 \end{pmatrix} \qquad (92)$$ + +The two-by-two form for the four-momentum for the massless particle is given by Equation (78). The computation of the Hermitian transformation using this matrix is + +$$\begin{pmatrix} 1 & \gamma e^{-i\phi} \\ 0 & 1 \end{pmatrix} \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \begin{pmatrix} 1 & 0 \\ \gamma e^{i\phi} & 1 \end{pmatrix} = \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \qquad (93)$$ + +confirming that $N_1$ and $N_2$, together with $J_3$, are the generators of the $E(2)$-like little group for massless particles in the two-by-two representation. The transformation that does this in the physical world is described in the following section. + +## 5.2. $E(2)$-Like Symmetry of Massless Particles + +From the four-by-four generators of $K_{1,2}$ and $J_{1,2}$, we can write + +$$N_1 = \begin{pmatrix} 0 & 0 & i & 0 \\ 0 & 0 & i & 0 \\ i & -i & 0 & 0 \\ 0 & 0 & 0 & 0 \end{pmatrix}, \quad \text{and} \quad N_2 = \begin{pmatrix} 0 & 0 & 0 & i \\ 0 & 0 & 0 & i \\ 0 & 0 & 0 & 0 \\ i & -i & 0 & 0 \end{pmatrix} \qquad (94)$$ +---PAGE_BREAK--- + +These matrices lead to the transformation matrix of the form + +$$ +G(\gamma, \phi) = \begin{pmatrix} +1 + \gamma^2/2 & -\gamma^2/2 & \gamma \cos \phi & \gamma \sin \phi \\ +\gamma^2/2 & 1 - \gamma^2/2 & \gamma \cos \phi & \gamma \sin \phi \\ +-\gamma \cos \phi & \gamma \cos \phi & 1 & 0 \\ +-\gamma \sin \phi & \gamma \sin \phi & 0 & 1 +\end{pmatrix} \quad (95) +$$ + +This matrix leaves the four-momentum invariant, as we can see from + +$$ +G(\gamma, \phi) \begin{pmatrix} 1 \\ 1 \\ 0 \\ 0 \end{pmatrix} = \begin{pmatrix} 1 \\ 1 \\ 0 \\ 0 \end{pmatrix} \tag{96} +$$ + +When it is applied to the photon four-potential + +$$ +G(\gamma, \phi) \begin{pmatrix} A_0 \\ A_3 \\ A_1 \\ A_2 \end{pmatrix} = \begin{pmatrix} A_0 \\ A_3 \\ A_1 \\ A_2 \end{pmatrix} + \gamma (A_1 \cos \phi + A_2 \sin \phi) 
\begin{pmatrix} 1 \\ 1 \\ 0 \\ 0 \end{pmatrix} \quad (97) +$$ + +with the Lorentz condition which leads to $A_3 = A_0$ in the zero mass case. Gauge transformations are well known for electromagnetic fields and photons. Thus Wigner's little group leads to gauge transformations. + +In the two-by-two representation, the electromagnetic four-potential takes the form + +$$ +\begin{pmatrix} 2A_0 & A_1 - iA_2 \\ A_1 + iA_2 & 0 \end{pmatrix} \qquad (98) +$$ + +with the Lorentz condition $A_3 = A_0$. Then the two-by-two form of Equation (97) is + +$$ +\begin{pmatrix} 1 & \gamma e^{-i\phi} \\ 0 & 1 \end{pmatrix} \begin{pmatrix} 2A_0 & A_1 - iA_2 \\ A_1 + iA_2 & 0 \end{pmatrix} \begin{pmatrix} 1 & 0 \\ \gamma e^{i\phi} & 1 \end{pmatrix} \quad (99) +$$ + +which becomes + +$$ +\begin{pmatrix} 2A_0 & A_1 - iA_2 \\ A_1 + iA_2 & 0 \end{pmatrix} + \begin{pmatrix} 2\gamma (A_1 \cos \phi + A_2 \sin \phi) & 0 \\ 0 & 0 \end{pmatrix} \quad (100) +$$ + +This is the two-by-two equivalent of the gauge transformation given in Equation (97). + +For massless spin-1/2 particles starting with the two-by-two expression of $G(\gamma, \phi)$ given in Equation (92), and considering the spinors + +$$ +u = \begin{pmatrix} 1 \\ 0 \end{pmatrix}, \quad \text{and} \quad v = \begin{pmatrix} 0 \\ 1 \end{pmatrix} \tag{101} +$$ + +for spin-up and spin-down states respectively, + +$$ +Gu = u, \quad \text{and} \quad Gv = v + \gamma e^{-i\phi} u +\quad (102) +$$ + +This means that the spinor $u$ for spin up is invariant under the gauge transformation while $v$ is not. Thus, the polarization of massless spin-1/2 particles, such as neutrinos, is a consequence of the gauge invariance. We shall continue this discussion in Section 7. +---PAGE_BREAK--- + +5.3. Boosts along the z Axis + +In Sections 4.1 and 5.1, we studied Wigner transformations for fixed values of the four-momenta. 
+The next question is what happens when the system is boosted along the z direction, with the +transformation + +$$ +\begin{pmatrix} t' \\ z' \end{pmatrix} = \begin{pmatrix} \cosh \eta & \sinh \eta \\ \sinh \eta & \cosh \eta \end{pmatrix} \begin{pmatrix} t \\ z \end{pmatrix} \qquad (103) +$$ + +Then the four-momenta become + +$$ +(\cosh \eta, \sinh \eta, 0, 0), \quad (\sinh \eta, \cosh \eta, 0, 0), \quad e^{\eta}(1, 1, 0, 0) \tag{104} +$$ + +respectively for massive, imaginary, and massless particles cases. In the two-by-two representation, +the boost matrix is + +$$ +\begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \tag{105} +$$ + +and the four-momenta of Equation (104) become + +$$ +\begin{pmatrix} e^\eta & 0 \\ 0 & e^{-\eta} \end{pmatrix}, \quad \begin{pmatrix} e^\eta & 0 \\ 0 & -e^{-\eta} \end{pmatrix}, \quad \begin{pmatrix} e^\eta & 0 \\ 0 & 0 \end{pmatrix} \tag{106} +$$ + +respectively. These matrices become Equations (76)–(78) respectively when $\eta = 0$. + +We are interested in Lorentz transformations which leave a given non-zero momentum invariant. +We can consider a Lorentz boost along the direction preceded and followed by identical rotation +matrices, as described in Figure 1 and the transformation matrix as + +$$ +\begin{pmatrix} \cos(\alpha/2) & -\sin(\alpha/2) \\ \sin(\alpha/2) & \cos(\alpha/2) \end{pmatrix} \begin{pmatrix} \cosh \chi & -\sinh \chi \\ -\sinh \chi & \cosh \chi \end{pmatrix} \begin{pmatrix} \cos(\alpha/2) & -\sin(\alpha/2) \\ \sin(\alpha/2) & \cos(\alpha/2) \end{pmatrix} \quad (107) +$$ + +which becomes + +$$ +\begin{pmatrix} +(\cos \alpha) \cosh \chi & -\sinh \chi - (\sin \alpha) \cosh \chi \\ +-\sinh \chi + (\sin \alpha) \cosh \chi & (\cos \alpha) \cosh \chi +\end{pmatrix} +\quad (108) +$$ +---PAGE_BREAK--- + +Figure 1. Bargmann and Wigner decompositions. (a) Bargmann decomposition; (b) Wigner decomposition. In the Bargmann decomposition, we start from a momentum along the z direction. 
We can rotate, boost, and rotate to bring the momentum to the original position. The resulting matrix is the product of one boost and two rotation matrices. In the Wigner decomposition, the particle is boosted back to the frame where the Wigner transformation can be applied. Make a Wigner transformation there and come back to the original state of the momentum. This process can also be written as the product of three simple matrices. + +Except the sign of $\chi$, the two-by-two matrices of Equations (107) and (108) are identical with those given in Section 3.2. The only difference is the sign of the parameter $\chi$. We are thus ready to interpret this expression in terms of physics. + +1. If the particle is massive, the off-diagonal elements of Equation (108) have opposite signs, and this matrix can be decomposed into + +$$ \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \begin{pmatrix} e^{-\eta/2} & 0 \\ 0 & e^{\eta/2} \end{pmatrix} \quad (109) $$ + +with + +$$ \cos(\theta/2) = (\cosh \chi) \cos \alpha, \quad \text{and} \quad e^{2\eta} = \frac{\cosh(\chi) \sin \alpha + \sinh \chi}{\cosh(\chi) \sin \alpha - \sinh \chi} \quad (110) $$ + +and + +$$ e^{2\eta} = \frac{p_0 + p_z}{p_0 - p_z} \quad (111) $$ + +According to Equation (109) the first matrix (far right) reduces the particle momentum to zero. The second matrix rotates the particle without changing the momentum. The third matrix boosts the particle to restore its original momentum. This is the extension of Wigner's original idea to moving particles. + +2. 
If the particle has an imaginary mass, the off-diagonal elements of Equation (108) have the same sign, + +$$ \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix} \begin{pmatrix} e^{-\eta/2} & 0 \\ 0 & e^{\eta/2} \end{pmatrix} \quad (112) $$ +---PAGE_BREAK--- + +with + +$$ \cosh(\lambda/2) = (\cosh\chi)\cos\alpha, \quad \text{and} \quad e^{2\eta} = \frac{\sinh\chi + \cosh(\chi)\sin\alpha}{\sinh\chi - \cosh(\chi)\sin\alpha} \qquad (113) $$ + +and + +$$ e^{2\eta} = \frac{p_0 + p_z}{p_z - p_0} \qquad (114) $$ + +This is also a three-step operation. The first matrix brings the particle momentum to the zero-energy state with $p_0 = 0$. Boosts along the x or y direction do not change the four-momentum. We can then boost the particle back to restore its momentum. This operation is also an extension of Wigner's original little group. Thus, it is quite appropriate to call the formulas of Equations (109) and (112) Wigner decompositions. + +3. If the particle mass is zero with + +$$ \sinh \chi = (\cosh \chi) \sin \alpha \qquad (115) $$ + +the $\eta$ parameter becomes infinite, and the Wigner decomposition does not appear to be useful. We can then go back to the Bargmann decomposition of Equation (107). With the condition of Equation (115), Equation (108) becomes + +$$ \begin{pmatrix} 1 & -\gamma \\ 0 & 1 \end{pmatrix} \qquad (116) $$ + +with + +$$ \gamma = 2 \sinh \chi \qquad (117) $$ + +The decomposition ending with a triangular matrix is called the Iwasawa decomposition [16,22] and its physical interpretation was given in Section 5.2. The $\gamma$ parameter does not depend on $\eta$. + +Thus, we have given physical interpretations to the Bargmann and Wigner decompositions given in Section 3.2. Consider what happens when the momentum becomes large. Then $\eta$ becomes large for nonzero mass cases. 
All three four-momenta in Equation (106) become + +$$ e^{\eta} \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \qquad (118) $$ + +As for the Bargmann-Wigner matrices, they become the triangular matrix of Equation (116), with $\gamma = \sin(\theta/2)e^{\eta}$ and $\gamma = \sinh(\lambda/2)e^{\eta}$, respectively for the massive and imaginary-mass cases. + +In Section 5.2, we concluded that the triangular matrix corresponds to gauge transformations. However, particles with imaginary mass are not observed. For massive particles, we can start with the three-dimensional rotation group. The rotation around the z axis is called helicity, and remains invariant under the boost along the z direction. As for the transverse rotations, they become gauge transformation as illustrated in Table 6. + +**Table 6.** Covariance of the energy-momentum relation, and covariance of the internal space-time symmetry. Under the Lorentz boost along the z direction, $J_3$ remains invariant, and this invariant component of the angular momentum is called the helicity. The transverse component $J_1$ and $J_2$ collapse into a gauge transformation. The $\gamma$ parameter for the massless case has been studied in earlier papers in the four-by-four matrix formulation of Wigner's little groups [8,21]. + +
Massive, SlowCovarianceMassless, Fast
$E = p^2/2m$
$J_3$
Einstein's $E = mc^2$
Wigner's Little Group
$E = cp$
Helicity
Gauge Transformation
$J_1, J_2$
+---PAGE_BREAK--- + +5.4. Conjugate Transformations + +The most general form of the SL(2, c) matrix is given in Equation (66). Transformation operators for the Lorentz group are given in exponential form as: + +$$ +D = \exp \left\{ -i \sum_{i=1}^{3} (\theta_i J_i + \eta_i K_i) \right\} \qquad (119) +$$ + +where the $J_i$ are the generators of rotations and the $K_i$ are the generators of proper Lorentz boosts. They satisfy the Lie algebra given in Equation (43). This set of commutation relations is invariant under the sign change of the boost generators $K_i$. Thus, we can consider “dot conjugation” defined as + +$$ +\dot{D} = \exp \left\{ -i \sum_{i=1}^{3} (\theta_i J_i - \eta_i K_i) \right\} \quad (120) +$$ + +Since $K_i$ are anti-Hermitian while $J_i$ are Hermitian, the Hermitian conjugate of the above expression is + +$$ +D^{\dagger} = \exp \left\{ -i \sum_{i=1}^{3} (-\theta_i J_i + \eta_i K_i) \right\} \qquad (121) +$$ + +while the Hermitian conjugate of G is + +$$ +\dot{D}^{\dagger} = \exp \left\{ -i \sum_{i=1}^{3} (-\theta_i J_i - \eta_i K_i) \right\} \qquad (122) +$$ + +Since we understand the rotation around the z axis, we can now restrict the kinematics to the +zt plane, and work with the Sp(2) symmetry. Then the D matrices can be considered as Bargmann +decompositions. First, D and $\dot{D}$, and their Hermitian conjugates are + +$$ +D(\alpha, \chi) = \begin{pmatrix} +(\cos \alpha) \cosh \chi & \sinh \chi - (\sin \alpha) \cosh \chi \\ +\sinh \chi + (\sin \alpha) \cosh \chi & (\cos \alpha) \cosh \chi +\end{pmatrix} \tag{123} +$$ + +$$ +\dot{D}(\alpha, \chi) = \begin{pmatrix} +(\cos \alpha) \cosh \chi & -\sinh \chi - (\sin \alpha) \cosh \chi \\ +-\sinh \chi + (\sin \alpha) \cosh \chi & (\cos \alpha) \cosh \chi +\end{pmatrix} \quad (124) +$$ + +These matrices correspond to the "D loops" given in Figure 2a,b respectively. The "dot" conjugation changes the direction of boosts. 
The dot conjugation leads to the inversion of the space which is called the parity operation. + +We can also consider changing the direction of rotations. Then they result in the Hermitian +conjugates. We can write their matrices as + +$$ +D^{\dagger}(\alpha, \chi) = \begin{pmatrix} +(\cos \alpha) \cosh \chi & \sinh \chi + (\sin \alpha) \cosh \chi \\ +\sinh \chi - (\sin \alpha) \cosh \chi & (\cos \alpha) \cosh \chi +\end{pmatrix} \quad (125) +$$ + +$$ +\dot{D}^{\dagger}(\alpha, \chi) = \begin{pmatrix} +(\cos \alpha) \cosh \chi & -\sinh \chi + (\sin \alpha) \cosh \chi \\ +-\sinh \chi - (\sin \alpha) \cosh \chi & (\cos \alpha) \cosh \chi +\end{pmatrix} \quad (126) +$$ + +From the exponential expressions from Equation (119) to Equation (122), it is clear that + +$$ +\dot{D}^{\dagger} = D^{-1}, \quad \text{and} \quad D^{\dagger} = \dot{D}^{-1} \tag{127} +$$ + +The D loop given in Figure 1 corresponds to $\dot{D}$. We shall return to these loops in Section 7. +---PAGE_BREAK--- + +Figure 2. Four D-loops resulting from the Bargmann decomposition. (a) Bargmann decomposition from Figure 1; (b) Direction of the Lorentz boost is reversed; (c) Direction of rotation is reversed; (d) Both directions are reversed. These operations correspond to the space-inversion, charge conjugation, and the time reversal respectively. + +**6. Symmetries Derivable from the Poincaré Sphere** + +The Poincaré sphere serves as the basic language for polarization physics. Its underlying +language is the two-by-two coherency matrix. This coherency matrix contains the symmetry of SL(2, c) +isomorphic to the Lorentz group applicable to three space-like and one time-like dimensions [4,6,7]. + +For polarized light propagating along the z direction, the amplitude ratio and phase difference of +electric field x and y components traditionally determine the state of polarization. Hence, the polarization +can be changed by adjusting the amplitude ratio or the phase difference or both. 
Usually, the optical +device which changes amplitude is called an “attenuator” (or “amplifier”) and the device which changes +the relative phase a “phase shifter”. + +Let us start with the Jones vector: + +$$ +\begin{pmatrix} \psi_1(z,t) \\ \psi_2(z,t) \end{pmatrix} = \begin{pmatrix} a \exp[i(kz - \omega t)] \\ a \exp[i(kz - \omega t)] \end{pmatrix} \tag{128} +$$ +---PAGE_BREAK--- + +To this matrix, we can apply the phase shift matrix of Equation (79) which brings the Jones vector to + +$$ +\begin{pmatrix} \psi_1(z,t) \\ \psi_2(z,t) \end{pmatrix} = \begin{pmatrix} a \exp[i(kz - \omega t - \phi/2)] \\ a \exp[i(kz - \omega t + \phi/2)] \end{pmatrix} \quad (129) +$$ + +The generator of this phase-shifter is $J_3$ given in Table 5. + +The optical beam can be attenuated differently in the two directions. The resulting matrix is + +$$ +e^{-\mu} \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \qquad (130) +$$ + +with the attenuation factor of exp(-μ + η/2) and exp(-μ - η/2) for the x and y directions respectively. We are interested only in the relative attenuation given in Equation (46) which leads to different amplitudes for the x and y component, and the Jones vector becomes + +$$ +\begin{pmatrix} \psi_1(z, t) \\ \psi_2(z, t) \end{pmatrix} = \begin{pmatrix} ae^{\eta/2} \exp[i(kz - \omega t - \phi/2)] \\ ae^{-\eta/2} \exp[i(kz - \omega t + \phi/2)] \end{pmatrix} \quad (131) +$$ + +The squeeze matrix of Equation (46) is generated by $K_3$ given in Table 1. + +The polarization is not always along the *x* and *y* axes, but can be rotated around the *z* axis using the rotation matrix of Equation (44) generated by $J_2$ given in Table 1. + +Among the rotation angles, the angle of 45° plays an important role in polarization optics. Indeed, if we rotate the squeeze matrix of Equation (46) by 45°, we end up with the squeeze matrix of Equation (45) generated by $K_1$ given also in Table 1. 
+ +Each of these four matrices plays an important role in special relativity, as we discussed in Sections 3.2 and 6. Their respective roles in optics and particle physics are given in Table 7. + +**Table 7.** Polarization optics and special relativity share the same mathematics. Each matrix has its clear role in both optics and relativity. The determinant of the Stokes or the four-momentum matrix remains invariant under Lorentz transformations. It is interesting to note that the decoherence parameter (least fundamental) in optics corresponds to the (mass)$^2$ (most fundamental) in particle physics. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Polarization Optics + + Transformation Matrix + + Particle Symmetry +
+ Phase shift by φ + + + + + + + + + + +
+ e-iφ/2 + + 0 +
+ 0 + + eiφ/2 +
+
+ Rotation around z. +
+ Rotation around z + + + + + + + + + + +
+ cos(θ/2) + + -sin(θ/2) +
+ sin(θ/2) + + cos(θ/2) +
+
+ Rotation around y. +
+ Squeeze along x and y + + + + + + + + + + +
+ eη/2 + + 0 +
+ 0 + + e-η/2 +
+
+ Boost along z. +
+ Squeeze along 45° + + + + + + + + + + +
+ cosh(λ/2) + + sinh(λ/2) +
+ sinh(λ/2) + + cosh(λ/2) +
+
+ Boost along x. +
+ a⁴ (sinξ)² Determinant + + (mass)² +
+ +The most general form for the two-by-two matrix applicable to the Jones vector is the G matrix of Equation (66). This matrix is of course a representation of the SL(2, c) group. It brings the simplest Jones vector of Equation (128) to its most general form. +---PAGE_BREAK--- + +## 6.1. Coherency Matrix + +However, the Jones vector alone cannot tell us whether the two components are coherent with each other. In order to address this important degree of freedom, we use the coherency matrix defined as [3,23] + +$$ C = \begin{pmatrix} S_{11} & S_{12} \\ S_{21} & S_{22} \end{pmatrix} \qquad (132) $$ + +where + +$$ \langle \psi_i^* \psi_j \rangle = \frac{1}{T} \int_0^T \psi_i^*(t+\tau) \psi_j(t) dt \qquad (133) $$ + +where T is a sufficiently long time interval. Then, those four elements become [4] + +$$ S_{11} = \langle \psi_1^* \psi_1 \rangle = a^2, \quad S_{12} = \langle \psi_1^* \psi_2 \rangle = a^2 (\cos \zeta) e^{-i\phi} \qquad (134) $$ + +$$ S_{21} = \langle \psi_2^* \psi_1 \rangle = a^2 (\cos \zeta) e^{+i\phi}, \quad S_{22} = \langle \psi_2^* \psi_2 \rangle = a^2 \qquad (135) $$ + +The diagonal elements are the absolute values of $\psi_1$ and $\psi_2$ respectively. The angle $\phi$ could be different from the value of the phase-shift angle given in Equation (79), but this difference does not play any role in the reasoning. The off-diagonal elements could be smaller than the product of $\psi_1$ and $\psi_2$, if the two polarizations are not completely coherent. + +The angle $\zeta$ specifies the degree of coherency. If it is zero, the system is fully coherent, while the system is totally incoherent if $\zeta$ is $90^\circ$. This can therefore be called the "decoherence angle." 
+ +While the most general form of the transformation applicable to the Jones vector is G of Equation (66), the transformation applicable to the coherency matrix is + +$$ C' = G C G^{\dagger} \qquad (136) $$ + +The determinant of the coherency matrix is invariant under this transformation, and it is + +$$ \det(C) = a^4 (\sin \zeta)^2 \qquad (137) $$ + +Thus, angle $\zeta$ remains invariant. In the language of the Lorentz transformation applicable to the four-vector, the determinant is equivalent to the $(mass)^2$ and is therefore a Lorentz-invariant quantity. + +## 6.2. Two Radii of the Poincaré Sphere + +Let us write explicitly the transformation of Equation (136) as + +$$ \begin{pmatrix} S'_{11} & S'_{12} \\ S'_{21} & S'_{22} \end{pmatrix} = \begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix} \begin{pmatrix} S_{11} & S_{12} \\ S_{21} & S_{22} \end{pmatrix} \begin{pmatrix} \alpha^* & \gamma^* \\ \beta^* & \delta^* \end{pmatrix} \qquad (138) $$ + +It is then possible to construct the following quantities, + +$$ S_0 = \frac{S_{11} + S_{22}}{2}, \quad S_3 = \frac{S_{11} - S_{22}}{2} \qquad (139) $$ + +$$ S_1 = \frac{S_{12} + S_{21}}{2}, \quad S_2 = \frac{S_{12} - S_{21}}{2i} \qquad (140) $$ + +These are known as the Stokes parameters, and constitute a four-vector ($S_0, S_3, S_1, S_2$) under the Lorentz transformation. + +In the Jones vector of Equation (128), the amplitudes of the two orthogonal components are equal. Thus, the two diagonal elements of the coherency matrix are equal. This leads to $S_3 = 0$, and the +---PAGE_BREAK--- + +problem is reduced from the sphere to a circle. 
In the resulting two-dimensional subspace, we can +introduce the polar coordinate system with + +$$ +\begin{align} +R &= \sqrt{S_1^2 + S_2^2} \tag{141} \\ +S_1 &= R \cos \phi \tag{142} \\ +S_2 &= R \sin \phi \tag{143} +\end{align} +$$ + +The radius $R$ is the radius of this circle, and is + +$$ +R = a^2 \cos \zeta \quad (144) +$$ + +The radius $R$ takes its maximum value $S_0$ when $\zeta = 0^\circ$. It decreases as $\zeta$ increases and vanishes when $\zeta = 90^\circ$. This aspect of the radius $R$ is illustrated in Figure 3. + +**Figure 3.** Radius of the Poincaré sphere. The radius $R$ takes its maximum value $S_0$ when the decoherence angle $\zeta$ is zero. It becomes smaller as $\zeta$ increases. It becomes zero when the angle reaches 90°. + +In order to see its implications in special relativity, let us go back to the four-momentum matrix of $m(1,0,0,0)$. Its determinant is $m^2$ and remains invariant. Likewise, the determinant of the coherency matrix of Equation (132) should also remain invariant. The determinant in this case is + +$$ +S_0^2 - R^2 = a^4 \sin^2 \zeta \quad (145) +$$ + +This quantity remains invariant under the Hermitian transformation of Equation (138), which is a Lorentz transformation as discussed in Sections 3.2 and 6. This aspect is shown on the last row of Table 7. + +The coherency matrix then becomes + +$$ +C = a^2 \begin{pmatrix} 1 & (\cos \xi)e^{-i\phi} \\ (\cos \xi)e^{i\phi} & 1 \end{pmatrix} \qquad (146) +$$ +---PAGE_BREAK--- + +Since the angle $\phi$ does not play any essential role, we can let $\phi = 0$, and write the coherency matrix as + +$$ C = a^2 \begin{pmatrix} 1 & \cos \xi \\ \cos \xi & 1 \end{pmatrix} \qquad (147) $$ + +The determinant of the above two-by-two matrix is + +$$ a^4 (1 - \cos^2 \xi) = a^4 \sin^2 \xi \qquad (148) $$ + +Since the Lorentz transformation leaves the determinant invariant, the change in this $\xi$ variable is not a Lorentz transformation. 
It is of course possible to construct a larger group in which this variable plays a role in a group transformation [6], but here we are more interested in its role in a particle gaining a mass from zero or the mass becoming zero. + +### 6.3. Extra-Lorentzian Symmetry + +The coherency matrix of Equation (146) can be diagonalized to + +$$ a^2 \begin{pmatrix} 1 + \cos \xi & 0 \\ 0 & 1 - \cos \xi \end{pmatrix} \qquad (149) $$ + +by a rotation. Let us then go back to the four-momentum matrix of Equation (70). If $p_x = p_y = 0$, and $p_z = p_0 \cos \xi$, we can write this matrix as + +$$ p_0 \begin{pmatrix} 1 + \cos \xi & 0 \\ 0 & 1 - \cos \xi \end{pmatrix} \qquad (150) $$ + +Thus, with this extra variable, it is possible to study the little groups for variable masses, including the small-mass limit and the zero-mass case. + +For a fixed value of $p_0$, the $(mass)^2$ becomes + +$$ (mass)^2 = (p_0 \sin \xi)^2, \quad \text{and} \quad (momentum)^2 = (p_0 \cos \xi)^2 \qquad (151) $$ + +resulting in + +$$ (energy)^2 = (mass)^2 + (momentum)^2 \qquad (152) $$ + +This transition is illustrated in Figure 4. We are interested in reaching a point on the light cone from mass hyperbola while keeping the energy fixed. According to this figure, we do not have to make an excursion to infinite-momentum limit. If the energy is fixed during this process, Equation (152) tells the mass and momentum relation, and Figure 5 illustrates this relation. +---PAGE_BREAK--- + +Figure 4. Transition from the massive to massless case. (a) Transition within the framework of the Lorentz group; (b) TransITION allowed in the symmetry of the Poincaré sphere. Within the framework of the Lorentz group, it is not possible to go from the massive to massless case directly, because it requires the change in the mass which is a Lorentz-invariant quantity. The only way is to move to infinite momentum and jump from the hyperbola to the light cone, and come back. 
The extra symmetry of the Poincaré sphere allows a direct transition. + +Figure 5. Energy-momentum-mass relation. This circle illustrates the case where the energy is fixed, while the mass and momentum are related according to the triangular rule. The value of the angle ξ changes from zero to 180°. The particle mass is negative for negative values of this angle. However, in the Lorentz group, only (mass)$^2$ is a relevant variable, and negative masses might play a role for theoretical purposes. + +Within the framework of the Lorentz group, it is possible, by making an excursion to infinite momentum where the mass hyperbola coincides with the light cone, to then come back to the desired point. On the other hand, the mass formula of Equation (151) allows us to go there directly. The decoherence mechanism of the coherency matrix makes this possible. +---PAGE_BREAK--- + +## 7. Small-Mass and Massless Particles + +We now have a mathematical tool to reduce the mass of a massive particle from its positive value to zero. During this process, the Lorentz-boosted rotation matrix becomes a gauge transformation for the spin-1 particle, as discussed in Section 5.2. For spin-1/2 particles, there are two issues. + +1. It was seen in Section 5.2 that the requirement of gauge invariance leads to a polarization of massless spin-1/2 particles, such as neutrinos. What happens to anti-neutrinos? + +2. There are strong experimental indications that neutrinos have a small mass. What happens to the $E(2)$ symmetry? + +### 7.1. Spin-1/2 Particles + +Let us go back to the two-by-two matrices of Section 5.4, and the two-by-two $D$ matrix. For a massive particle, its Wigner decomposition leads to + +$$ D = \begin{pmatrix} \cos(\theta/2) & -e^{-\eta} \sin(\theta/2) \\ e^{\eta} \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \qquad (153) $$ + +This matrix is applicable to the spinors $u$ and $v$ defined in Equation (101) respectively for the spin-up and spin-down states along the $z$ direction. 
+ +Since the Lie algebra of $SL(2,c)$ is invariant under the sign change of the $K_i$ matrices, we can consider the “dotted” representation, where the system is boosted in the opposite direction, while the direction of rotations remains the same. Thus, the Wigner decomposition leads to + +$$ \dot{D} = \begin{pmatrix} \cos(\theta/2) & -e^{\eta} \sin(\theta/2) \\ e^{-\eta} \sin(\theta/2) & \cos(\theta/2) \end{pmatrix} \qquad (154) $$ + +with its spinors + +$$ \dot{u} = \begin{pmatrix} 1 \\ 0 \end{pmatrix}, \quad \text{and} \quad \dot{v} = \begin{pmatrix} 0 \\ 1 \end{pmatrix} \qquad (155) $$ + +For anti-neutrinos, the helicity is reversed but the momentum is unchanged. Thus, $D^\dagger$ is the appropriate matrix. However, $D^\dagger = \dot{D}^{-1}$ as was noted in Section 5.4. Thus, we shall use $\dot{D}$ for anti-neutrinos. + +When the particle mass becomes very small, + +$$ e^{-\eta} = \frac{m}{2p} \qquad (156) $$ + +becomes small. Thus, if we let + +$$ e^{\eta} \sin(\theta/2) = \gamma, \quad \text{and} \quad e^{-\eta} \sin(\theta/2) = \epsilon^2 \qquad (157) $$ + +then the $D$ matrix of Equation (153) and the $\dot{D}$ of Equation (154) become + +$$ \begin{pmatrix} 1 - \gamma\epsilon^2/2 & -\epsilon^2 \\ \gamma & 1 - \gamma\epsilon^2/2 \end{pmatrix}, \quad \text{and} \quad \begin{pmatrix} 1 - \gamma\epsilon^2/2 & -\gamma \\ \epsilon^2 & 1 - \gamma\epsilon^2/2 \end{pmatrix} \qquad (158) $$ + +respectively, where $\gamma$ is an independent parameter and + +$$ \epsilon^2 = \gamma \left( \frac{m}{2p} \right)^2 \qquad (159) $$ +---PAGE_BREAK--- + +When the particle mass becomes zero, they become + +$$ \begin{pmatrix} 1 & 0 \\ \gamma & 1 \end{pmatrix}, \quad \text{and} \quad \begin{pmatrix} 1 & -\gamma \\ 0 & 1 \end{pmatrix} \tag{160} $$ + +applicable to the spinors $(u, v)$ and $(\dot{u}, \dot{v})$, respectively.
+ +For neutrinos, + +$$ \begin{pmatrix} 1 & 0 \\ \gamma & 1 \end{pmatrix} \begin{pmatrix} 1 \\ 0 \end{pmatrix} = \begin{pmatrix} 1 \\ \gamma \end{pmatrix}, \quad \text{and} \quad \begin{pmatrix} 1 & 0 \\ \gamma & 1 \end{pmatrix} \begin{pmatrix} 0 \\ 1 \end{pmatrix} = \begin{pmatrix} 0 \\ 1 \end{pmatrix} \tag{161} $$ + +For anti-neutrinos, + +$$ \begin{pmatrix} 1 & -\gamma \\ 0 & 1 \end{pmatrix} \begin{pmatrix} 1 \\ 0 \end{pmatrix} = \begin{pmatrix} 1 \\ 0 \end{pmatrix}, \quad \text{and} \quad \begin{pmatrix} 1 & -\gamma \\ 0 & 1 \end{pmatrix} \begin{pmatrix} 0 \\ 1 \end{pmatrix} = \begin{pmatrix} -\gamma \\ 1 \end{pmatrix} \tag{162} $$ + +It was noted in Section 5.2 that the triangular matrices of Equation (160) perform gauge transformations. Thus, for Equations (161) and (162) the requirement of gauge invariance leads to the polarization of neutrinos. The neutrinos are left-handed while the anti-neutrinos are right-handed. Since, however, nature cannot tell the difference between the dotted and undotted representations, the Lorentz group cannot tell which neutrino is right handed. It can say only that the neutrinos and anti-neutrinos are oppositely polarized. + +If the neutrino has a small mass, the gauge invariance is modified to + +$$ \begin{pmatrix} 1 - \gamma\epsilon^2/2 & -\epsilon^2 \\ \gamma & 1 - \gamma\epsilon^2/2 \end{pmatrix} \begin{pmatrix} 0 \\ 1 \end{pmatrix} = \begin{pmatrix} 0 \\ 1 \end{pmatrix} - \epsilon^2 \begin{pmatrix} 1 \\ \gamma/2 \end{pmatrix} \tag{163} $$ + +and + +$$ \begin{pmatrix} 1 - \gamma\epsilon^2/2 & -\gamma \\ \epsilon^2 & 1 - \gamma\epsilon^2/2 \end{pmatrix} \begin{pmatrix} 1 \\ 0 \end{pmatrix} = \begin{pmatrix} 1 \\ 0 \end{pmatrix} + \epsilon^2 \begin{pmatrix} -\gamma/2 \\ 1 \end{pmatrix} \tag{164} $$ + +respectively for neutrinos and anti-neutrinos. Thus, the violation of the gauge invariance in both cases is proportional to $\epsilon^2$ which is $\gamma (m/2p)^2$. + +## 7.2.
Small-Mass Neutrinos in the Real World + +Whether neutrinos have mass or not and the consequences of this relative to the Standard Model and lepton number is the subject of much theoretical speculation [24,25], and of cosmology [26], nuclear reactors [27], and high energy experimentations [28,29]. Neutrinos are fast becoming an important component of the search for dark matter and dark radiation [30]. Their importance within the Standard Model is reflected by the fact that they are the only particles which seem to exist with only one direction of chirality, i.e., only left-handed neutrinos have been confirmed to exist so far. + +It was speculated some time ago that neutrinos in constant electric and magnetic fields would acquire a small mass, and that right-handed neutrinos would be trapped within the interaction field [31]. Solving generalized electroweak models using left- and right-handed neutrinos has been discussed recently [32]. Today these right-handed neutrinos which do not participate in weak interactions are called “sterile” neutrinos [33]. A comprehensive discussion of the place of neutrinos in the scheme of physics has been given by Drewes [30]. We should note also that the three different neutrinos, namely $ν_e$, $ν_μ$, and $ν_τ$, may have different masses [34]. +---PAGE_BREAK--- + +**8. Scalars, Four-Vectors, and Four-Tensors** + +In Sections 5 and 7, our primary interest has been the two-by-two matrices applicable to spinors for spin-1/2 particles. Since we also used four-by-four matrices, we indirectly studied the four-component particle consisting of spin-1 and spin-zero components. + +If there are two spin 1/2 states, we are accustomed to construct one spin-zero state, and one spin-one state with three degeneracies. + +In this paper, we are confronted with two spinors, but each spinor can also be dotted. For this reason, there are 16 orthogonal states consisting of spin-one and spin-zero states. How many spin-zero states? How many spin-one states? 
+ +For particles at rest, it is known that the addition of two one-half spins result in spin-zero and spin-one states. In this paper, we have two different spinors behaving differently under the Lorentz boost. Around the z direction, both spinors are transformed by + +$$Z(\phi) = \exp(-i\phi J_3) = \begin{pmatrix} e^{-i\phi/2} & 0 \\ 0 & e^{i\phi/2} \end{pmatrix} \quad (165)$$ + +However, they are boosted by + +$$B(\eta) = \exp(-i\eta K_3) = \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix} \quad (166)$$ + +$$\dot{B}(\eta) = \exp(i\eta K_3) = \begin{pmatrix} e^{-\eta/2} & 0 \\ 0 & e^{\eta/2} \end{pmatrix} \quad (167)$$ + +applicable to the undotted and dotted spinors respectively. These two matrices commute with each other, and also with the rotation matrix Z(φ) of Equation (165). Since K₃ and J₃ commute with each other, we can work with the matrix Q(η, φ) defined as + +$$Q(\eta, \phi) = B(\eta)Z(\phi) = \begin{pmatrix} e^{(\eta-i\phi)/2} & 0 \\ 0 & e^{-(\eta-i\phi)/2} \end{pmatrix} \quad (168)$$ + +$$\dot{Q}(\eta, \phi) = \dot{B}(\eta)\dot{Z}(\phi) = \begin{pmatrix} e^{-(\eta+i\phi)/2} & 0 \\ 0 & e^{(\eta+i\phi)/2} \end{pmatrix} \quad (169)$$ + +When this combined matrix is applied to the spinors, + +$$Q(\eta, \phi)u = e^{(\eta-i\phi)/2}u, \quad Q(\eta, \phi)v = e^{-(\eta-i\phi)/2}v \quad (170)$$ + +$$\dot{Q}(\eta, \phi)\dot{u} = e^{-(\eta+i\phi)/2}\dot{u}, \quad \dot{Q}(\eta, \phi)\dot{v} = e^{(\eta+i\phi)/2}\dot{v} \quad (171)$$ + +If the particle is at rest, we can construct the combinations + +$$uu, \quad \frac{1}{\sqrt{2}}(uv + vu), \quad vv \quad (172)$$ + +to construct the spin-1 state, and + +$$\frac{1}{\sqrt{2}}(uv - vu) \qquad (173)$$ + +for the spin-zero state. There are four bilinear states. In the SL(2, c) regime, there are two dotted spinors. If we include both dotted and undotted spinors, there are 16 independent bilinear combinations. They are given in Table 8. This table also gives the effect of the operation of Q(η, φ). 
+---PAGE_BREAK--- + +**Table 8.** Sixteen combinations of the SL(2, c) spinors. In the SU(2) regime, there are two spinors leading to four bilinear forms. In the SL(2, c) world, there are two undotted and two dotted spinors. These four spinors lead to 16 independent bilinear combinations. + +
Spin 1Spin 0
uu, 1√2(uv + vu), vv,1√2(uv − vu)
úú, 1√2(úv + vú), vúv,1√2(úv − vú)
uú, 1√2(uø + vú), vúv,1√2(uø − vú)
úú, 1√2(úv + vú), vúv,1√2(úv − vú)
+ +After the operation of $Q(\eta, \phi)$ and $\dot{Q}(\eta, \phi)$ + +$$
+\begin{aligned}
+e^{-i\phi} e^{\eta} u u, & \quad \frac{1}{\sqrt{2}} (uv + vu), \quad e^{i\phi} e^{-\eta} v v, \quad \frac{1}{\sqrt{2}} (uv - vu) \\
+e^{-i\phi} e^{-\eta} \dot{u} \dot{u}, & \quad \frac{1}{\sqrt{2}} (\dot{u}\dot{v} + \dot{v}\dot{u}), \quad e^{i\phi} e^{\eta} \dot{v} \dot{v}, \quad \frac{1}{\sqrt{2}} (\dot{u}\dot{v} - \dot{v}\dot{u}) \\
+e^{-i\phi} u \dot{u}, & \quad \frac{1}{\sqrt{2}} (e^{\eta} u \dot{v} + e^{-\eta} v \dot{u}), \quad e^{i\phi} v \dot{v}, \quad \frac{1}{\sqrt{2}} (e^{\eta} u \dot{v} - e^{-\eta} v \dot{u}) \\
+e^{-i\phi} \dot{u} u, & \quad \frac{1}{\sqrt{2}} (e^{-\eta} \dot{u} v + e^{\eta} \dot{v} u), \quad e^{i\phi} \dot{v} v, \quad \frac{1}{\sqrt{2}} (e^{-\eta} \dot{u} v - e^{\eta} \dot{v} u)
+\end{aligned}
+$$ + +Among the bilinear combinations given in Table 8, the following two are invariant under rotations and also under boosts. + +$$S = \frac{1}{\sqrt{2}}(uv - vu), \quad \text{and} \quad S' = -\frac{1}{\sqrt{2}}(\dot{u}\dot{v} - \dot{v}\dot{u}) \qquad (174)$$ + +They are thus scalars in the Lorentz-covariant world. Are they the same or different? Let us consider the following combinations + +$$S_+ = \frac{1}{\sqrt{2}} (S + S'), \quad \text{and} \quad S_- = \frac{1}{\sqrt{2}} (S - S') \qquad (175)$$ + +Under the dot conjugation, $S_+$ remains invariant, but $S_-$ changes its sign. + +Under the dot conjugation, the boost is performed in the opposite direction. Therefore, it is the operation of space inversion, and $S_+$ is a scalar while $S_-$ is called the pseudo-scalar. + +## 8.1.
Four-Vectors + +Let us consider the bilinear products of one dotted and one undotted spinor as $u\dot{u}$, $u\dot{v}$, $\dot{u}v$, $v\dot{v}$, and construct the matrix + +$$U = \begin{pmatrix} u\dot{v} & v\dot{v} \\ u\dot{u} & v\dot{u} \end{pmatrix} \qquad (176)$$ + +Under the rotation $Z(\phi)$ and the boost $B(\eta)$ they become + +$$ +\begin{pmatrix} +e^{\eta} u \dot{v} & e^{-i\phi} v \dot{v} \\ +e^{i\phi} u \dot{u} & e^{-\eta} v \dot{u} +\end{pmatrix} +\qquad +(177) +$$ + +Indeed, this matrix is consistent with the transformation properties given in Table 8, and transforms like the four-vector + +$$ +\begin{pmatrix} +t+z & x-iy \\ +x+iy & t-z +\end{pmatrix} +\qquad +(178) +$$ + +This form was given in Equation (65), and played the central role throughout this paper. Under the space inversion, this matrix becomes + +$$ +\begin{pmatrix} +t-z & -(x-iy) \\ +-(x+iy) & t+z +\end{pmatrix} +\qquad +(179) +$$ +---PAGE_BREAK--- + +This space inversion is known as the parity operation. + +The form of Equation (176) for a particle or field with four-components, is given by $(V_0, V_z, V_x, V_y)$. The two-by-two form of this four-vector is + +$$ U = \begin{pmatrix} V_0 + V_z & V_x - iV_y \\ V_x + iV_y & V_0 - V_z \end{pmatrix} \qquad (180) $$ + +If boosted along the z direction, this matrix becomes + +$$ \begin{pmatrix} e^{\eta} (V_0 + V_z) & V_x - iV_y \\ V_x + iV_y & e^{-\eta} (V_0 - V_z) \end{pmatrix} \qquad (181) $$ + +In the mass-zero limit, the four-vector matrix of Equation (181) becomes + +$$ \begin{pmatrix} 2A_0 & A_x - iA_y \\ A_x + iA_y & 0 \end{pmatrix} \qquad (182) $$ + +with the Lorentz condition $A_0 = A_z$. The gauge transformation applicable to the photon four-vector was discussed in detail in Section 5.2. + +Let us go back to the matrix of Equation (180), we can construct another matrix $\dot{U}$. 
Since the dot conjugation leads to the space inversion, + +$$ \dot{U} = \begin{pmatrix} \dot{u}v & \dot{v}v \\ \dot{u}u & \dot{v}u \end{pmatrix} \qquad (183) $$ + +Then + +$$ \dot{u}v \simeq (t-z), \qquad \dot{v}u \simeq (t+z) \qquad (184) $$ + +$$ \dot{v}v \simeq -(x-iy), \quad \dot{u}u \simeq -(x+iy) \qquad (185) $$ + +where the symbol $\simeq$ means “transforms like”. + +Thus, $U$ of Equation (176) and $\dot{U}$ of Equation (183) used up 8 of the 16 bilinear forms. Since there are two bilinear forms in the scalar and pseudo-scalar as given in Equation (175), we have to give interpretations to the six remaining bilinear forms. + +## 8.2. Second-Rank Tensor + +In this subsection, we are studying bilinear forms with both spinors dotted and undotted. In Section 8.1, each bilinear spinor consisted of one dotted and one undotted spinor. There are also bilinear spinors which are both dotted or both undotted. We are interested in two sets of three quantities satisfying the $O(3)$ symmetry. They should therefore transform like + +$$ (x+iy)/\sqrt{2}, \quad (x-iy)/\sqrt{2}, \quad z \qquad (186) $$ + +which are like + +$$ uu, \quad vv, \quad (uv + vu)/\sqrt{2} \qquad (187) $$ + +respectively in the $O(3)$ regime. Since the dot conjugation is the parity operation, they are like + +$$ -\dot{u}\dot{u}, \quad -\dot{v}\dot{v}, \quad -(\dot{u}\dot{v} + \dot{v}\dot{u})/\sqrt{2} \qquad (188) $$ + +In other words, + +$$ (\overline{uu}) = -\dot{u}\dot{u}, \quad \text{and} \quad (\overline{vv}) = -\dot{v}\dot{v} \qquad (189) $$ +---PAGE_BREAK--- + +We noticed a similar sign change in Equation (185).
+ +In order to construct the z component in this O(3) space, let us first consider + +$$f_z = \frac{1}{2} [(uv + vu) - (\dot{u}\dot{v} + \dot{v}\dot{u})], \quad g_z = \frac{1}{2i} [(uv + vu) + (\dot{u}\dot{v} + \dot{v}\dot{u})] \qquad (190)$$ + +where $f_z$ and $g_z$ are respectively symmetric and anti-symmetric under the dot conjugation or the parity operation. These quantities are invariant under the boost along the z direction. They are also invariant under rotations around this axis, but they are not invariant under boost along or rotations around the x or y axis. They are different from the scalars given in Equation (174). + +Next, in order to construct the x and y components, we start with $f_\pm$ and $g_\pm$ as + +$$f_+ = \frac{1}{\sqrt{2}} (uu - \dot{u}\dot{u}) \qquad g_+ = \frac{1}{\sqrt{2}i} (uu + \dot{u}\dot{u}) \qquad (191)$$ + +$$f_- = \frac{1}{\sqrt{2}} (vv - \dot{v}\dot{v}) \qquad g_- = \frac{1}{\sqrt{2}i} (vv + \dot{v}\dot{v}) \qquad (192)$$ + +Then + +$$f_x = \frac{1}{\sqrt{2}} (f_+ + f_-) = \frac{1}{2} [(uu - \dot{u}\dot{u}) + (vv - \dot{v}\dot{v})] \qquad (193)$$ + +$$f_y = \frac{1}{\sqrt{2}i} (f_+ - f_-) = \frac{1}{2i} [(uu - \dot{u}\dot{u}) - (vv - \dot{v}\dot{v})] \qquad (194)$$ + +and + +$$g_x = \frac{1}{\sqrt{2}} (g_+ + g_-) = \frac{1}{2i} [(uu + \dot{u}\dot{u}) + (vv + \dot{v}\dot{v})] \qquad (195)$$ + +$$g_y = \frac{1}{\sqrt{2}i} (g_+ - g_-) = -\frac{1}{2} [(uu + \dot{u}\dot{u}) - (vv + \dot{v}\dot{v})] \qquad (196)$$ + +Here $f_x$ and $f_y$ are symmetric under dot conjugation, while $g_x$ and $g_y$ are anti-symmetric. + +Furthermore, $f_z$, $f_x$, and $f_y$ of Equations (190), (193) and (194) transform like a three-dimensional vector. The same can be said for $g_i$ of Equations (190), (195) and (196). Thus, they can be grouped into the second-rank tensor + +$$T = \begin{pmatrix}
+0 & -g_z & -g_x & -g_y \\
+g_z & 0 & -f_y & f_x \\
+g_x & f_y & 0 & -f_z \\
+g_y & -f_x & f_z & 0
+\end{pmatrix} \qquad (197)$$ + +whose Lorentz-transformation properties are well known.
The $g_i$ components change their signs under space inversion, while the $f_i$ components remain invariant. They are like the electric and magnetic fields, respectively. + +If the system is Lorentz-boosted, $f_i$ and $g_i$ can be computed from Table 8. We are now interested in the symmetry of photons by taking the massless limit. According to the procedure developed in Section 6, we can keep only the terms which become larger for larger values of $\eta$. Thus, + +$$f_x \rightarrow \frac{1}{2}(uu - \dot{v}\dot{v}), \qquad f_y \rightarrow \frac{1}{2i}(uu + \dot{v}\dot{v}) \qquad (198)$$ + +$$g_x \rightarrow \frac{1}{2i}(uu + \dot{v}\dot{v}), \qquad g_y \rightarrow -\frac{1}{2}(uu - \dot{v}\dot{v}) \qquad (199)$$ + +in the massless limit. +---PAGE_BREAK--- + +Then the tensor of Equation (197) becomes + +$$F = \begin{pmatrix} 0 & 0 & -E_x & -E_y \\ 0 & 0 & -B_y & B_x \\ E_x & B_y & 0 & 0 \\ E_y & -B_x & 0 & 0 \end{pmatrix} \qquad (200)$$ + +with + +$$B_x \simeq \frac{1}{2}(uu - \dot{v}\dot{v}), \quad B_y \simeq \frac{1}{2i}(uu + \dot{v}\dot{v}) \qquad (201)$$ + +$$E_x \simeq \frac{1}{2i}(uu + \dot{v}\dot{v}), \quad E_y \simeq -\frac{1}{2}(uu - \dot{v}\dot{v}) \qquad (202)$$ + +The electric and magnetic field components are perpendicular to each other. Furthermore, + +$$E_x = B_y, \quad E_y = -B_x \qquad (203)$$ + +In order to see which bilinear combinations these field components correspond to, let us go back to Equations (191) and (192). In the massless limit, + +$$B_+ \simeq E_+ \simeq uu, \quad B_- \simeq E_- \simeq \dot{v}\dot{v} \qquad (204)$$ + +The gauge transformations applicable to $u$ and $\dot{v}$ are the two-by-two matrices + +$$\begin{pmatrix} 1 & -\gamma \\ 0 & 1 \end{pmatrix}, \quad \text{and} \quad \begin{pmatrix} 1 & 0 \\ -\gamma & 1 \end{pmatrix} \qquad (205)$$ + +respectively, as noted in Sections 5.2 and 7.1. Both $u$ and $\dot{v}$ are invariant under gauge transformations, while $v$ and $\dot{u}$ are not.
+ +The $B_+$ and $E_+$ are for the photon spin along the $z$ direction, while $B_-$ and $E_-$ are for the opposite direction. In 1964 [35], Weinberg constructed gauge-invariant state vectors for massless particles starting from Wigner’s 1939 paper [1]. The bilinear spinors $uu$ and $\dot{v}\dot{v}$ correspond to Weinberg’s state vectors. + +### 8.3. Possible Symmetry of the Higgs Mechanism + +In this section, we discussed how the two-by-two formalism of the group $SL(2,c)$ leads to the scalar, four-vector, and tensor representations of the Lorentz group. We discussed in detail how the four-vector for a massive particle can be decomposed into the symmetry of a two-component massless particle and one gauge degree of freedom. This aspect was studied in detail by Kim and Wigner [20,21], and their results are illustrated in Figure 6. This decomposition is known in the literature as the group contraction. + +The four-dimensional Lorentz group can be contracted to the Euclidean and cylindrical groups. These contraction processes could transform a four-component massive vector meson into a massless spin-one particle with two spin components, and one gauge degree of freedom. + +Since this contraction procedure is spelled out in detail in [21], as well as in the present paper, its reverse process is also well understood. We start with one two-component massless particle with one gauge degree of freedom, and end up with a massive vector meson with its four components. + +The mathematics of this process is not unlike the Higgs mechanism [36,37], where one massless field with two degrees of freedom absorbs one gauge degree of freedom to become a quartet of bosons, namely that of $W^{\pm}, Z$ plus the Higgs boson. As is well known, this mechanism is the basis for the theory of electro-weak interaction formulated by Weinberg and Salam [38,39]. +---PAGE_BREAK--- + +**Figure 6.** Contractions of the three-dimensional rotation group.
(a) Contraction in terms of the tangential plane and the tangential cylinder [20]; (b) Contraction in terms of the expansion and contraction of the longitudinal axis [21]. In both cases, the symmetry ends up with one rotation around the longitudinal direction and one translational degree along the longitudinal axis. The rotation and translation corresponds to the helicity and gauge degrees of freedom. + +The word "spontaneous symmetry breaking" is used for the Higgs mechanism. It could be an interesting problem to see that this symmetry breaking for the two Higgs doublet model can be formulated in terms of the Lorentz group and its contractions. In this connection, we note an interesting recent paper by Dée and Ivanov [40]. + +# 9. Conclusions + +The damped harmonic oscillator, Wigner's little groups, and the Poincaré sphere belong to the three different branches of physics. In this paper, it was noted that they are based on the same mathematical framework, namely the algebra of two-by-two matrices. + +The second-order differential equation for damped harmonic oscillators can be formulated in terms of two-by-two matrices. These matrices produce the algebra of the group $Sp(2)$. While there are three trace classes of the two-by-two matrices of this group, the damped oscillator tells us how to make transitions from one class to another. + +It is shown that Wigner's three little groups can be defined in terms of the trace classes of the $Sp(2)$ group. If the trace is smaller than two, the little group is for massive particles. If greater than two, the little group is for imaginary-mass particles. If the trace is equal to two, the little group is for massless particles. Thus, the damped harmonic oscillator provides a procedure for transition from one little group to another. + +The Poincaré sphere contains the symmetry of the six-parameter $SL(2, c)$ group. 
Thus, the sphere provides the procedure for extending the symmetry of the little group defined within the Lorentz group of three-dimensional Minkowski space to its full Lorentz group in the four-dimensional space-time. In addition, the Poincaré sphere offers the variable which allows us to change the symmetry of a massive particle to that of a massless particle by continuously decreasing the mass. + +In this paper, we extracted the mathematical properties of Wigner's little groups from the damped harmonic oscillator and the Poincaré sphere. In so doing, we have shown that the transition from one little group to another is tangentially continuous. + +This subject was initiated by İnönü and Wigner in 1953 as the group contraction [41]. In their paper, they discussed the contraction of the three-dimensional rotation group to the two-dimensional Euclidean group with one rotational and two translational degrees of freedom. While the $O(3)$ rotation group can be illustrated by a three-dimensional sphere, the plane tangential at +---PAGE_BREAK--- + +the north pole is for the $E(2)$ Euclidean group. However, we can also consider a cylinder tangential at the equatorial belt. The resulting cylindrical group is isomorphic to the Euclidean group [20]. While the rotational degree of freedom of this cylinder is for the photon spin, the up and down translations on the surface of the cylinder correspond to the gauge degree of freedom of the photon, as illustrated in Figure 6. + +It was noted also that the Bargmann decomposition of two-by-two matrices, as illustrated in Figure 1 and Figure 2, allows us to study more detailed properties of the little groups, including space and time reflection properties. Also in this paper, we have discussed how the scalars, four-vectors, and four-tensors can be constructed from the two-by-two representation in the Lorentz-covariant world.
+ +In addition, it should be noted that the symmetry of the Lorentz group is also contained in the squeezed state of light [14] and the ABCD matrix for optical beam transfers [18]. We also mentioned the possibility of understanding the mathematics of the Higgs mechanism in terms of the Lorentz group and its contractions. + +## Acknowledgements + +In his 1939 paper [1], Wigner worked out the subgroups of the Lorentz group whose transformations leave the four momentum of a given particle invariant. In so doing, he worked out their internal space-time symmetries. In spite of its importance, this paper remains as one of the most difficult papers to understand. Wigner was eager to make his paper understandable to younger physicists. + +While he was the pioneer in introducing the mathematics of group theory to physics, he was also quite fond of using two-by-two matrices to explain group theoretical ideas. He asked one of the present authors (Young S. Kim) to rewrite his 1939 paper [1] using the language of those matrices. This is precisely what we did in the present paper. + +We are grateful to Eugene Paul Wigner for this valuable suggestion. + +## Author Contributions + +This paper is largely based on the earlier papers by Young S. Kim and Marilyn E. Noz, and those by Sibel Başkal and Young S. Kim. The two-by-two formulation of the damped oscillator in Section 2 was jointly developed by Sibel Başkal and Young S. Kim during the summer of 2012. Marilyn E. Noz developed the idea of the symmetry of small-mass neutrinos in Section 7. The limiting process in the symmetry of the Poincaré sphere was formulated by Young S. Kim. Sibel Başkal initially constructed the four-by-four tensor representation in Section 8. + +The initial organization of this paper was conceived by Young S. Kim in his attempt to follow Wigner's suggestion to translate his 1939 paper into the language of two-by-two matrices. Sibel Başkal and Marilyn E. 
Noz tightened the organization and filled in the details. + +## Conflicts of Interest + +The authors declare no conflicts of interest. + +## References + +1. Wigner, E. On unitary representations of the inhomogeneous Lorentz Group. *Ann. Math.* **1939**, *40*, 149–204. +2. Han, D.; Kim, Y.S.; Son, D. Eulerian parametrization of Wigner little groups and gauge transformations in terms of rotations in 2-component spinors. *J. Math. Phys.* **1986**, *27*, 2228–2235. +3. Born, M.; Wolf, E. *Principles of Optics*, 6th ed.; Pergamon: Oxford, UK, 1980. +---PAGE_BREAK--- + +4. Han, D.; Kim, Y.S.; Noz, M.E. Stokes parameters as a Minkowskian four-vector. Phys. Rev. E **1997**, 56, 6065-6076. + +5. Brosseau, C. *Fundamentals of Polarized Light: A Statistical Optics Approach*; John Wiley: New York, NY, USA, 1998. + +6. Başkal, S.; Kim, Y.S. De Sitter group as a symmetry for optical decoherence. J. Phys. A **2006**, 39, 7775-7788. + +7. Kim, Y.S.; Noz, M.E. Symmetries shared by the Poincaré Group and the Poincaré Sphere. *Symmetry* **2013**, *5*, 233–252. + +8. Han, D.; Kim, Y.S.; Son, D. E(2)-like little group for massless particles and polarization of neutrinos. Phys. Rev. D **1982**, *26*, 3717–3725. + +9. Han, D.; Kim, Y.S.; Son, D. Photons, neutrinos and gauge transformations. Am. J. Phys. **1986**, *54*, 818–821. + +10. Başkal, S.; Kim, Y.S. Little groups and Maxwell-type tensors for massive and massless particles. Europhys. Lett. **1997**, *40*, 375–380. + +11. Leggett, A.; Chakravarty, S.; Dorsey, A.; Fisher, M.; Garg, A.; Zwerger, W. Dynamics of the dissipative 2-state system. Rev. Mod. Phys. **1987**, *59*, 1–85. + +12. Başkal, S.; Kim, Y.S. One analytic form for four branches of the ABCD matrix. J. Mod. Opt. **2010**, *57*, 1251–1259. + +13. Başkal, S.; Kim, Y.S. Lens optics and the continuity problems of the ABCD matrix. J. Mod. Opt. **2014**, *61*, 161–166. + +14. Kim, Y.S.; Noz, M.E. 
*Theory and Applications of the Poincaré Group*; Reidel: Dordrecht, The Netherlands, 1986. + +15. Bargmann, V. Irreducible unitary representations of the Lorentz group. Ann. Math. **1947**, *48*, 568–640. + +16. Iwasawa, K. On some types of topological groups. Ann. Math. **1949**, *50*, 507–558. + +17. Guillemin, V.; Sternberg, S. *Symplectic Techniques in Physics*; Cambridge University Press: Cambridge, UK, 1984. + +18. Başkal, S.; Kim, Y.S. Lorentz Group in Ray and Polarization Optics. In *Mathematical Optics: Classical, Quantum and Computational Methods; Lakshminarayanan, V., Calvo, M.L., Alieva, T., Eds.*; CRC Taylor and Francis: New York, NY, USA, 2013; Chapter 9, pp. 303–340. + +19. Naimark, M.A. *Linear Representations of the Lorentz Group*; Pergamon: Oxford, UK, 1964. + +20. Kim, Y.S.; Wigner, E.P. Cylindrical group and massless particles. J. Math. Phys. **1987**, *28*, 1175-1179. + +21. Kim, Y.S.; Wigner, E.P. Space-time geometry of relativistic particles. J. Math. Phys. **1990**, *31*, 55-60. + +22. Georgieva, E.; Kim, Y.S. Iwasawa effects in multilayer optics. Phys. Rev. E **2001**, *64*, doi:10.1103/PhysRevE.64.026602. + +23. Saleh, B.E.A.; Teich, M.C. *Fundamentals of Photonics*, 2nd ed.; John Wiley: Hoboken, NJ, USA, 2007. + +24. Papoulias, D.K.; Kosmas, T.S. Exotic Lepton Flavour Violating Processes in the Presence of Nuclei. J. Phys.: Conf. Ser. **2013**, *410*, 012123:1-012123:5. + +25. Dinh, D.N.; Petcov, S.T.; Sasao, N.; Tanaka, M.; Yoshimura, M. Observables in neutrino mass spectroscopy using atoms. Phys. Lett. B **2013**, *719*, 154-163. + +26. Miramonti, L.; Antonelli, V. Advancements in Solar Neutrino physics. Int. J. Mod. Phys. E **2013**, *22*, 1-16. + +27. Li, Y.-F.; Cao, J.; Jun, Y.; Wang, Y.; Zhan, L. Unambiguous determination of the neutrino mass hierarchy using reactor neutrinos. Phys. Rev. D **2013**, *88*, 013008:1-013008:9. + +28. Bergstrom, J. 
Combining and comparing neutrinoless double beta decay experiments using different 584 nuclei. J. High Energy Phys. **2013**, *02*, 093:1-093:27. + +29. Han, T.; Lewis, I.; Ruiz, R.; Si, Z.-G. Lepton number violation and $W'$ chiral couplings at the LHC. Phys. Rev. D **2013**, *87*, 035011:1-035011:25. + +30. Drewes, M. The phenomenology of right handed neutrinos. Int. J. Mod. Phys. E **2013**, *22*, 1330019:1-1330019:75. + +31. Barut, A.O.; McEwan, J. The four states of the massless neutrino with pauli coupling by spin-gauge invariance. + Lett. Math. Phys. **1986**, *11*, 67–72. + +32. Palcu, A. Neutrino Mass as a consequence of the exact solution of 3-3-1 gauge models without exotic electric charges. + Mod. Phys. Lett. A **2006**, *21*, 1203–1217. + +33. Bilenky, S.M. Neutrino. + Phys. Part. Nucl. **2013**, *44*, 1–46. + +34. Alhendi, H. A.; Lashin, E. I.; Mudlej, A. A. Textures with two traceless submatrices of the neutrino mass matrix. + Phys. Rev. D **2008**, *77*, 013009:1-013009:1-13. + +35. Weinberg, S. Photons and gravitons in S-Matrix theory: Derivation of charge conservation and equality of gravitational and inertial mass. + Phys. Rev. **1964**, *135*, B1049-B1056. + +36. Higgs, P.W. Broken symmetries and the masses of gauge bosons. + Phys. Rev. Lett. **1964**, *13*, 508-509. + +Symmetry **2014**, *6*, 473–515 +---PAGE_BREAK--- + +37. Guralnik, G.S.; Hagen, C.R.; Kibble, T.W.B. Global conservation laws and massless particles. Phys. Rev. Lett. **1964**, *13*, 585–587. + +38. Weinberg, S. A model of leptons. Phys. Rev. Lett. **1967**, *19*, 1265–1266. + +39. Weinberg, S. *Quantum Theory of Fields, Volume II, Modern Applications*; Cambridge University Press: Cambridge, UK, 1996. + +40. Dée, A.; Ivanov, I.P. Higgs boson masses of the general two-Higgs-doublet model in the Minkowski-space formalism. Phys. Rev. D **2010**, *81*, 015012:1–015012:8. + +41. Inönü, E.; Wigner, E.P. On the contraction of groups and their representations. Proc. Natl. Acad. Sci. 
USA **1953**, *39*, 510–524. + +© 2014 by the authors. Licensee MDPI, Basel, Switzerland. This article is an open access +article distributed under the terms and conditions of the Creative Commons Attribution +(CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +Article + +Loop Representation of Wigner's Little Groups + +Sibel Başkal ¹, Young S. Kim ²,* and Marilyn E. Noz ³ + +¹ Department of Physics, Middle East Technical University, 06800 Ankara, Turkey; baskal@newton.physics.metu.edu.tr + +² Center for Fundamental Physics, University of Maryland College Park, Maryland, MD 20742, USA + +³ Department of Radiology, New York University, New York, NY 10016, USA; marilyn.noz@med.nyu.edu + +* Correspondence: yskim@umd.edu; Tel.: +1-301-937-6306 + +Academic Editor: Sergei D. Odintsov + +Received: 12 May 2017; Accepted: 15 June 2017; Published: 23 June 2017 + +**Abstract:** Wigner's little groups are the subgroups of the Lorentz group whose transformations leave the momentum of a given particle invariant. They thus define the internal space-time symmetries of relativistic particles. These symmetries take different mathematical forms for massive and for massless particles. However, it is shown possible to construct one unified representation using a graphical description. This graphical approach allows us to describe vividly parity, time reversal, and charge conjugation of the internal symmetry groups. As for the language of group theory, the two-by-two representation is used throughout the paper. While this two-by-two representation is for spin-1/2 particles, it is shown possible to construct the representations for spin-0 particles, spin-1 particles, as well as for higher-spin particles, for both massive and massless cases. It is shown also that the four-by-four Dirac matrices constitute a two-by-two representation of Wigner's little group. 
+ +**Keywords:** Wigner's little groups; Lorentz group; unified picture of massive and massless particles; two-by-two representations; graphical approach to internal space-time symmetries + +PACS: 02.10.Yn; 02.20.Uw; 03.65.Fd + +# 1. Introduction + +In his 1939 paper [1], Wigner introduced subgroups of the Lorentz group whose transformations leave the momentum of a given particle invariant. These subgroups are called Wigner’s little groups in the literature and are known as the symmetry groups for internal space-time structure. + +For instance, a massive particle at rest can have spin that can be rotated in three-dimensional space. +The little group in this case is the three-dimensional rotation group. For a massless particle moving +along the z direction, Wigner noted that rotations around the z axis do not change the momentum. +In addition, he found two more degrees of freedom, which together with the rotation, constitute a +subgroup locally isomorphic to the two-dimensional Euclidean group. + +However, Wigner’s 1939 paper did not deal with the following critical issues. + +1. As for the massive particle, Wigner worked out his little group in the Lorentz frame where the particle is at rest with zero momentum, resulting in the three-dimensional rotation group. He could have Lorentz-boosted the O(3)-like little group to make the little group for a moving particle. + +2. While the little group for a massless particle is like *E*(2), it is not difficult to associate the rotational degree of freedom to the helicity. However, Wigner did not give physical interpretations to the two translation-like degrees of freedom. + +3. While the Lorentz group does not allow mass variations, particles with infinite momentum should behave like massless particles. The question is whether the Lorentz-boosted O(3)-like little group becomes the *E*(2)-like little group for particles with infinite momentum. +---PAGE_BREAK--- + +These issues have been properly addressed since then [2–5]. 
The translation-like degrees of freedom for massless particles collapse into one gauge degree of freedom, and the *E*(2)-like little group can be obtained as the infinite-momentum limit of the *O*(3)-like little group. This history is summarized in Figure 1. + +**Figure 1.** *O*(3)-like and *E*(2)-like internal space-time symmetries of massive and massless particles. The sphere corresponds to the *O*(3)-like little group for the massive particle. There is a plane tangential to the sphere at its north pole, which is *E*(2). There is also a cylinder tangent to the sphere at its equatorial belt. This cylinder gives one helicity and one gauge degree of freedom. This figure thus gives a unified picture of the little groups for massive and massless particles [5]. + +In this paper, we shall present these developments using a mathematical language more transparent than those used in earlier papers. + +1. In his original paper [1], Wigner worked out his little group for the massive particle when its momentum is zero. How about moving massive particles? In this paper, we start with a moving particle with non-zero momentum. We then perform rotations and boosts whose net effect does not change the momentum [6–8]. This procedure can be applied to the massive, massless, and imaginary-mass cases. + +2. By now, we have a clear understanding of the group SL(2, c) as the universal covering group of the Lorentz group. The logic with two-by-two matrices is far more transparent than the mathematics based on four-by-four matrices. We shall thus use the two-by-two representation of the Lorentz group throughout the paper [5,9–11]. + +The purpose of this paper is to make the physics contained in Wigner’s original paper more transparent. In Section 2, we give the six generators of the Lorentz group. It is possible to write them in terms of coordinate transformations, four-by-four matrices, and two-by-two matrices. 
In Section 3, we introduce Wigner's little groups in terms of two-by-two matrices. In Section 4, it is shown possible to construct transformation matrices of the little group by performing rotations and a boost resulting in a non-trivial matrix, which leaves the given momentum invariant.

Since we are more familiar with Dirac matrices than the Lorentz group, it is shown in Section 5 that Dirac matrices are a representation of the Lorentz group, and his four-by-four matrices are

---PAGE_BREAK---

representations of the two-by-two representation of Wigner's little groups. In Section 6, we construct spin-0 and spin-1 particles for the SL(2,c) spinors. We also discuss massless higher spin particles.

## 2. Lorentz Group and Its Representations

The group of four-by-four matrices, which performs Lorentz transformations on the four-dimensional Minkowski space leaving invariant the quantity ($t^2 - z^2 - x^2 - y^2$), forms the starting point for the Lorentz group. As there are three rotation and three boost generators, the Lorentz group is a six-parameter group.

Einstein, by observing that this Lorentz group also leaves invariant $(E, p_z, p_x, p_y)$, was able to derive his Lorentz-covariant energy-momentum relation commonly known as $E = mc^2$. Thus, the particle mass is a Lorentz-invariant quantity.

The Lorentz group is generated by the three rotation operators:

$$J_i = -i \left( x_j \frac{\partial}{\partial x_k} - x_k \frac{\partial}{\partial x_j} \right), \qquad (1)$$

where $i, j, k = 1, 2, 3$, and three boost operators:

$$K_i = -i \left( t \frac{\partial}{\partial x_i} + x_i \frac{\partial}{\partial t} \right). \qquad (2)$$

These generators satisfy the closed set of commutation relations:

$$[J_i, J_j] = i\epsilon_{ijk}J_k, \quad [J_i, K_j] = i\epsilon_{ijk}K_k, \quad [K_i, K_j] = -i\epsilon_{ijk}J_k, \qquad (3)$$

which are known as the Lie algebra for the Lorentz group.
+ +Under the space inversion, $x_i \rightarrow -x_i$, or the time reflection, $t \rightarrow -t$, the boost generators $K_i$ change sign. However, the Lie algebra remains invariant, which means that the commutation relations remain invariant under Hermitian conjugation. + +In terms of four-by-four matrices applicable to the Minkowskian coordinate of $(t,z,x,y)$, the generators can be written as: + +$$J_3 = \begin{pmatrix} 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & -i \\ 0 & 0 & i & 0 \end{pmatrix}, \quad K_3 = \begin{pmatrix} 0 & i & 0 & 0 \\ i & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \end{pmatrix}, \qquad (4)$$ + +for rotations around and boosts along the z direction, respectively. Similar expressions can be written for the x and y directions. We see here that the rotation generators $J_i$ are Hermitian, but the boost generators $K_i$ are anti-Hermitian. + +We can also consider the two-by-two matrices: + +$$J_i = \frac{1}{2}\sigma_i, \quad \text{and} \quad K_i = \frac{i}{2}\sigma_i, \qquad (5)$$ + +where $\sigma_i$ are the Pauli spin matrices. These matrices also satisfy the commutation relations given in Equation (3). + +There are interesting three-parameter subgroups of the Lorentz group. In 1939 [1], Wigner considered the subgroups whose transformations leave the four-momentum of a given particle invariant. First of all, consider a massive particle at rest. The momentum of this particle is invariant under rotations in three-dimensional space. What happens for the massless particle that cannot be brought to a rest frame? In this paper we shall consider this and other problems using the two-by-two representation of the Lorentz group. +---PAGE_BREAK--- + +### 3. 
Two-by-Two Representation of Wigner's Little Groups

The six generators of Equation (5) lead to the group of two-by-two unimodular matrices of the form:

$$ G = \begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix}, \qquad (6) $$

with $\det(G) = 1$, where the matrix elements are complex numbers. There are thus six independent real numbers to accommodate the six generators given in Equation (5). The groups of matrices of this form are called SL(2, c) in the literature. Since the generators $K_i$ are not Hermitian, the matrix G is not always unitary. Its Hermitian conjugate is not necessarily the inverse.

The space-time four-vector can be written as [5,9,11]:

$$ \begin{pmatrix} t+z & x-iy \\ x+iy & t-z \end{pmatrix}, \qquad (7) $$

whose determinant is $t^2 - z^2 - x^2 - y^2$, and remains invariant under the Hermitian transformation:

$$ X' = G X G^{\dagger}. \qquad (8) $$

This is thus a Lorentz transformation. This transformation can be explicitly written as:

$$ \begin{pmatrix} t'+z' & x'-iy' \\ x'+iy' & t'-z' \end{pmatrix} = \begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix} \begin{pmatrix} t+z & x-iy \\ x+iy & t-z \end{pmatrix} \begin{pmatrix} \alpha^* & \gamma^* \\ \beta^* & \delta^* \end{pmatrix}. \qquad (9) $$

With these six independent real parameters, it is possible to construct four-by-four matrices for Lorentz transformations applicable to the four-dimensional Minkowskian space [5,12]. For the purpose of the present paper, we need some special cases, and they are given in Table 1.

Table 1. Two-by-two and four-by-four representations of the Lorentz group. The four-by-four matrices are applicable to the Minkowskian coordinates $(t, z, x, y)$.

| Generator | Two-by-Two Transformation | Four-by-Four Transformation |
|---|---|---|
| $J_3 = \frac{1}{2}\begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix}$ | $\begin{pmatrix} e^{i\phi/2} & 0 \\ 0 & e^{-i\phi/2} \end{pmatrix}$ | $\begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & \cos\phi & -\sin\phi \\ 0 & 0 & \sin\phi & \cos\phi \end{pmatrix}$ |
| $K_3 = \frac{1}{2}\begin{pmatrix} i & 0 \\ 0 & -i \end{pmatrix}$ | $\begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix}$ | $\begin{pmatrix} \cosh\eta & \sinh\eta & 0 & 0 \\ \sinh\eta & \cosh\eta & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}$ |
| $J_1 = \frac{1}{2}\begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}$ | $\begin{pmatrix} \cos(\theta/2) & -i\sin(\theta/2) \\ -i\sin(\theta/2) & \cos(\theta/2) \end{pmatrix}$ | $\begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & \cos\theta & 0 & -\sin\theta \\ 0 & 0 & 1 & 0 \\ 0 & \sin\theta & 0 & \cos\theta \end{pmatrix}$ |
| $K_1 = \frac{1}{2}\begin{pmatrix} 0 & i \\ i & 0 \end{pmatrix}$ | $\begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix}$ | $\begin{pmatrix} \cosh\lambda & 0 & \sinh\lambda & 0 \\ 0 & 1 & 0 & 0 \\ \sinh\lambda & 0 & \cosh\lambda & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}$ |
| $J_2 = \frac{1}{2}\begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix}$ | $\begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix}$ | $\begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & \cos\theta & -\sin\theta & 0 \\ 0 & \sin\theta & \cos\theta & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}$ |
| $K_2 = \frac{1}{2}\begin{pmatrix} 0 & 1 \\ -1 & 0 \end{pmatrix}$ | $\begin{pmatrix} \cosh(\lambda/2) & -i\sinh(\lambda/2) \\ i\sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix}$ | $\begin{pmatrix} \cosh\lambda & 0 & 0 & \sinh\lambda \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ \sinh\lambda & 0 & 0 & \cosh\lambda \end{pmatrix}$ |

---PAGE_BREAK---

Likewise, the two-by-two matrix for the four-momentum takes the form:

$$P = \begin{pmatrix} p_0 + p_z & p_x - ip_y \\ p_x + ip_y & p_0 - p_z \end{pmatrix}, \qquad (10)$$

with $p_0 = \sqrt{m^2 + p_z^2 + p_x^2 + p_y^2}$. The transformation property of Equation (9) is applicable also to this energy-momentum four-vector.

In 1939 [1], Wigner considered the following three four-vectors:

$$P_+ = \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix}, \quad P_0 = \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix}, \quad P_- = \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix}, \qquad (11)$$

whose determinants are 1, 0, and -1, respectively, corresponding to the four-momenta of massive, massless, and imaginary-mass particles, as shown in Table 2.

Table 2. The Wigner momentum vectors in the two-by-two matrix representation together with the corresponding transformation matrix. These four-momentum matrices have determinants that are positive, zero, and negative for massive, massless, and imaginary-mass particles, respectively.
| Particle Mass | Four-Momentum | Transform Matrix |
|---|---|---|
| Massive | $\begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix}$ | $\begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix}$ |
| Massless | $\begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix}$ | $\begin{pmatrix} 1 & -\gamma \\ 0 & 1 \end{pmatrix}$ |
| Imaginary mass | $\begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix}$ | $\begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix}$ |

He then constructed the subgroups of the Lorentz group whose transformations leave these four-momenta invariant. These subgroups are called Wigner's little groups in the literature. Thus, the matrices of these little groups should satisfy:

$$W P_i W^\dagger = P_i, \qquad (12)$$

where $i = +, 0, -$.

Since the momentum of the particle is fixed, these little groups define the internal space-time symmetries of the particle. For all three cases, the momentum is invariant under rotations around the z axis, as can be seen from the expression given for the rotation matrix generated by $J_3$ given in Table 1.

For the first case corresponding to a massive particle at rest, the requirement of the subgroup is:

$$W P_+ W^\dagger = P_+. \qquad (13)$$

This requirement tells us that the subgroup is the rotation subgroup with the rotation matrix around the y direction:

$$R(\theta) = \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix}. \qquad (14)$$

For the second case of $P_0$, the triangular matrix of the form:

$$\Gamma(\xi) = \begin{pmatrix} 1 & -\xi \\ 0 & 1 \end{pmatrix}, \qquad (15)$$

---PAGE_BREAK---

satisfies the Wigner condition of Equation (12). If we allow rotations around the z axis, the expression becomes:

$$ \Gamma(\xi, \phi) = \begin{pmatrix} 1 & -\xi \exp(-i\phi) \\ 0 & 1 \end{pmatrix}. \quad (16) $$

This matrix is generated by:

$$ N_1 = J_2 - K_1 = \begin{pmatrix} 0 & -i \\ 0 & 0 \end{pmatrix}, \quad \text{and} \quad N_2 = J_1 + K_2 = \begin{pmatrix} 0 & 1 \\ 0 & 0 \end{pmatrix}. \quad (17) $$

Thus, the little group is generated by $J_3$, $N_1$, and $N_2$. They satisfy the commutation relations:

$$ [N_1, N_2] = 0, \quad [J_3, N_1] = iN_2, \quad [J_3, N_2] = -iN_1. \quad (18) $$

Wigner in 1939 [1] observed that this set is the same as that of the two-dimensional Euclidean group with one rotation and two translations.
The physical interpretation of the rotation is easy to understand. It is the helicity of the massless particle. On the other hand, the physics of the $N_1$ and $N_2$ matrices has a stormy history, and the issue was not completely settled until 1990 [4]. They generate gauge transformations. + +For the third case of $P_-$, the matrix of the form: + +$$ S(\lambda) = \begin{pmatrix} \cosh(\lambda/2) & \sinh(\lambda/2) \\ \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix}, \quad (19) $$ + +satisfies the Wigner condition of Equation (12). This corresponds to the Lorentz boost along the x direction generated by $K_1$ as shown in Table 1. Because of the rotation symmetry around the z axis, the Wigner condition is satisfied also by the boost along the y axis. The little group is thus generated by $J_3$, $K_1$, and $K_2$. These three generators: + +$$ [J_3, K_1] = iK_2, \quad [J_3, K_2] = -iK_1, \quad [K_1, K_2] = -iJ_3 \quad (20) $$ + +form the little group $O(2, 1)$, which is the Lorentz group applicable to two space-like and one time-like dimensions. + +Of course, we can add rotations around the z axis. Let us Lorentz-boost these matrices along the z direction with the diagonal matrix: + +$$ B(\eta) = \begin{pmatrix} \exp(\eta/2) & 0 \\ 0 & \exp(-\eta/2) \end{pmatrix}. \quad (21) $$ + +Then, the matrices of Equations (14), (15), and (19) become: + +$$ B(\eta)R(\theta)B(-\eta) = \begin{pmatrix} \cos(\theta/2) & -e^{\eta} \sin(\theta/2) \\ e^{-\eta} \sin(\theta/2) & \cos(\theta/2) \end{pmatrix}, \quad (22) $$ + +$$ B(\eta)\Gamma(\xi)B(-\eta) = \begin{pmatrix} 1 & -e^{\eta}\xi \\ 0 & 1 \end{pmatrix}, \quad (23) $$ + +$$ B(\eta)S(-\lambda)B(-\eta) = \begin{pmatrix} \cosh(\lambda/2) & -e^{\eta} \sinh(\lambda/2) \\ -e^{-\eta} \sinh(\lambda/2) & \cosh(\lambda/2) \end{pmatrix}, \quad (24) $$ + +respectively. We have changed the sign of $\lambda$ for future convenience. 
+---PAGE_BREAK--- + +When $\eta$ becomes large, $\theta$, $\tilde{\epsilon}$, and $\lambda$ should become small if the upper-right elements of these three matrices are to remain finite. In that case, the diagonal elements become one, and all three matrices become like the triangular matrix: + +$$ \begin{pmatrix} 1 & -\gamma \\ 0 & 1 \end{pmatrix}. \tag{25} $$ + +Here comes the question of whether the matrix of Equation (24) can be continued from Equation (22), via Equation (23). For this purpose, let us write Equation (22) as: + +$$ \begin{pmatrix} 1 - \frac{(\gamma\epsilon)^2}{2} & -\gamma \\ \gamma\epsilon^2 & 1 - \frac{(\gamma\epsilon)^2}{2} \end{pmatrix}, \tag{26} $$ + +for small $\theta = 2\gamma\epsilon$, with $\epsilon = e^{-\eta}$. For Equation (24), we can write: + +$$ \begin{pmatrix} 1 + \frac{(\gamma\epsilon)^2}{2} & -\gamma \\ -\gamma\epsilon^2 & 1 + \frac{(\gamma\epsilon)^2}{2} \end{pmatrix}, \tag{27} $$ + +with $\lambda = -2\gamma\epsilon$. Both of these expressions become the triangular matrix of Equation (25) when $\epsilon = 0$. For small values of $\epsilon$, the diagonal elements change from $\cos(\theta/2)$ to $\cosh(\lambda/2)$ while $\sin(\theta/2)$ becomes $-\sinh(\lambda/2)$. Thus, it is possible to continue from Equation (22) to Equation (24). The mathematical details of this process have been discussed in our earlier paper on this subject [13]. + +We are then led to the question of whether there is one expression that will take care of all three cases. We shall discuss this issue in Section 4. + +**4. Loop Representation of Wigner's Little Groups** + +It was noted in Section 3 that matrices of Wigner’s little group take different forms for massive, massless, and imaginary-mass particles. In this section, we construct one two-by-two matrix that works for all three different cases. + +In his original paper [1], Wigner constructs those matrices in specific Lorentz frames. 
For instance, for a moving massive particle with a non-zero momentum, Wigner brings it to the rest frame and works out the *O*(3) subgroup of the Lorentz group as the little group for this massive particle. In order to complete the little group, we should boost this *O*(3) to the frame with the original non-zero momentum [4]. + +In this section, we construct transformation matrices without changing the momentum. Let us assume that the momentum is along the z direction; the rotation around the z axis leaves the momentum invariant. According to the Euler decomposition, the rotation around the y axis, in addition, will accommodate rotations along all three directions. For this reason, it is enough to study what happens in transformations within the xz plane [14]. + +It was Kupersztych [6] who showed in 1976 that it is possible to construct a momentum-preserving transformation by a rotation followed by a boost as shown in Figure 2. In 1981 [7], Han and Kim showed that the boost can be decomposed into two components as illustrated in Figure 2. In 1988 [8], Han and Kim showed that the same purpose can be achieved by one boost preceded and followed by the same rotation matrix, as shown also in Figure 2. We choose to call this loop the “D loop” and write the transformation matrix as: + +$$ D(\alpha, \chi) = R(\alpha)S(-2\chi)R(\alpha). \tag{28} $$ +---PAGE_BREAK--- + +**Figure 2.** Evolution of the Wigner loop. In 1976 [6], Kupersztych considered a rotation followed by a boost whose net result will leave the momentum invariant. In 1981 [7], Han and Kim considered the same problem with simpler forms for boost matrices. In 1988, Han and Kim [8] constructed the Lorentz kinematics corresponding to the Bargmann decomposition [10] consisting of one boost matrix sandwiched by two rotation matrices. In the present case, the two rotation matrices are identical. + +The *D* matrix can now be written as three matrices. 
This form is known in the literature as the Bargmann decomposition [10]. This form gives additional convenience. When we take the inverse or the Hermitian conjugate, we have to reverse the order of matrices. However, this particular form does not require re-ordering. + +The *D* matrix of Equation (28) becomes: + +$$ D(\alpha, \chi) = \begin{pmatrix} (\cos \alpha) \cosh \chi & -\sinh \chi - (\sin \alpha) \cosh \chi \\ -\sinh \chi + (\sin \alpha) \cosh \chi & (\cos \alpha) \cosh \chi \end{pmatrix}. \quad (29) $$ + +If the diagonal element is smaller than one with $((\cos \alpha) \cosh \chi) < 1$, the off-diagonal elements have opposite signs. Thus, this *D* matrix can serve as the Wigner matrix of Equation (22) for massive particles. If the diagonal elements are one, one of the off-diagonal elements vanishes, and this matrix becomes triangular like Equation (23). If the diagonal elements are greater than one with $((\cos \alpha) \cosh \chi) > 1$, this matrix can become Equation (24). In this way, the matrix of Equation (28) can accommodate the three different expressions given in Equations (22)–(24). + +### 4.1. Continuity Problems + +Let us go back to the three separate formulas given in Equations (22)–(24). If $\eta$ becomes infinity, all three of them become triangular. For the massive particle, $\tanh \eta$ is the particle speed, and: + +$$ \tanh \eta = \frac{p}{p_0}, \quad (30) $$ + +where *p* and $p_0$ are the momentum and energy of the particle, respectively. +When the particle is massive with $m^2 > 0$, the ratio: + +$$ \frac{\text{lower-left element}}{\text{upper-right element}'} \quad (31) $$ + +is negative and is: + +$$ -e^{-2\eta} = \frac{1 - \sqrt{1 + m^2/p^2}}{1 + \sqrt{1 + m^2/p^2}}. \quad (32) $$ +---PAGE_BREAK--- + +If the mass is imaginary with $m^2 < 0$, the ratio is positive and: + +$$e^{-2\eta} = \frac{1 - \sqrt{1 + m^2/p^2}}{1 + \sqrt{1 + m^2/p^2}} \quad (33)$$ + +This ratio is zero for massless particles. 
This means that when $m^2$ changes from positive to negative, the ratio changes from $-e^{-2\eta}$ to $e^{-2\eta}$. This transition is continuous, but not analytic. This aspect of non-analytic continuity has been discussed in one of our earlier papers [13]. + +The *D* matrix of Equation (29) combines all three matrices given in Equations (22)–(24) into one matrix. For this matrix, the ratio of Equation (31) becomes: + +$$\frac{\tanh \chi - \sin \alpha}{\tanh \chi + \sin \alpha} = \frac{1 - \sqrt{1 + (m/p)^2}}{1 + \sqrt{1 + (m/p)^2}} \quad (34)$$ + +Thus, + +$$\frac{m^2}{p^2} = \left( \frac{\sin \alpha}{\tanh \chi} \right)^2 - 1. \quad (35)$$ + +For the *D* loop of Figure 2, both $\tanh \chi$ and $\sin \alpha$ range from 0–1, as illustrated in Figure 3. For small values of the mass for a fixed value of the momentum, this expression becomes: + +$$-\frac{m^2}{4p^2}. \quad (36)$$ + +Thus, the change from positive values of $m^2$ to negative values is continuous and analytic. For massless particles, $m^2$ is zero, while it is negative for imaginary-mass particles. + +We realize that the mass cannot be changed within the frame of the Lorentz group and that both $\alpha$ and $\eta$ are parameters of the Lorentz group. On the other hand, their combinations according to the *D* loop of Figure 2 can change the value of $m^2$ according to Equation (35) and Figure 3. + +**Figure 3.** Non-Lorentzian transformations allowing mass variations. The *D* matrix of Equation (29) allows us to change the $\chi$ and $\alpha$ analytically within the square region in (a). These variations allow the mass variations illustrated in (b), not allowed in Lorentz transformations. The Lorentz transformations are possible along the hyperbolas given in this figure. + +## 4.2. 
Parity, Time Reversal, and Charge Conjugation + +Space inversion leads to the sign change in $\chi$: + +$$D(\alpha, -\chi) = \begin{pmatrix} (\cos \alpha) \cosh \chi & \sinh \chi - (\sin \alpha) \cosh \chi \\ \sinh \chi + (\sin \alpha) \cosh \chi & (\cos \alpha) \cosh \chi \end{pmatrix}, \quad (37)$$ +---PAGE_BREAK--- + +and time reversal leads to the sign change in both $\alpha$ and $\chi$: + +$$D(-\alpha, -\chi) = \begin{pmatrix} (\cos \alpha) \cosh \chi & \sinh \chi + (\sin \alpha) \cosh \chi \\ \sinh \chi - (\sin \alpha) \cosh \chi & (\cos \alpha) \cosh \chi \end{pmatrix}. \quad (38)$$ + +If we space-invert this expression, the result is a change only in the direction of rotation, + +$$D(-\alpha, \chi) = \begin{pmatrix} (\cos \alpha) \cosh \chi & -\sinh \chi + (\sin \alpha) \cosh \chi \\ -\sinh \chi - (\sin \alpha) \cosh \chi & (\cos \alpha) \cosh \chi \end{pmatrix}. \quad (39)$$ + +The combined transformation of space inversion and time reversal is known as the “charge conjugation”. All of these transformations are illustrated in Figure 4. + +Figure 4. Parity, time reversal, and charge conjugation of Wigner’s little groups in the loop representation. + +Let us go back to the Lie algebra of Equation (3). This algebra is invariant under Hermitian conjugation. This means that there is another set of commutation relations, + +$$[J_i, J_j] = i\epsilon_{ijk}J_k, \quad [J_i, \hat{K}_j] = i\epsilon_{ijk}\hat{K}_k, \quad [\hat{K}_i, \hat{K}_j] = -i\epsilon_{ijk}J_k, \quad (40)$$ + +where $K_i$ is replaced with $\hat{K}_i = -K_i$. Let us go back to the expression of Equation (2). This transition to the dotted representation is achieved by the space inversion or by the parity operation. + +On the other hand, the complex conjugation of the Lie algebra of Equation (3) leads to: + +$$[J_i^*, J_j^*] = -i\epsilon_{ijk}J_k^*, \quad [J_i^*, K_j^*] = -i\epsilon_{ijk}K_k^*, \quad [K_i^*, K_j^*] = i\epsilon_{ijk}J_k^*. 
\quad (41)$$ +---PAGE_BREAK--- + +It is possible to restore this algebra to that of the original form of Equation (3) if we replace $J_i^*$ by $-J_i$ and $K_i^*$ by $-K_i$. This corresponds to the time-reversal process. This operation is known as the anti-unitary transformation in the literature [15,16]. + +Since the algebras of Equations (3) and (41) are invariant under the sign change of $K_i$ and $K_i^*$, respectively, there is another Lie algebra with $J_i^*$ replaced by $-J_i$ and $K_i^*$ by $-K_i$. This is the parity operation followed by time reversal, resulting in charge conjugation. With the four-by-four matrices for spin-1 particles, this complex conjugation is trivial, and $J_i^* = -J_i$, as well as $K_i^* = -K_i$. + +On the other hand, for spin 1/2 particles, we note that: + +$$ +\begin{aligned} +J_1^* &= J_1, & J_2^* &= -J_2, & J_3^* &= J_3, \\ +K_1^* &= -K_1, & K_2^* &= K_2, & K_3^* &= -K_3. +\end{aligned} +\quad (42) $$ + +Thus, $J_i^*$ should be replaced by $\sigma_2 J_i \sigma_2$, and $K_i^*$ by $-\sigma_2 K_i \sigma_2$. + +**5. Dirac Matrices as a Representation of the Little Group** + +The Dirac equation, Dirac matrices, and Dirac spinors constitute the basic language for spin-1/2 particles in physics. Yet, they are not widely recognized as the package for Wigner's little group. Yes, the little group is for spins, so are the Dirac matrices. + +Let us write the Dirac equation as: + +$$ (p \cdot \gamma - m)\psi(\vec{x}, t) = \lambda\psi(\vec{x}, t). 
\quad (43) $$ + +This equation can be explicitly written as: + +$$ \left( -i\gamma_0 \frac{\partial}{\partial t} - i\gamma_1 \frac{\partial}{\partial x} - i\gamma_2 \frac{\partial}{\partial y} - i\gamma_3 \frac{\partial}{\partial z} - m \right) \psi(\vec{x}, t) = \lambda \psi(\vec{x}, t), \quad (44) $$ + +where: + +$$ \gamma_0 = \begin{pmatrix} 0 & I \\ I & 0 \end{pmatrix}, \quad \gamma_1 = \begin{pmatrix} 0 & \sigma_1 \\ -\sigma_1 & 0 \end{pmatrix}, \quad \gamma_2 = \begin{pmatrix} 0 & \sigma_2 \\ -\sigma_2 & 0 \end{pmatrix}, \quad \gamma_3 = \begin{pmatrix} 0 & \sigma_3 \\ -\sigma_3 & 0 \end{pmatrix}, \quad (45) $$ + +where *I* is the two-by-two unit matrix. We use here the Weyl representation of the Dirac matrices. + +The Dirac spinor has four components. Thus, we write the wave function for a free particle as: + +$$ \psi(\vec{x}, t) = U_{\pm} \exp [i (\vec{p} \cdot \vec{x} - p_0 t)], \quad (46) $$ + +with the Dirac spinor: + +$$ U_{+} = \begin{pmatrix} u \\ \dot{u} \end{pmatrix}, \qquad U_{-} = \begin{pmatrix} v \\ \dot{v} \end{pmatrix}, \quad (47) $$ + +where: + +$$ u = \dot{u} = \begin{pmatrix} 1 \\ 0 \end{pmatrix}, \quad \text{and} \quad v = \dot{v} = \begin{pmatrix} 0 \\ 1 \end{pmatrix}. \quad (48) $$ + +In Equation (46), the exponential form $\exp[i(\vec{p} \cdot \vec{x} - p_0 t)]$ defines the particle momentum, and the column vector $U_{\pm}$ is for the representation space for Wigner's little group dictating the internal space-time symmetries of spin-1/2 particles. + +In this four-by-four representation, the generators for rotations and boosts take the form: + +$$ J_i = \frac{1}{2} \begin{pmatrix} \sigma_i & 0 \\ 0 & \sigma_i \end{pmatrix}, \quad \text{and} \quad K_i = \frac{i}{2} \begin{pmatrix} \sigma_i & 0 \\ 0 & -\sigma_i \end{pmatrix}. \quad (49) $$ +---PAGE_BREAK--- + +This means that both dotted and undotted spinor are transformed in the same way under rotation, while they are boosted in the opposite directions. 
When this $\gamma_0$ matrix is applied to $U_\pm$:

$$ \gamma_0 U_+ = \begin{pmatrix} 0 & I \\ I & 0 \end{pmatrix} \begin{pmatrix} u \\ \dot{u} \end{pmatrix} = \begin{pmatrix} \dot{u} \\ u \end{pmatrix}, \quad \text{and} \quad \gamma_0 U_- = \begin{pmatrix} 0 & I \\ I & 0 \end{pmatrix} \begin{pmatrix} v \\ \dot{v} \end{pmatrix} = \begin{pmatrix} \dot{v} \\ v \end{pmatrix}. \qquad (50) $$

Thus, the $\gamma_0$ matrix interchanges the dotted and undotted spinors. The four-by-four matrix for the rotation around the y axis is:

$$ R_{44}(\theta) = \begin{pmatrix} R(\theta) & 0 \\ 0 & R(\theta) \end{pmatrix}, \qquad (51) $$

while the matrix for the boost along the z direction is:

$$ B_{44}(\eta) = \begin{pmatrix} B(\eta) & 0 \\ 0 & B(-\eta) \end{pmatrix}, \qquad (52) $$

with:

$$ B(\pm\eta) = \begin{pmatrix} e^{\pm\eta/2} & 0 \\ 0 & e^{\mp\eta/2} \end{pmatrix}. \qquad (53) $$

These $\gamma$ matrices satisfy the anticommutation relations:

$$ \{\gamma_{\mu}, \gamma_{\nu}\} = 2g_{\mu\nu}, \qquad (54) $$

where:

$$ g_{00} = 1, \quad g_{11} = g_{22} = g_{33} = -1, $$

$$ g_{\mu\nu} = 0 \quad \text{if } \mu \neq \nu. \qquad (55) $$

Let us consider space inversion with the exponential form changing to $\exp[i(-\vec{p} \cdot \vec{x} - p_0t)]$. For this purpose, we can change the sign of $\vec{x}$ in the Dirac equation of Equation (44). It then becomes:

$$ \left(-i\gamma_0 \frac{\partial}{\partial t} + i\gamma_1 \frac{\partial}{\partial x} + i\gamma_2 \frac{\partial}{\partial y} + i\gamma_3 \frac{\partial}{\partial z} - m\right) \psi(-\vec{x}, t) = \lambda \psi(-\vec{x}, t). \qquad (56) $$

Since $\gamma_0\gamma_i = -\gamma_i\gamma_0$ for $i=1,2,3$,

$$ \left(-i\gamma_0 \frac{\partial}{\partial t} - i\gamma_1 \frac{\partial}{\partial x} - i\gamma_2 \frac{\partial}{\partial y} - i\gamma_3 \frac{\partial}{\partial z} - m\right) [\gamma_0\psi(-\vec{x}, t)] = \lambda[\gamma_0\psi(-\vec{x}, t)].
\qquad (57) $$ + +This is the Dirac equation for the wave function under the space inversion or the parity operation. The Dirac spinor $U_\pm$ becomes $\gamma_0 U_\pm$, according to Equation (50). This operation is illustrated in Table 3 and Figure 4. + +**Table 3.** Parity, charge conjugation, and time reversal in the loop representation. + +
|  | Without Time Reversal | With Time Reversal |
|---|---|---|
| Start with | $R(\alpha)S(-2\chi)R(\alpha)$ | Time reversal: $R(-\alpha)S(2\chi)R(-\alpha)$ |
| Space inversion | Parity: $R(\alpha)S(2\chi)R(\alpha)$ | Charge conjugation: $R(-\alpha)S(-2\chi)R(-\alpha)$ |
+---PAGE_BREAK--- + +We are interested in changing the sign of $t$. First, we can change both space and time variables, and then, we can change the space variable. We can take the complex conjugate of the equation first. Since $\gamma_2$ is imaginary, while all others are real, the Dirac equation becomes: + +$$ \left( i\gamma_0 \frac{\partial}{\partial t} + i\gamma_1 \frac{\partial}{\partial x} - i\gamma_2 \frac{\partial}{\partial y} + i\gamma_3 \frac{\partial}{\partial z} - m \right) \psi^*(\vec{x}, t) = \lambda \psi^*(\vec{x}, t). \quad (58) $$ + +We are now interested in restoring this equation to the original form of Equation (44). In order to achieve this goal, let us consider $(\gamma_1 \gamma_3)$. This form commutes with $\gamma_0$ and $\gamma_2$ and anti-commutes with $\gamma_1$ and $\gamma_3$. Thus, + +$$ \left(-i\gamma_0 \frac{\partial}{\partial t} - i\gamma_1 \frac{\partial}{\partial x} - i\gamma_2 \frac{\partial}{\partial y} - i\gamma_3 \frac{\partial}{\partial z} - m\right) (\gamma_1 \gamma_3) \psi^*(\vec{x}, -t) = \lambda (\gamma_1 \gamma_3) \psi^*(\vec{x}, -t). \quad (59) $$ + +Furthermore, since: + +$$ \gamma_1 \gamma_3 = \begin{pmatrix} i\sigma_2 & 0 \\ 0 & i\sigma_2 \end{pmatrix}, \quad (60) $$ + +this four-by-four matrix changes the direction of the spin. Indeed, this form of time reversal is consistent with Table 3 and Figure 4. + +Finally, let us change the signs of both $\vec{x}$ and $t$. For this purpose, we go back to the complex-conjugated Dirac equation of Equation (43). Here, $\gamma_2$ anti-commutes with all others. Thus, the wave function: + +$$ \gamma_2 \psi(-\vec{x} \cdot \vec{p}, -p_0 t), \quad (61) $$ + +should satisfy the Dirac equation. This form is known as the charge-conjugated wave function, and it is also illustrated in Table 3 and Figure 4. + +## 5.1. 
Polarization of Massless Neutrinos + +For massless neutrinos, the little group consists of rotations around the z axis, in addition to $N_i$ and $\tilde{N}_i$ applicable to the upper and lower components of the Dirac spinors. Thus, the four-by-four matrix for these generators is: + +$$ N_{44(i)} = \begin{pmatrix} N_i & 0 \\ 0 & \tilde{N}_i \end{pmatrix}. \quad (62) $$ + +The transformation matrix is thus: + +$$ D_{44}(\alpha, \beta) = \exp(-i\alpha N_{44(1)} - i\beta N_{44(2)}) = \begin{pmatrix} D(\alpha, \beta) & 0 \\ 0 & \tilde{D}(\alpha, \beta) \end{pmatrix}, \quad (63) $$ + +with: + +$$ D(\alpha, \beta) = \begin{pmatrix} 1 & \alpha - i\beta \\ 0 & 1 \end{pmatrix}, \qquad \tilde{D}(\alpha, \beta) = \begin{pmatrix} 1 & 0 \\ -\alpha - i\beta & 1 \end{pmatrix}. \quad (64) $$ + +As is illustrated in Figure 1, the $D$ transformation performs the gauge transformation on massless photons. Thus, this transformation allows us to extend the concept of gauge transformations to massless spin-1/2 particles. With this point in mind, let us see what happens when this $D$ transformation is applied to the Dirac spinors. + +$$ D(\alpha, \beta)u = u, \qquad \tilde{D}(\alpha, \beta)\dot{v} = \dot{v}. \quad (65) $$ + +Thus, $u$ and $\dot{v}$ are invariant gauge transformations. +---PAGE_BREAK--- + +What happens to $v$ and $\dot{u}$? + +$$D(\alpha, \beta)v = v + (\alpha - i\beta)u, \quad \dot{D}(\alpha, \beta)\dot{u} = \dot{u} - (\alpha + i\beta)\dot{v}. \qquad (66)$$ + +These spinors are not invariant under gauge transformations [17,18]. + +Thus, the Dirac spinor: + +$$U_{\text{inv}} = \begin{pmatrix} u \\ \dot{v} \end{pmatrix}, \qquad (67)$$ + +is gauge-invariant while the spinor: + +$$U_{\text{non}} = \begin{pmatrix} v \\ \dot{u} \end{pmatrix}, \qquad (68)$$ + +is not. Thus, gauge invariance leads to the polarization of massless spin-1/2 particles. Indeed, this is what we observe in the real world. + +## 5.2. 
Small-Mass Neutrinos + +Neutrino oscillation experiments presently suggest that neutrinos have a small, but finite mass [19]. If neutrinos have mass, there should be a Lorentz frame in which they can be brought to rest with an $O(3)$-like $SU(2)$ little group for their internal space-time symmetry. However, it is not likely that at-rest neutrinos will be found anytime soon. In the meantime, we have to work with the neutrino with a fixed momentum and a small mass [20]. Indeed, the present loop representation is suitable for this problem. + +Since the mass is so small, it is appropriate to approach this small-mass problem as a departure from the massless case. In Section 5.1, it was noted that the polarization of massless neutrinos is a consequence of gauge invariance. Let us start with a left-handed massless neutrino with the spinor: + +$$\dot{v} = \begin{pmatrix} 0 \\ 1 \end{pmatrix}, \qquad (69)$$ + +and the gauge transformation applicable to this spinor: + +$$\Gamma(\gamma) = \begin{pmatrix} 1 & 0 \\ \gamma & 1 \end{pmatrix}. \qquad (70)$$ + +Since: + +$$\begin{pmatrix} 1 & 0 \\ \gamma & 1 \end{pmatrix} \begin{pmatrix} 0 \\ 1 \end{pmatrix} = \begin{pmatrix} 0 \\ 1 \end{pmatrix}, \qquad (71)$$ + +the spinor of Equation (69) is invariant under the gauge transformation of Equation (70). + +If the neutrino has a small mass, the transformation matrix is for a rotation. However, for a small non-zero mass, the deviation from the triangular form is small. The procedure for deriving the Wigner matrix for this case is given toward the end of Section 3. The matrix in this case is: + +$$\mathcal{D}(\gamma) = \begin{pmatrix} 1 - (\gamma\epsilon)^2/2 & -\gamma\epsilon^2 \\ \gamma & 1 - (\gamma\epsilon)^2/2 \end{pmatrix}, \qquad (72)$$ + +with $\epsilon^2 = m/p$, where *m* and *p* are the mass and momentum of the neutrino, respectively. This matrix becomes the gauge transformation of Equation (70) for $\epsilon = 0$. 
If this matrix is applied to the spinor of Equation (69), it becomes: + +$$D(\gamma)\dot{v} = \begin{pmatrix} -\gamma\epsilon^2 \\ 1 \end{pmatrix}. \qquad (73)$$ +---PAGE_BREAK--- + +In this way, the left-handed neutrino gains a right-handed component. We took into account that $(\gamma e)^2$ is much smaller than one. + +Since massless neutrinos are gauge independent, we cannot measure the value of $\gamma$. For the small-mass case, we can determine this value from the measured values of $m/p$ and the density of right-handed neutrinos. + +## 6. Scalars, Vectors, and Tensors + +We are quite familiar with the process of constructing three spin-1 states and one spin-0 state from two spinors. Since each spinor has two states, there are four states if combined. + +In the Lorentz-covariant world, for each spin-1/2 particle, there are two additional two-component spinors coming from the dotted representation [12,21–23]. There are thus four states. If two spinors are combined, there are 16 states. In this section, we show that they can be partitioned into + +1. scalar with one state, + +2. pseudo-scalar with one state, + +3. four-vector with four states, + +4. axial vector with four states, + +5. second-rank tensor with six states. + +These quantities contain sixteen states. We made an attempt to construct these quantities in our earlier publication [5], but this earlier version is not complete. There, we did not take into account the parity operation properly. We thus propose to complete the job in this section. + +For particles at rest, it is known that the addition of two one-half spins result in spin-zero and spin-one states. Hence, we have two different spinors behaving differently under the Lorentz boost. Around the z direction, both spinors are transformed by: + +$$Z(\phi) = \exp(-i\phi J_3) = \begin{pmatrix} e^{-i\phi/2} & 0 \\ 0 & e^{i\phi/2} \end{pmatrix}. 
\qquad (74)$$ + +However, they are boosted by: + +$$B(\eta) = \exp(-i\eta K_3) = \begin{pmatrix} e^{\eta/2} & 0 \\ 0 & e^{-\eta/2} \end{pmatrix},$$ + +$$\dot{B}(\eta) = \exp(i\eta K_3), = \begin{pmatrix} e^{-\eta/2} & 0 \\ 0 & e^{\eta/2} \end{pmatrix}, \qquad (75)$$ + +which are applicable to the undotted and dotted spinors, respectively. These two matrices commute with each other and also with the rotation matrix $Z(\phi)$ of Equation (74). Since $K_3$ and $J_3$ commute with each other, we can work with the matrix $Q(\eta, \phi)$ defined as: + +$$Q(\eta, \phi) = B(\eta)Z(\phi) = \begin{pmatrix} e^{(\eta-i\phi)/2} & 0 \\ 0 & e^{-(\eta-i\phi)/2} \end{pmatrix},$$ + +$$\dot{Q}(\eta, \phi) = \dot{B}(\eta)\dot{Z}(\phi) = \begin{pmatrix} e^{-(\eta+i\phi)/2} & 0 \\ 0 & e^{(\eta+i\phi)/2} \end{pmatrix}. \qquad (76)$$ + +When this combined matrix is applied to the spinors, + +$$Q(\eta, \phi)u = e^{(\eta-i\phi)/2}u, \quad Q(\eta, \phi)v = e^{-(\eta-i\phi)/2}v,$$ + +$$\dot{Q}(\eta, \phi)\dot{u} = e^{-(\eta+i\phi)/2}\dot{u}, \quad \dot{Q}(\eta, \phi)\dot{v} = e^{(\eta+i\phi)/2}\dot{v}. \qquad (77)$$ +---PAGE_BREAK--- + +If the particle is at rest, we can explicitly construct the combinations: + +$$uu, \quad \frac{1}{\sqrt{2}}(uv + vu), \quad vv, \tag{78}$$ + +to obtain the spin-1 state and: + +$$\frac{1}{\sqrt{2}}(uv - vu), \tag{79}$$ + +for the spin-zero state. This results in four bilinear states. In the $SL(2, c)$ regime, there are two dotted spinors, which result in four more bilinear states. If we include both dotted and undotted spinors, there are sixteen independent bilinear combinations. They are given in Table 4. This table also gives the effect of the operation of $Q(\eta, \phi)$. + +**Table 4.** Sixteen combinations of the $SL(2, c)$ spinors. In the $SU(2)$ regime, there are two spinors leading to four bilinear forms. In the $SL(2, c)$ world, there are two undotted and two dotted spinors. These four-spinors lead to sixteen independent bilinear combinations. + +
| Spin 1 | Spin 0 |
| --- | --- |
| $uu$, $\frac{1}{\sqrt{2}}(uv + vu)$, $vv$ | $\frac{1}{\sqrt{2}}(uv - vu)$ |
| $\dot{u}\dot{u}$, $\frac{1}{\sqrt{2}}(\dot{u}\dot{v} + \dot{v}\dot{u})$, $\dot{v}\dot{v}$ | $\frac{1}{\sqrt{2}}(\dot{u}\dot{v} - \dot{v}\dot{u})$ |
| $u\dot{u}$, $\frac{1}{\sqrt{2}}(u\dot{v} + v\dot{u})$, $v\dot{v}$ | $\frac{1}{\sqrt{2}}(u\dot{v} - v\dot{u})$ |
| $\dot{u}u$, $\frac{1}{\sqrt{2}}(\dot{u}v + \dot{v}u)$, $\dot{v}v$ | $\frac{1}{\sqrt{2}}(\dot{u}v - \dot{v}u)$ |

After the operation of $Q(\eta, \phi)$ and $\dot{Q}(\eta, \phi)$:

| Spin 1 | Spin 0 |
| --- | --- |
| $e^{-i\phi}e^{\eta}uu$, $\frac{1}{\sqrt{2}}(uv + vu)$, $e^{i\phi}e^{-\eta}vv$ | $\frac{1}{\sqrt{2}}(uv - vu)$ |
| $e^{-i\phi}e^{-\eta}\dot{u}\dot{u}$, $\frac{1}{\sqrt{2}}(\dot{u}\dot{v} + \dot{v}\dot{u})$, $e^{i\phi}e^{\eta}\dot{v}\dot{v}$ | $\frac{1}{\sqrt{2}}(\dot{u}\dot{v} - \dot{v}\dot{u})$ |
| $e^{-i\phi}u\dot{u}$, $\frac{1}{\sqrt{2}}(e^{\eta}u\dot{v} + e^{-\eta}v\dot{u})$, $e^{i\phi}v\dot{v}$ | $\frac{1}{\sqrt{2}}(e^{\eta}u\dot{v} - e^{-\eta}v\dot{u})$ |
| $e^{-i\phi}\dot{u}u$, $\frac{1}{\sqrt{2}}(e^{-\eta}\dot{u}v + e^{\eta}\dot{v}u)$, $e^{i\phi}\dot{v}v$ | $\frac{1}{\sqrt{2}}(e^{-\eta}\dot{u}v - e^{\eta}\dot{v}u)$ |
+ +Among the bilinear combinations given in Table 4, the following two equations are invariant under rotations and also under boosts: + +$$S = \frac{1}{\sqrt{2}}(uv - vu), \quad \text{and} \quad \dot{S} = -\frac{1}{\sqrt{2}}(\dot{u}\dot{v} - \dot{v}\dot{u}). \tag{80}$$ + +They are thus scalars in the Lorentz-covariant world. Are they the same or different? Let us consider the following combinations: + +$$S_+ = \frac{1}{\sqrt{2}}(S + \dot{S}), \quad \text{and} \quad S_- = \frac{1}{\sqrt{2}}(S - \dot{S}). \tag{81}$$ + +Under the dot conjugation, $S_+$ remains invariant, but $S_-$ changes sign. The boost is performed in the opposite direction and therefore is the operation of space inversion. Thus, $S_+$ is a scalar, while $S_-$ is called a pseudo-scalar. + +## 6.1. Four-Vectors + +Let us go back to Equation (78) and make a dot-conjugation on one of the spinors. + +$$u\dot{u}, \quad \frac{1}{\sqrt{2}}(u\dot{v} + v\dot{u}), \quad v\dot{v}, \quad \frac{1}{\sqrt{2}}(u\dot{v} - v\dot{u}),$$ + +$$\dot{u}u, \quad \frac{1}{\sqrt{2}}(\dot{u}v + \dot{v}u), \quad \dot{v}v, \quad \frac{1}{\sqrt{2}}(\dot{u}v - \dot{v}u). \tag{82}$$ +---PAGE_BREAK--- + +We can make symmetric combinations under dot conjugation, which lead to: + +$$ +\frac{1}{\sqrt{2}} (u\dot{u} + \dot{u}u), \quad \frac{1}{2} [(u\dot{\nu} + v\dot{u}) + (\dot{u}v + \dot{v}u)], \quad \frac{1}{\sqrt{2}} (v\dot{\nu} + \dot{v}v), \quad \text{for spin 1}, +$$ + +$$ +\frac{1}{2}[(u\dot{v}-v\dot{u})+(\dot{u}v-\dot{v}u)], \quad \text{for spin 0,} \tag{83} +$$ + +and anti-symmetric combinations, which lead to: + +$$ +\frac{1}{\sqrt{2}}(u\dot{u} - \dot{u}u), \quad \frac{1}{2}[(u\dot{v} + v\dot{u}) - (\dot{u}v + \dot{v}u)], \quad \frac{1}{\sqrt{2}}(v\dot{v} - \dot{v}v), \quad \text{for spin 1,} +$$ + +$$ +\frac{1}{2}[(u\ddot{v} - v\ddot{u}) - (\dot{u}\ddot{v} - \ddot{u}v)], \quad \text{for spin } 0. 
\qquad (84) +$$ + +Let us rewrite the expression for the space-time four-vector given in Equation (7) as: + +$$ +\begin{pmatrix} t+z & x-iy \\ x+iy & t-z \end{pmatrix}, \tag{85} +$$ + +which, under the parity operation, becomes + +$$ +\begin{pmatrix} +t-z & -x+iy \\ +-x-iy & t+z +\end{pmatrix}. +\qquad +(86) +$$ + +If the expression of Equation (85) is for an axial vector, the parity operation leads to: + +$$ +\begin{pmatrix} -t+z & x-iy \\ x+iy & -t-z \end{pmatrix}, \qquad (87) +$$ + +where only the sign of *t* is changed. The off-diagonal elements remain invariant, while the diagonal elements are interchanged with sign changes. + +We note here that the parity operation corresponds to dot conjugation. Then, from the expressions given in Equations (83) and (84), it is possible to construct the four-vector as: + +$$ +V = \begin{pmatrix} u\ddot{v} - \dot{u}u & v\ddot{v} - \dot{v}u \\ u\dot{u} - \dot{u}u & \dot{u}v - v\dot{u} \end{pmatrix}, \qquad (88) +$$ + +where the off-diagonal elements change their signs under the dot conjugation, while the diagonal elements are interchanged. + +The axial vector can be written as: + +$$ +A = \begin{pmatrix} u\ddot{v} + v\dot{u} & v\ddot{v} + v\dot{v} \\ u\dot{u} + \dot{u}u & -\dot{u}v - v\dot{u} \end{pmatrix}. \qquad (89) +$$ + +Here, the off-diagonal elements do not change their signs under dot conjugation, and the diagonal elements become interchanged with a sign change. This matrix thus represents an axial vector. + +6.2. Second-Rank Tensor + +There are also bilinear spinors, which are both dotted or both undotted. We are interested in two +sets of three quantities satisfying the O(3) symmetry. They should therefore transform like: + +$$ +(x + iy)/\sqrt{2}, \quad (x - iy)/\sqrt{2}, \quad z, \tag{90} +$$ +---PAGE_BREAK--- + +which are like: + +$$uu, \quad vv, \quad (uv + vu) / \sqrt{2}, \tag{91}$$ + +respectively, in the $O(3)$ regime. 
Since the dot conjugation is the parity operation, they are like: + +$$-\dot{u}\dot{u}, \quad -\dot{v}\dot{v}, \quad -(\dot{u}\dot{v} + \dot{v}\dot{u})/\sqrt{2}. \tag{92}$$ + +In other words, + +$$(uu) = -\dot{u}\dot{u}, \quad \text{and} \quad (vv) = -\dot{v}\dot{v}. \tag{93}$$ + +We noticed a similar sign change in Equation (86). + +In order to construct the z component in this $O(3)$ space, let us first consider: + +$$f_z = \frac{1}{2} [(uv + vu) - (\dot{u}\dot{v} + \dot{v}\dot{u})], \qquad g_z = \frac{1}{2i} [(uv + vu) + (\dot{u}\dot{v} + \dot{v}\dot{u})]. \tag{94}$$ + +Here, $f_z$ and $g_z$ are respectively symmetric and anti-symmetric under the dot conjugation or the parity operation. These quantities are invariant under the boost along the z direction. They are also invariant under rotations around this axis, but they are not invariant under boosts along or rotations around the x or y axis. They are different from the scalars given in Equation (80). + +Next, in order to construct the x and y components, we start with $f_{\pm}$ and $g_{\pm}$ as: + +$$f_+ = \frac{1}{\sqrt{2}}(uu - \dot{u}\dot{u}), \quad f_- = \frac{1}{\sqrt{2}}(vv - \dot{v}\dot{v}),$$ + +$$g_+ = \frac{1}{\sqrt{2i}}(uu + \dot{u}\dot{u}), \quad g_- = \frac{1}{\sqrt{2i}}(vv + \dot{v}\dot{v}). \tag{95}$$ + +Then: + +$$f_x = \frac{1}{\sqrt{2}}(f_+ + f_-) = \frac{1}{2}[(uu + vv) - (\dot{u}\dot{u} + \dot{v}\dot{v})],$$ + +$$f_y = \frac{1}{\sqrt{2i}}(f_+ - f_-) = \frac{1}{2i}[(uu - vv) - (\dot{u}\dot{u} - \dot{v}\dot{v})], \tag{96}$$ + +and: + +$$g_x = \frac{1}{\sqrt{2}}(g_+ + g_-) = \frac{1}{2}[(uu + vv) + (\dot{u}\dot{u} + \dot{v}\dot{v})],$$ + +$$g_y = \frac{1}{\sqrt{2i}}(g_+ - g_-) = \frac{1}{2i}[(uu - vv) + (\dot{u}\dot{u} - \dot{v}\dot{v})]. \tag{97}$$ + +Here, $f_x$ and $f_y$ are symmetric under dot conjugation, while $g_x$ and $g_y$ are anti-symmetric. + +Furthermore, $f_z$, $f_x$ and $f_y$ of Equations (94) and (96) transform like a three-dimensional vector. 
The same can be said for $g_i$ of Equations (94) and (97). Thus, they can be grouped into the second-rank tensor: + +$$\begin{pmatrix} +0 & -f_z & -f_x & -f_y \\ +f_z & 0 & -g_y & g_x \\ +f_x & g_y & 0 & -g_z \\ +f_y & -g_x & g_z & 0 +\end{pmatrix}, \tag{98}$$ + +whose Lorentz-transformation properties are well known. The $g_i$ components change their signs under space inversion, while the $f_i$ components remain invariant. They are like the electric and magnetic fields, respectively. +---PAGE_BREAK--- + +If the system is Lorentz-boosted, $f_i$ and $g_i$ can be computed from Table 4. We are now interested in the symmetry of photons by taking the massless limit. Thus, we keep only the terms that become larger for larger values of $\eta$. Thus, + +$$ +\begin{aligned} +f_x & \rightarrow \frac{1}{2} (uu - \dot{u}\dot{v}), && f_y \rightarrow \frac{1}{2i} (uu + \dot{u}\dot{v}), \\ +g_x & \rightarrow \frac{1}{2i} (uu + \dot{v}\dot{u}), && g_y \rightarrow -\frac{1}{2} (uu - \dot{u}\dot{v}), +\end{aligned} +\quad (99) $$ + +in the massless limit. + +Then, the tensor of Equation (98) becomes: + +$$ \begin{pmatrix} 0 & 0 & -E_x & -E_y \\ 0 & 0 & -B_y & B_x \\ E_x & B_y & 0 & 0 \\ E_y & -B_x & 0 & 0 \end{pmatrix}, \qquad (100) $$ + +with: + +$$ +\begin{aligned} +E_x &\approx \frac{1}{2}(uu - \dot{u}\dot{v}), && E_y \approx \frac{1}{2i}(uu + \dot{u}\dot{v}), \\ +B_x &= \frac{1}{2i}(uu + \dot{v}\dot{u}), && B_y = -\frac{1}{2}(uu - \dot{u}\dot{v}). +\end{aligned} +\quad (101) $$ + +The electric and magnetic field components are perpendicular to each other. Furthermore, + +$$ B_x = E_y, \quad B_y = -E_x. \quad (102) $$ + +In order to address symmetry of photons, let us go back to Equation (95). In the massless limit, + +$$ B_+ \approx E_+ \approx uu, \quad B_- \approx E_- \approx \dot{u}\dot{v}. 
\quad (103) $$ + +The gauge transformations applicable to $u$ and $\bar{v}$ are the two-by-two matrices: + +$$ \begin{pmatrix} 1 & -\gamma \\ 0 & 1 \end{pmatrix}, \quad \text{and} \quad \begin{pmatrix} 1 & 0 \\ \gamma & 1 \end{pmatrix}, \qquad (104) $$ + +respectively. Both $u$ and $\bar{v}$ are invariant under gauge transformations, while $u$ and $\bar{v}$ are not. + +The $B_+$ and $E_+$ are for the photon spin along the z direction, while $B_-$ and $E_-$ are for the opposite direction. + +### 6.3. Higher Spins + +Since Wigner's original book of 1931 [24,25], the rotation group, without Lorentz transformations, has been extensively discussed in the literature [22,26,27]. One of the main issues was how to construct the most general spin state from the two-component spinors for the spin-1/2 particle. + +Since there are two states for the spin-1/2 particle, four states can be constructed from two spinors, leading to one state for the spin-0 state and three spin-1 states. With three spinors, it is possible to construct four spin-3/2 states and two spin-1/2 states, resulting in six states. This partition process is much more complicated [28,29] for the case of three spinors. Yet, this partition process is possible for all higher spin states. + +In the Lorentz-covariant world, there are four states for each spin-1/2 particle. With two spinors, we end up with sixteen (4 × 4) states, and they are tabulated in Table 4. There should be 64 states for +---PAGE_BREAK--- + +three spinors and 256 states for four spinors. We now know how to Lorentz-boost those spinors. We also know that the transverse rotations become gauge transformations in the limit of zero-mass or infinite-$\eta$. It is thus possible to bundle all of them into the table given in Figure 5. + +**Figure 5.** Unified picture of massive and massless particles. The gauge transformation is a Lorentz-boosted rotation matrix and is applicable to all massless particles. 
It is possible to construct higher-spin states starting from the four states of the spin-1/2 particle in the Lorentz-covariant world. + +In the relativistic regime, we are interested in photons and gravitons. As was noted in Sections 6.1 and 6.2, the observable components are invariant under gauge transformations. They are also the terms that become largest for large values of $\eta$. + +We have seen in Section 6.2 that the photon state consists of $uu$ and $\bar{u}\bar{v}$ for those whose spins are parallel and anti-parallel to the momentum, respectively. Thus, for spin-2 gravitons, the states must be $uuuu$ and $\bar{u}\bar{v}\bar{v}\bar{v}$, respectively. + +In his effort to understand photons and gravitons, Weinberg constructed his states for massless particles [30], especially photons and gravitons [31]. He started with the conditions: + +$$N_1|\text{state}>=0, \quad \text{and} \quad N_2|\text{state}>=0, \qquad (105)$$ + +where $N_1$ and $N_2$ are defined in Equation (17). Since they are now known as the generators of gauge transformations, Weinberg's states are gauge-invariant states. Thus, $uu$ and $\bar{u}\bar{v}$ are Weinberg's states for photons, and $uuuu$ are $\bar{u}\bar{v}\bar{v}\bar{v}$ are Weinberg's states for gravitons. + +## 7. Concluding Remarks + +Since the publication of Wigner's original paper [1], there have been many papers written on the subject. The issue is how to construct subgroups of the Lorentz group whose transformations do not change the momentum of a given particle. The traditional approach to this problem has been to work with a fixed mass, which remains invariant under Lorentz transformation. + +In this paper, we have presented a different approach. Since, we are interested in transformations that leave the momentum invariant, we do not change the momentum throughout mathematical processes. Figure 3 tells the difference. 
In our approach, we fix the momentum, and we allow transitions from one hyperbola to another analytically with one transformation matrix. It is an interesting future problem to see what larger group can accommodate this process. + +Since the purpose of this paper is to provide a simpler mathematics for understanding the physics of Wigner's little groups, we used the two-by-two $SL(2,c)$ representation, instead of four-by-four matrices, for the Lorentz group throughout the paper. During this process, it was noted in Section 5 that the Dirac equation is a representation of Wigner's little group. + +We also discussed how to construct higher-spin states starting from four-component spinors for the spin-1/2 particle. We studied how the spins can be added in the Lorentz-covariant world, as illustrated in Figure 5. + +**Author Contributions:** Each of the authors participated in developing the material presented in this paper and in writing the manuscript. +---PAGE_BREAK--- + +**Conflicts of Interest:** The authors declare no conflict of interest. + +References + +1. Wigner, E. On unitary representations of the inhomogeneous Lorentz group. *Ann. Math.* **1939**, *40*, 149–204. + +2. Han, D.; Kim, Y.S.; Son, D. Gauge transformations as Lorentz-boosted rotations. *Phys. Lett. B* **1983**, *131*, 327–329. + +3. Kim, Y.S.; Wigner, E.P. Cylindrical group and massless particles. *J. Math. Phys.* **1987**, *28*, 1175–1179. + +4. Kim, Y.S.; Wigner, E.P. Space-time geometry of relativistic-particles. *J. Math. Phys.* **1990**, *31*, 55–60. + +5. Başkal, S.; Kim, Y.S.; Noz, M.E. *Physics of the Lorentz Group*, IOP Concise Physics; Morgan & Claypool Publishers: San Rafael, CA, USA, 2015. + +6. Kupersztych, J. Is there a link between gauge invariance, relativistic invariance and Electron Spin? *Nuovo Cimento* **1976**, *31B*, 1–11. + +7. Han, D.; Kim, Y.S. Little group for photons and gauge transformations. *Am. J. Phys.* **1981**, *49*, 348–351. + +8. Han, D.; Kim, Y.S. 
Special relativity and interferometers. *Phys. Rev. A* **1988**, *37*, 4494–4496. + +9. Dirac, P.A.M. Applications of quaternions to Lorentz transformations. *Proc. R. Irish Acad.* **1945**, *A50*, 261–270. + +10. Bargmann, V. Irreducible unitary representations of the Lorentz group. *Ann. Math.* **1947**, *48*, 568–640. + +11. Naimark, M.A. *Linear Representations of the Lorentz Group*; Pergamon Press: Oxford, UK, 1954. + +12. Kim, Y.S.; Noz, M.E. *Theory and Applications of the Poincaré Group*; Reidel: Dordrecht, The Netherlands, 1986. + +13. Başkal, S.; Kim, Y.S.; Noz, M.E. Wigner’s space-time symmetries based on the two-by-two matrices of the damped harmonic oscillators and the poincaré sphere. *Symmetry* **2014**, *6*, 473–515. + +14. Han, D.; Kim, Y.S.; Son, D. Eulerian parametrization of Wigner little groups and gauge transformations in terms of rotations in 2-component spinors. *J. Math. Phys.* **1986**, *27*, 2228–2235. + +15. Wigner, E.P. Normal form of antiunitary operators. *J. Math. Phys.* **1960**, *1*, 409–413. + +16. Wigner, E.P. Phenomenological distinction between unitary and antiunitary symmetry operators. *J. Math. Phys.* **1960**, *1*, 413–416. + +17. Han, D.; Kim, Y.S.; Son, D. E(2)-like little group for massless particles and polarization of neutrinos. *Phys. Rev. D* **1982**, *26*, 3717–3725. + +18. Han, D.; Kim, Y.S.; Son, D. Photons, neutrinos, and gauge transformations. *Am. J. Phys.* **1986**, *54*, 818–821. + +19. Mohapatra, R.N.; Smirnov, A.Y. Neutrino mass and new physics. *Ann. Rev. Nucl. Part. Sci.* **2006**, *56*, 569–628. + +20. Kim, Y.S.; Maguire, G.Q., Jr.; Noz, M.E. Do small-mass neutrinos participate in gauge transformations? *Adv. High Energy Phys.* **2016**, 2016, 1847620, doi:10.1155/2016/1847620. + +21. Berestetskii, V.B.; Pitaevskii, L.P.; Lifshitz, E.M. *Quantum Electrodynamics*, Volume 4 of the Course of Theoretical Physics, 2nd ed.; Pergamon Press: Oxford, UK, 1982. + +22. Gel'fand, I.M.; Minlos, R.A.; Shapiro, A. 
*Representations of the Rotation and Lorentz Groups and their Applications*; MacMillan: New York, NY, USA, 1963. + +23. Weinberg, S. Feynman rules for any spin. *Phys. Rev.* **1964**, *133*, B1318-B1332. + +24. Wigner, E. *Gruppentheorie und ihre Anwendungen auf die Quantenmechanik der Atomspektren*; Friedrich Vieweg und Sohn: Braunsweig, Germany, 1931. (In German) + +25. Wigner, E.P. *Group Theory and Its Applications to the Quantum Mechanics of Atomic Spectra*, Translated from the German; Griffin, J.J., Ed.; Academic Press: New York, NY, USA, 1959. + +26. Condon, E.U.; Shortley, G.H. *The Theory of Atomic Spectra*; Cambridge University Press: London, UK, 1951. + +27. Hamermesh, M. *Group Theory and Application to Physical Problems*; Addison-Wesley: Reading, MA, USA, 1962. + +28. Feynman, R.P.; Kislinger, M.; Ravndal, F. Current matrix elements from a relativistic quark model. *Phys. Rev. D* **1971**, *3*, 2706–2732. + +29. Hussar, P.E.; Kim, Y.S.; Noz, M.E. Three-particle symmetry classifications according to the method of Dirac. *Am. J. Phys.* **1980**, *48*, 1038–1042. + +30. Weinberg, S. Feynman rules for any spin II. massless particles. *Phys. Rev.* **1964**, *134*, B882-B896. + +31. Weinberg, S. Photons and gravitons in S-Matrix theory: Derivation of charge conservation and equality of gravitational and inertial mass. *Phys. Rev.* **1964**, *135*, B1049-B1056. + +© 2017 by the authors. Licensee MDPI, Basel, Switzerland. This article is an open access article distributed under the terms and conditions of the Creative Commons Attribution (CC BY) license (http://creativecommons.org/licenses/by/4.0/). +---PAGE_BREAK--- + +MDPI AG + +St. Alban-Anlage 66 +4052 Basel, Switzerland + +Tel. +41 61 683 77 34 +Fax +41 61 302 89 18 + +http://www.mdpi.com + +*Symmetry* Editorial Office + +E-mail: symmetry@mdpi.com + +http://www.mdpi.com/journal/symmetry +---PAGE_BREAK--- + + +---PAGE_BREAK--- + +MDPI AG +St. 
Alban-Anlage 66 +4052 Basel +Switzerland + +Tel: +41 61 683 77 34 +Fax: +41 61 302 89 18 + +www.mdpi.com \ No newline at end of file diff --git a/samples_new/texts_merged/6016935.md b/samples_new/texts_merged/6016935.md new file mode 100644 index 0000000000000000000000000000000000000000..17e6c8dd495cb38eab76754d99a3d73dc90176d8 --- /dev/null +++ b/samples_new/texts_merged/6016935.md @@ -0,0 +1,1067 @@ + +---PAGE_BREAK--- + +# Practical Quantum Computing: solving the wave equation using a quantum approach + +Adrien Suau, Gabriel Staffelbach, Henri Calandra + +► To cite this version: + +Adrien Suau, Gabriel Staffelbach, Henri Calandra. Practical Quantum Computing: solving the wave equation using a quantum approach. ACM Transactions on Quantum Computing, ACM, 2021, 2 (1), pp.1-35. 10.1145/3430030. lirmm-03262927 + +HAL Id: lirmm-03262927 + +https://hal-lirmm.ccsd.cnrs.fr/lirmm-03262927 + +Submitted on 16 Jun 2021 + +**HAL** is a multi-disciplinary open access archive for the deposit and dissemination of scientific research documents, whether they are published or not. The documents may come from teaching and research institutions in France or abroad, or from public or private research centers. + +L'archive ouverte pluridisciplinaire **HAL**, est destinée au dépôt et à la diffusion de documents scientifiques de niveau recherche, publiés ou non, émanant des établissements d'enseignement et de recherche français ou étrangers, des laboratoires publics ou privés. 
+---PAGE_BREAK--- + +# Practical Quantum Computing: solving the wave equation using a quantum approach + +Adrien Suau,$^{1,2,*}$ Gabriel Staffelbach,$^1$ and Henri Calandra$^3$ + +$^1$CERFACS, 42 Avenue Gaspard Coriolis, 31057 Toulouse, France + +$^2$LIRMM, University of Montpellier, 161 rue Ada, 34095 Montpellier, France + +$^3$TOTAL SA, 2 Avenue de Vignancour, 64000 Pau, France + +(Dated: June 14, 2021) + +In the last years, several quantum algorithms that try to address the problem of partial differential equation solving have been devised. On one side, “direct” quantum algorithms that aim at encoding the solution of the PDE by executing one large quantum circuit. On the other side, variational algorithms that approximate the solution of the PDE by executing several small quantum circuits and making profit of classical optimisers. In this work we propose an experimental study of the costs (in terms of gate number and execution time on a idealised hardware created from realistic gate data) associated with one of the “direct” quantum algorithm: the wave equation solver devised in [PCS. Costa, S. Jordan, A. Ostrander, *Phys. Rev. A* **99**, 012323, 2019]. We show that our implementation of the quantum wave equation solver agrees with the theoretical big-O complexity of the algorithm. We also explain in great details the implementation steps and discuss some possibilities of improvements. Finally, our implementation proves experimentally that some PDE can be solved on a quantum computer, even if the direct quantum algorithm chosen will require error-corrected quantum chips, which are not believed to be available in the short-term. + +## I. INTRODUCTION + +Quantum computing has drawn a lot of attention in the last few years, following the successive announcements from several world-wide companies about the implementation of quantum hardware with an increasing number of qubits or reduced error rates [4, 8, 9, 12, 52]. 
+ +Along with the hardware improvement, new quantum algorithms were discovered, yielding potential quantum speed-up and applications in various fields such as quantum chemistry [23], linear algebra [22, 38, 40, 41, 55, 66] or optimisation [35, 42, 43]. Recent works even show that differential equations may be solved by using a quantum computer [11, 16, 21, 25, 26, 37, 46, 51, 58, 62, 65]. But despite the large number of algorithms available, it is hard to find an actual implementation of a quantum differential equation solver, Hamiltonian simulation being the unique exception by solving the time-dependant Schrödinger equation. + +In this work, we present and analyse a quantum wave equation solver we implemented from scratch according to the algorithm depicted in [32]. During the solver implementation, we had to look for a Hamiltonian Simulation procedure. The implementations we found being too restricted, we decided to implement our own Hamiltonian Simulation procedure, which will also be analysed. + +To the best of our knowledge, this work is the first to analyse experimentally the characteristics of a quantum PDE solver. Such a study has already been performed on the HHL algorithm, in [54]. We checked that the practical implementation agrees with the theoretical asymptotic complexities on several quantities of interest such as the total gate count with respect to the number of discretisation points used or the precision, the number of qubits required versus the number of discretisation points used to approximate the solution or precision of the solution when compared to a classical finite-difference solver. Finally, we verified that the execution time of the generated quantum circuit on today's accessible quantum hardware was still following the theoretical asymptotic complexities devised for the total gate count. Quantum hardware data were extracted from IBM Q chips. 
+ +We show experimentally that it is possible to solve the 1-dimensional wave equation on a quantum computer with a time-complexity that grows as $\mathcal{O}(N_d^{3/2} \log(N_d)^2)$ where $N_d$ is the number of discretisation points used to approximate the solution. But even if the asymptotic scaling is better than classical algorithms, we found out that the constants hidden in the big-O notation were huge enough to make the solver less efficient than classical solvers for reasonable discretisation sizes. + +* adrien.suau@cerfacs.fr +---PAGE_BREAK--- + +## II. PROBLEM CONSIDERED + +We consider a simplified version of the wave equation on the 1-dimensional line [0, 1] where the propagation speed $c$ is constant and equal to 1. This equation can be written as + +$$ \frac{\partial^2}{\partial t^2} \phi(x,t) = \frac{\partial^2}{\partial x^2} \phi(x,t). \qquad (1) $$ + +Moreover, we only consider solving eq. (1) with the Dirichlet boundary conditions + +$$ \frac{\partial}{\partial x}\phi(0,t) = \frac{\partial}{\partial x}\phi(1,t) = 0. \qquad (2) $$ + +No assumption is made on initial speed $\phi(x, 0)$ and initial velocity $\frac{\partial\phi}{\partial t}(x, 0)$. + +The resolution of this simplified wave equation on a quantum computer is an appealing problem for the first implementation of a PDE solver for several reasons. First, the wave equation is a well-known and intensively studied problem for which a lot of theoretical results have been verified. Secondly, even-though it is a relatively simple PDE, the wave equation can be used to solve some interesting problems such as seismic imaging [13, 14]. Finally, the theoretical implementation of a quantum wave equation solver has already been studied in [32]. + +In this paper, we present the complete implementation of a 1-dimensional wave equation solver using quantum technologies based on qat library. 
To the best of our knowledge, this work is the first to consider the implementation of an entire PDE solver that can run on a quantum computer. Specifically, we explain all the implementation details of the solver from the mathematical theory to the actual quantum circuit used. The characteristics of the solver are then discussed and analysed, such as the estimated gate count and estimated execution time on real quantum hardware. We show that the implementation follows the theoretical asymptotic behaviours devised in [32]. Moreover, the wave equation solver algorithm relies critically on an efficient implementation of a Hamiltonian simulation algorithm, which we have also implemented and analysed thoroughly. + +## III. IMPLEMENTATION + +The algorithm used to solve the wave equation is explained in [32] and uses a Hamiltonian simulation procedure. Costa et al. chose the Hamiltonian simulation algorithm described in [20] for its nearly optimal theoretical asymptotic behaviour. We privileged instead the Hamiltonian simulation procedure explained in [10, 17] for its good experimental results based on [27] and its simpler implementation (detailed in appendix A). + +The code has been written using qat, a Python library shipped with the Quantum Learning Machine (QLM), a package developed and maintained by Atos. It has not been extensively optimized yet, which means that there is still a large room for possible improvements. 
+ +All the circuits used in this paper have been generated with a subset of qat's gate set: + +$$ \{H, X, R_y(\theta), P_h(\theta), CP_h(\theta), CNOT, CCNOT\} \qquad (3) $$ + +and have then been translated to the gate set + +$$ \{U_1(\lambda), U_2(\phi, \lambda), U_3(\theta, \phi, \lambda), CNOT\} \qquad (4) $$ + +for $U_1$, $U_2$ and $U_3$ defined in Equation (7) of [31] as follows: + +$$ U(\theta, \phi, \lambda) = \begin{pmatrix} \cos(\frac{\theta}{2}) & -e^{i\lambda} \sin(\frac{\theta}{2}) \\ e^{i\phi} \sin(\frac{\theta}{2}) & e^{i(\lambda+\phi)} \cos(\frac{\theta}{2}) \end{pmatrix} \qquad (5) $$ + +$$ U_3(\theta, \phi, \lambda) = U(\theta, \phi, \lambda) \qquad (6) $$ + +$$ U_2(\phi, \lambda) = U\left(\frac{\pi}{2}, \phi, \lambda\right) \qquad (7) $$ + +$$ U_1(\lambda) = U(0, 0, \lambda) \qquad (8) $$ +---PAGE_BREAK--- + +**Note 1.** The target gate set presented in eq. (4) does not correspond to the physical gate set implemented by IBM hardware (see Equation (8) of [31]). This choice is justified by the fact that IBM only provides hardware characteristics such as gate times for the gate set of eq. (4) and not for the real hardware gate set. + +This implementation aims at validating in practice the theoretical asymptotic complexities of Hamiltonian simulation algorithms and providing a proof-of-concept showing that it is possible to solve a partial differential equation on a quantum computer. + +## A. Sparse Hamiltonian simulation algorithm + +**Definition 1.** *s-sparse matrix:* A s-sparse matrix with $s \in \mathbb{N}^*$ is a matrix that has at most s non-zero entries per row and per column. + +**Definition 2.** *sparse matrix:* A sparse matrix is a s-sparse matrix with $s \in O(\log(N))$, $N$ being the size of the matrix. + +In the past years, a lot of algorithms have been devised to simulate the effect of a Hamiltonian on a quantum state [17–20, 24, 29, 44, 47–50, 57].
Among all these algorithms, only a few have already been implemented for specific cases [3, 7] but to the best of our knowledge no implementation is currently capable of simulating a generic sparse Hamiltonian. + +The domain of application of the already existing methods being too narrow, we decided to implement our own generic sparse Hamiltonian simulation procedure. We based our work on the product-formula approach described in [10, 17]. One advantage of this approach is that product-formula based algorithms have already been thoroughly analysed both theoretically [10, 17] and practically [27, 54], and several implementations are publicly available, though restricted to Hamiltonians that can be decomposed as a sum of tensor products of Pauli matrices. Moreover, [10] provides a lot of implementation details that allowed us to go straight to the development step. + +Our implementation is capable of simulating an arbitrary sparse Hamiltonian provided that it has already been decomposed into a sum of 1-sparse Hermitian matrices with either only real or only complex entries, each described by an oracle. The implementation has been validated with several automated tests and a more complex case involving the simulation of a 2-sparse Hamiltonian and described in section III B. Furthermore, it agrees perfectly with the theoretical complexities devised in [10, 17] as studied and verified in section IV. + +## B. Quantum wave equation solver + +Using the Hamiltonian simulation algorithm implementation, we successfully implemented a 1-dimensional wave equation solver using the algorithm described in [32] and explained in appendix B and appendix C. + +For the specific case considered (eq. (1) and eq. (2)), solving the wave equation for a time $T$ on a quantum computer boils down to simulating a 2-sparse Hamiltonian for a time $f(T)$, the function $f$ being thoroughly described in [32] and eq. (18).
The constructed quantum circuit can then be applied to a quantum state representing the initial position $\phi(x, 0)$ and velocity $\frac{\partial\phi}{\partial t}(x, 0)$, and will evolve this state towards a quantum state representing the final position $\phi(x, T)$ and velocity $\frac{\partial\phi}{\partial t}(x, T)$. + +As for the Hamiltonian simulation procedure, the practical results we obtain from the implementation of the quantum wave equation solver seem to match the theoretical asymptotic complexities. See section IV for an analysis of the theoretical asymptotic complexities. + +# IV. RESULTS + +Using a simulator instead of a real quantum computer has several advantages. In terms of development process, a simulator allows the developer to perform several actions that are not possible as-is on a quantum processor such as describing a quantum gate with a unitary matrix instead of a sequence of hardware operations. Another useful operation that is possible on a quantum simulator and not currently achievable on a quantum processor is efficient generic state preparation. + +Our implementation uses only standard quantum gates and does not leverage any of the simulator-only features such as quantum gates implemented from a unitary matrix. In other words, both the Hamiltonian simulation procedure and the quantum wave equation solver are “fully quantum” and are readily executable on a quantum processor, provided that it has enough qubits. As a proof, and in order to benchmark our implementation, we translated the +---PAGE_BREAK--- + +generated quantum circuits to IBM Q Melbourne gate-set (see eq. (4)). IBM Q Melbourne [2] is a quantum chip with 14 usable qubits made available by IBM on the 23rd of September, 2018. + +**Note 2.** We chose IBM Q Melbourne mainly because, at the time of writing, it was the publicly accessible quantum chip with the largest number of qubits and so was deemed to be the closest to future quantum hardware.
It is important to note that even if IBM Q Melbourne has 14 qubits, the quantum circuits constructed in this paper are not runnable because they require more qubits. Consequently, because of this hardware limitation, hardware topology has also been left out of the study. + +This allowed us to have an estimation of the number of hardware gates needed to either solve the wave equation or simulate a specific Hamiltonian on this specific hardware. Combining these numbers and the hardware gate execution time published in [6], we were able to compute a rough approximation of the time needed to solve the considered problem presented in eq. (1) and eq. (2) on this specific hardware. + +## A. Hamiltonian simulation + +As explained in section III A, the Hamiltonian simulation algorithm implemented has been first devised in [10, 17]. A quick review of the algorithm along with implementation details can be found in appendix A. This Hamiltonian simulation procedure requires that the Hamiltonian matrix $H$ to simulate can be decomposed as + +$$H = \sum_{j=1}^{m} H_j \quad (9)$$ + +where each $H_j$ is an efficiently simulable Hermitian matrix. + +In our benchmark, we simulated the Hamiltonian described in eq. (B11). According to [10], real 1-sparse Hermitian matrices with only 1 or 0 entries can be simulated with $O(n)$ gates and 2 calls to the oracle, *n* being the number of qubits the Hamiltonian *H* acts on. The exact gate count can be found in table I in the row 1-*sparse HS*. + +Letting $O_i$ be the gate complexity of the oracle implementing the $i$-th Hermitian matrix $H_i$ of the decomposition in eq. (9), we end up with an asymptotic complexity of $O(n+O_i)$ to simulate $H_i$. Once again, the exact gate count is decomposed in table I.
+ +Applying the Trotter-Suzuki product-formula of order *k* (see definition 4 in appendix A 5 for the definition of the Trotter-Suzuki product-formula) on the quantum circuit simulating the Hermitian matrices produces a circuit of size + +$$\mathcal{O}\left(5^k \sum_{i=1}^{m} (n + O_i)\right). \qquad (10)$$ + +This circuit should finally be repeated *r* times in order to achieve an error of at most $\epsilon$, with + +$$r \in \mathcal{O}\left(5^k m \tau \left(\frac{m\tau}{\epsilon}\right)^{\frac{1}{2k}}\right), \quad (11)$$ + +and $\tau = t \max_i ||H_i||$, *t* being the time for which we want to simulate the given Hamiltonian and $|| \cdot ||$ being the spectral norm [17]. + +Merging eq. (10) and eq. (11) gives us the complexity + +$$\mathcal{O}\left(5^{2k} m \tau \left(\frac{m\tau}{\epsilon}\right)^{\frac{1}{2k}} \sum_{i=1}^{m} (n + O_i)\right). \quad (12)$$ + +This generic expression of the asymptotic complexity can be specialized to our benchmark case. The number of gates needed to implement the oracles is $O(n^2)$ and the chosen decomposition contains $m = 2$ Hermitian matrices, each with a spectral norm of 1. Replacing the symbols in eq. (10) and eq. (11) results in the asymptotic gate complexity of + +$$\mathcal{O}(5^k n^2) \quad (13)$$ +---PAGE_BREAK--- + +for the circuit simulating $e^{-iHt/r}$ and a number + +$$r \in \mathcal{O} \left( 5^k t \left( \frac{t}{\epsilon} \right)^{\frac{1}{2k}} \right) \qquad (14)$$ + +of repetitions, which leads to a total gate complexity of + +$$\mathcal{O} \left( 5^{2k} n^2 t \left( \frac{t}{\epsilon} \right)^{\frac{1}{2k}} \right). \qquad (15)$$ + +In order to check that our implementation follows this theoretical asymptotic behaviour, we chose to let $k=1$ and plotted the number of gates generated versus the three parameters that have an impact on the number of gates: the number of discretisation points $N_d$ (fig. 1(a)), the time of simulation $t$ (fig. 1(b)) and the precision $\epsilon$ (fig. 1(c)).
The corresponding asymptotic complexity should be + +$$\mathcal{O}\left(n^2 \frac{t^{3/2}}{\sqrt{\epsilon}}\right) = \mathcal{O}\left(\log_2(N_d)^2 \frac{t^{3/2}}{\sqrt{\epsilon}}\right). \qquad (16)$$ + +A small discrepancy can be observed in fig. 1(a): the theoretical asymptotic number of gates is $\mathcal{O}(\log_2(N)^2)$ but the experimental values seem better fitted with an asymptotic behaviour of $\mathcal{O}(\log_2(N)^{7/4})$. This may be caused by the asymptotic regime not being reached yet. + +## B. Wave equation solver + +The first characteristic of the wave equation solver that needs to be checked is its validity: is the quantum wave equation solver capable of solving accurately the wave equation as described in eq. (1) and eq. (2)? + +To check the validity of the solver, we used `qat` simulators and Atos QLM to simulate the quantum program generated to solve the wave equation with different values for the number of discretisation points $N_d$, for the physical time $t$ and for the precision $\epsilon$. Fig. 3 shows the classical solution versus the quantum solution and the absolute error between the two solutions for $N_d = 32$, $t = 0.4$ and $\epsilon = 10^{-3}$. The solution obtained by the quantum solver is nearly exactly the same as the classical solution obtained with finite differences. The error between the two solutions is of the order of $10^{-7}$, which is 4 orders of magnitude smaller than the error we asked for. + +Once the validity of our solver has been checked on multiple test cases, the next interesting property we would like to verify is the asymptotic cost: does the implemented solver seem to agree with the theoretical asymptotic complexities derived from [32] and [17]? + +In our specific case, the Hamiltonian $H$ to simulate can be decomposed into two 1-sparse Hermitian matrices, both of them having a spectral norm of 1. The exact decomposition can be found in appendix B 3.
We chose to let the product-formula order be equal to $k=1$ and reuse the asymptotic complexity found in eq. (15) by changing the time of simulation $t$ by the time $f(t)$: + +$$\mathcal{O} \left( 5^{2k} n^2 f(t) \left( \frac{f(t)}{\epsilon} \right)^{\frac{1}{2k}} \right). \qquad (17)$$ + +Following the study performed in [32], + +$$f(t) = \frac{t}{\delta x} = t(N_d - 1) \qquad (18)$$ + +where $\delta x$ is the distance between two discretisation points. Moreover, it is possible to prove (see appendix B 3) that + +$$n = \lfloor \log_2(2N_d - 1) \rfloor \qquad (19)$$ + +Replacing $f(t)$ and $n$ in eq. (10) and eq. (11) gives us a gate complexity of + +$$\mathcal{O}\left(5^k \log_2 (N_d)^2\right) \qquad (20)$$ +---PAGE_BREAK--- + +FIG. 1. Number of quantum gates needed to simulate the Hamiltonian described in appendix B using the oracles implemented following appendix C. Graphs generated with a Trotter-Suzuki product-formula order $k = 1$, 32 discretisation points (i.e. $n = 6$ qubits) for fig. 1(b) and fig. 1(c), a physical time $t = 1$ for fig. 1(a) and fig. 1(c) and a precision $\epsilon = 10^{-5}$ for fig. 1(a) and fig. 1(b). + +to construct a circuit simulating $e^{-iHt/r}$ and a number of repetitions + +$$r \in \mathcal{O} \left( 5^k t N_d \left( \frac{t N_d}{\epsilon} \right)^{\frac{1}{2k}} \right). \qquad (21)$$ + +Merging the two expressions results in a gate complexity of + +$$\mathcal{O} \left( 5^{2k} t N_d \log_2 (N_d)^2 \left( \frac{t N_d}{\epsilon} \right)^{\frac{1}{2k}} \right). \qquad (22)$$ + +Choosing the Trotter-Suzuki formula order $k = 1$ gives us a final complexity of + +$$\mathcal{O}\left(N_d^{3/2} \log_2(N_d)^2 \frac{t^{3/2}}{\sqrt{\epsilon}}\right) \qquad (23)$$ + +to solve the wave equation presented in eq. (1). This theoretical result is verified experimentally in fig. 4(a). +---PAGE_BREAK--- + +FIG. 2.
Plot of the number of logical qubits needed to run the wave equation solver for a time $t = 1$, a precision $\epsilon = 10^{-5}$ and a Trotter-Suzuki product-formula of order $k = 1$. The constant values 11 and 3 have been chosen arbitrarily to fit the experimental data. The number of physical qubits needed will depend on their error rate as noted in [36]. Multiplying the number of logical qubits by 3 to 4 orders of magnitude might be a good estimate of the actual number of physical qubits required. + +FIG. 3. Comparison of the classical solver and the quantum solver. Both solvers solved the 1-D wave equation with $N_d = 32$ discretisation points and a physical time of $t = 0.4$. The classical solver uses finite-differences with a very small time-step in order to avoid as much as possible errors due to time-discretisation. The quantum solver was instructed to solve the wave equation with a precision of at least $\epsilon = 10^{-3}$ and used a Trotter-Suzuki order of $k = 1$. The solutions of the two solvers are too close to be able to notice a difference (they overlap on the graph), that is why a second graph plotting the absolute error between the two solvers is included. + +## V. DISCUSSION + +In this work, we focus on the practical cost of implementing a 1-dimensional quantum wave equation solver on a quantum computer. We show that a quantum computer is able to solve partial differential equations by constructing and simulating the quantum circuits described. We also study the scaling of the solver with respect to several parameters of interest and show that the theoretical asymptotic bounds are mostly verified. + +In future works, one can study the possibilities of circuit optimisation. It would also be interesting to implement Neumann boundary conditions instead of Dirichlet ones. A practical implementation including a non-constant propagation speed $c$ has also been realised during the writing of this paper.
The results were encouraging but were not judged mature enough to include them in the paper. Finally, future works might want to extend the wave equation solver to 2 dimensions or more. +---PAGE_BREAK--- + +FIG. 4. Graphs generated with a Trotter-Suzuki product-formula order $k = 1$, a physical time $t = 1$ and a precision $\epsilon = 10^{-5}$. + +ACKNOWLEDGMENTS + +The authors would like to thank Reims University, the ROMEO HPC center, Total, the CCRT and Atos for their support by giving us access to Atos quantum simulator. + +SUPPLEMENTARY MATERIAL + +The implementation of the quantum wave equation solver is available at https://gitlab.com/cerfacs/qaths. +The qprof tool is available at https://gitlab.com/qcomputing/qprof/qprof. + +[1] 2015. Constructing Large Controlled Nots. https://algassert.com/circuits/2015/06/05/Constructing-Large-Controlled-Nots.html. (2015). Accessed: 2020-03-27. + +[2] 2019. 14-qubit backend: IBM Q team, "IBM Q 16 Melbourne backend specifications V1.3.0" (2019). (2019). Retrieved from https://quantum-computing.ibm.com. + +[3] 2019. Hamiltonian simulation implementation in qiskit-aqua. https://github.com/Qiskit/qiskit-aqua/blob/master/qiskit/aqua/operators/weighted_pauli_operator.py#L837. (2019). Accessed: 2020-03-27. + +[4] 2019. IBM Quantum Computing. https://www.ibm.com/quantum-computing/. (2019). Accessed: 2020-03-27. + +[5] 2019. Melbourne gate specification. https://github.com/Qiskit/ibmq-device-information/tree/master/backends/melbourne/V1#gate-specification. (2019). Accessed: 2020-03-27. + +[6] 2019. Melbourne hardware operation execution time. https://github.com/Qiskit/ibmq-device-information/blob/master/backends/melbourne/V1/version_log.md#gate-specification. (2019). Accessed: 2020-03-27. + +[7] 2019. Quantum algorithms for the simulation of Hamiltonian dynamics. https://github.com/njross/simcount. (2019). Accessed: 2020-03-27. + +[8] 2019. Quantum computing — Intel Newsroom. 
https://newsroom.intel.com/press-kits/quantum-computing/. (2019). Accessed: 2020-03-27. + +[9] 2019. Quantum Supremacy Using a Programmable Superconducting Processor. https://ai.googleblog.com/2019/10/quantum-supremacy-using-programmable.html. (2019). Accessed: 2020-03-27. + +[10] Graeme Robert Ahokas. 2004. *Improved Algorithms for Approximate Quantum Fourier Transforms and Sparse Hamiltonian Simulations*. Master's thesis. University of Calgary. https://doi.org/10.11575/PRISM/22839 + +[11] Juan Miguel Arrazola, Timjan Kalajdzievski, Christian Weedbrook, and Seth Lloyd. 2018. Quantum algorithm for non-homogeneous linear partial differential equations. (09 2018). arXiv:1809.02622v1 http://arxiv.org/abs/1809.02622v1 + +[12] Frank Arute, Kunal Arya, Ryan Babbush, Dave Bacon, Joseph C. Bardin, Rami Barends, Rupak Biswas, Sergio Boixo, Fernando G. S. L. Brandao, David A. Buell, Brian Burkett, Yu Chen, Zijun Chen, Ben Chiaro, Roberto Collins, William Courtney, Andrew Dunsworth, Edward Farhi, Brooks Foxen, Austin Fowler, Craig Gidney, Marissa Giustina, Rob Graff, +---PAGE_BREAK--- + +Keith Guerin, Steve Habegger, Matthew P. Harrigan, Michael J. Hartmann, Alan Ho, Markus Hoffmann, Trent Huang, Travis S. Humble, Sergei V. Isakov, Evan Jeffrey, Zhang Jiang, Dvir Kafri, Kostyantyn Kechedzhi, Julian Kelly, Paul V. Klimov, Sergey Knyshev, Alexander Korotkov, Fedor Kozitsa, David Landhuis, Mike Lindmark, Erik Lucero, Dmitry Lyakh, Salvatore Mandrà, Jarrod R. McClean, Matthew McEwen, Anthony Megrant, Xiao Mi, Kristel Michielsen, Masoud Mohseni, Josh Mutus, Ofer Naaman, Matthew Neeley, Charles Neill, Murphy Yuezhen Niu, Eric Ostby, Andre Petukhov, John C. Platt, Chris Quintana, Eleanor G. Rieffel, Pedram Roushan, Nicholas C. Rubin, Daniel Sank, Kevin J. Satzinger, Vadim Smelyanskiy, Kevin J. Sung, Matthew D. Trevithick, Amit Vainsencher, Benjamin Villalonga, Theodore White, Z. Jamie Yao, Ping Yeh, Adam Zalcman, Hartmut Neven, and John M. Martinis. 2019. 
Quantum supremacy using a programmable superconducting processor. *Nature* 574 (10 2019), 505–510. Issue 7779. https://doi.org/10.1038/s41586-019-1666-5 + +[13] Alain Bamberger, Guy Chavent, and Patrick Lailly. 1977. Une application de la théorie du contrôle à un problème inverse de sismique. *Ann. Geophys* 33, 1 (1977), 2. + +[14] Alain Bamberger, Guy Chavent, and Patrick Lailly. 1979. About the stability of the inverse problem in 1-D wave equations – application to the interpretation of seismic profiles. *Applied Mathematics & Optimization* 5 (3 1979), 1–47. Issue 1. https://doi.org/10.1007/BF01442542 + +[15] Adriano Barenco, Artur Ekert, Kalle-Antti Suominen, and Päivi Törmä. 1996. Approximate Quantum Fourier Transform and Decoherence. (Jan 1996). https://doi.org/10.1103/PhysRevA.54.139 arXiv:quant-ph/9601018v1 + +[16] Dominic W. Berry. 2010. High-order quantum algorithm for solving linear differential equations. (10 2010). https://doi.org/10.1088/1751-8113/47/10/105301 arXiv:1010.2745v2 J. Phys. A: Math. Theor. 47, 105301 (2014). + +[17] Dominic W. Berry, Graeme Ahokas, Richard Cleve, and Barry C. Sanders. 2007. Efficient Quantum Algorithms for Simulating Sparse Hamiltonians. *Communications in Mathematical Physics* 270 (1 2007), 359–371. Issue 2. https://doi.org/10.1007/s00220-006-0150-x arXiv:quant-ph/0508139v2 Communications in Mathematical Physics 270, 359 (2007). + +[18] Dominic W. Berry and Andrew M. Childs. 2012. Black-box Hamiltonian Simulation and Unitary Implementation. *Quantum Info. Comput.* 12, 1-2 (01 2012), 29–62. https://doi.org/10.26421/QIC12.1-2 arXiv:0910.4157v4 Quantum Information and Computation 12, 29 (2012). + +[19] Dominic W. Berry, Andrew M. Childs, Richard Cleve, Robin Kothari, and Rolando D. Somma. 2015. Simulating Hamiltonian Dynamics with a Truncated Taylor Series. *Physical Review Letters* 114 (3 2015). Issue 9. https://doi.org/10.1103/PhysRevLettt.114.090502 arXiv:1412.4687v1 Phys. Rev. Lett. 114, 090502 (2015). + +[20] Dominic W. 
Berry, Andrew M. Childs, and Robin Kothari. 2015. Hamiltonian Simulation with Nearly Optimal Dependence on all Parameters. In *2015 IEEE 56th Annual Symposium on Foundations of Computer Science*. 792–809. https://doi.org/10.1109/FOCS.2015.54 arXiv:1501.01715v3 Proceedings of the 56th IEEE Symposium on Foundations of Computer Science (FOCS 2015), pp. 792–809 (2015). + +[21] Dominic W. Berry, Andrew M. Childs, Aaron Ostrander, and Guoming Wang. 2017. Quantum Algorithm for Linear Differential Equations with Exponentially Improved Dependence on Precision. *Communications in Mathematical Physics* 356 (12 2017), 1057–1081. Issue 3. https://doi.org/10.1007/s00220-017-3002-y arXiv:1701.03684v2 Communications in Mathematical Physics 356, 1057–1081 (2017). + +[22] Carlos Bravo-Prieto, Ryan LaRose, M. Cerezo, Yigit Subasi, Lukasz Cincio, and Patrick J. Coles. 2019. Variational Quantum Linear Solver: A Hybrid Algorithm for Linear Systems. (09 2019). arXiv:1909.05820v1 http://arxiv.org/abs/1909.05820v1 + +[23] Yudong Cao, Jonathan Romero, Jonathan P. Olson, Matthias Degroote, Peter D. Johnson, Mária Kieferová, Ian D. Kivlichan, Tim Menke, Borja Peropadre, Nicolas P. D. Sawaya, Sukin Sim, Libor Veis, and Alán Aspuru-Guzik. 2018. Quantum Chemistry in the Age of Quantum Computing. (12 2018). arXiv:1812.09976v2 http://arxiv.org/abs/1812.09976v2 + +[24] Andrew M. Childs and Robin Kothari. 2011. Simulating Sparse Hamiltonians with Star Decompositions. In *Theory of Quantum Computation, Communication, and Cryptography*. Springer Berlin Heidelberg, 94–103. https://doi.org/10.1007/978-3-642-18073-6_8 arXiv:1003.3683v2 Theory of Quantum Computation, Communication, and Cryptography (TQC 2010), Lecture Notes in Computer Science 6519, pp. 94–103 (2011). + +[25] Andrew M. Childs and Jin-Peng Liu. 2019. Quantum spectral methods for differential equations. (01 2019). arXiv:1901.00961v1 http://arxiv.org/abs/1901.00961v1 + +[26] Andrew M. Childs, Jin-Peng Liu, and Aaron Ostrander. 2020. 
High-precision quantum algorithms for partial differential equations. (Feb 2020). arXiv:2002.07868v1 http://arxiv.org/abs/2002.07868v1 + +[27] Andrew M. Childs, Dmitri Maslov, Yunseong Nam, Neil J. Ross, and Yuan Su. 2018. Toward the first quantum simulation with quantum speedup. *Proceedings of the National Academy of Sciences* 115 (09 2018), 9456–9461. Issue 38. https://doi.org/10.1073/pnas.1801723115 arXiv:1711.10980v1 Proceedings of the National Academy of Sciences 115, 9456–9461 (2018). + +[28] Andrew M. Childs, Yuan Su, Minh C. Tran, Nathan Wiebe, and Shuchen Zhu. 2019. A Theory of Trotter Error. (Dec 2019). arXiv:1912.08854v1 http://arxiv.org/abs/1912.08854v1 + +[29] Andrew M. Childs and Nathan Wiebe. 2012. Hamiltonian Simulation Using Linear Combinations of Unitary Operations. (02 2012). https://doi.org/10.26421/QIC12.11-12 arXiv:1202.5822v1 Quantum Information and Computation 12, 901–924 (2012). + +[30] Richard Cleve and John Watrous. 2000. Fast parallel circuits for the quantum Fourier transform. (06 2000). arXiv:quant-ph/0006004v1 http://arxiv.org/abs/quant-ph/0006004v1 +---PAGE_BREAK--- + +[31] Patrick J. Coles, Stephan Eidenbenz, Scott Pakin, Adetokunbo Adedoyin, John Ambrosiano, Petr Anisimov, William Casper, Gopinath Chennupati, Carleton Coffrin, Hristo Djidjev, David Gunter, Satish Karra, Nathan Lemons, Shizeng Lin, Andrey Lokhov, Alexander Malyzhenkov, David Mascarenas, Susan Mniszewski, Balu Nadiga, Dan O'Malley, Diane Oyen, Lakshman Prasad, Randy Roberts, Phil Romero, Nandakishore Santhi, Nikolai Sinitsyn, Pieter Swart, Marc Vuffray, Jim Wendelberger, Boram Yoon, Richard Zamora, and Wei Zhu. 2018. Quantum Algorithm Implementations for Beginners. (04 2018). arXiv:1804.03719v1 http://arxiv.org/abs/1804.03719v1 + +[32] Pedro C. S. Costa, Stephen Jordan, and Aaron Ostrander. 2019. Quantum algorithm for simulating the wave equation. *Physical Review A* **99** (1 2019). Issue 1. https://doi.org/10.1103/PhysRevA.99.012323 arXiv:1711.05394v1 Phys. Rev. 
A **99**, 012323 (2019). + +[33] Steven A. Cuccaro, Thomas G. Draper, Samuel A. Kutin, and David Petrie Moulton. 2004. A new quantum ripple-carry addition circuit. (10 2004). arXiv:quant-ph/0410184v1 http://arxiv.org/abs/quant-ph/0410184v1 + +[34] Thomas G. Draper. 2000. Addition on a Quantum Computer. (08 2000). arXiv:quant-ph/0008033v1 http://arxiv.org/abs/quant-ph/0008033v1 + +[35] Edward Farhi, Jeffrey Goldstone, and Sam Gutmann. 2014. A Quantum Approximate Optimization Algorithm. (Nov 2014). arXiv:1411.4028v1 http://arxiv.org/abs/1411.4028v1 + +[36] Austin G. Fowler, Matteo Mariantoni, John M. Martinis, and Andrew N. Cleland. 2012. Surface codes: Towards practical large-scale quantum computation. (2012). https://doi.org/10.1103/PhysRevA.86.032324 arXiv:quant-ph/1208.0928v2 + +[37] Juan José García-Ripoll. 2019. Quantum-inspired algorithms for multivariate analysis: from interpolation to partial differential equations. (09 2019). arXiv:1909.06619v1 http://arxiv.org/abs/1909.06619v1 + +[38] András Gilyén, Yuan Su, Guang Hao Low, and Nathan Wiebe. 2018. Quantum singular value transformation and beyond: exponential improvements for quantum matrix arithmetics. (06 2018). arXiv:1806.01838v1 http://arxiv.org/abs/1806.01838v1 + +[39] Thomas Häner, Martin Roetteler, and Krysta M. Svore. 2016. Factoring using 2n+2 qubits with Toffoli based modular multiplication. (11 2016). arXiv:1611.07995v2 http://arxiv.org/abs/1611.07995v2 Quantum Information and Computation, Vol. 17, No. 7 & 8 (2017). + +[40] Aram W. Harrow, Avinatan Hassidim, and Seth Lloyd. 2009. Quantum Algorithm for Linear Systems of Equations. *Physical Review Letters* **103** (10 2009). Issue 15. https://doi.org/10.1103/PhysRevLett.103.150502 arXiv:0811.3171v3 Phys. Rev. Lett. vol. 15, no. 103, pp. 150502 (2009). + +[41] Hsin-Yuan Huang, Kishor Bharti, and Patrick Rebentrost. 2019. Near-term quantum algorithms for linear systems of equations. (Sep 2019). 
arXiv:1909.07344v2 http://arxiv.org/abs/1909.07344v2 + +[42] Iordanis Kerenidis and Anupam Prakash. 2017. Quantum gradient descent for linear systems and least squares. (04 2017). arXiv:1704.04992v3 http://arxiv.org/abs/1704.04992v3 + +[43] Iordanis Kerenidis and Anupam Prakash. 2018. A Quantum Interior Point Method for LPs and SDPs. (08 2018). arXiv:1808.09266v1 http://arxiv.org/abs/1808.09266v1 + +[44] Maria Kieferova, Artur Scherer, and Dominic Berry. 2018. Simulating the dynamics of time-dependent Hamiltonians with a truncated Dyson series. (05 2018). arXiv:1805.00582v1 http://arxiv.org/abs/1805.00582v1 Only eprint on arXiv. + +[45] Taewan Kim and Byung-Soo Choi. 2018. Efficient decomposition methods for controlled-Rnusing a single ancillary qubit. *Scientific Reports* **8**, 1 (03 Apr 2018), 5445. https://doi.org/10.1038/s41598-018-23764-x + +[46] Sarah K. Leyton and Tobias J. Osborne. 2008. A quantum algorithm to solve nonlinear differential equations. (12 2008). arXiv:0812.4423v1 http://arxiv.org/abs/0812.4423v1 + +[47] Guang Hao Low. 2018. Hamiltonian simulation with nearly optimal dependence on spectral norm. (07 2018). arXiv:1807.03967v1 http://arxiv.org/abs/1807.03967v1 + +[48] Guang Hao Low and Isaac L. Chuang. 2016. Hamiltonian Simulation by Qubitization. (10 2016). arXiv:1610.06546v2 http://arxiv.org/abs/1610.06546v2 Only available as eprint, no journal publication. + +[49] Guang Hao Low and Isaac L. Chuang. 2017. Hamiltonian Simulation by Uniform Spectral Amplification. (07 2017). arXiv:1707.05391v1 http://arxiv.org/abs/1707.05391v1 Only available as eprint. No journal publication. + +[50] Guang Hao Low and Isaac L. Chuang. 2017. Optimal Hamiltonian Simulation by Quantum Signal Processing. *Physical Review Letters* **118** (1 2017). Issue 1. https://doi.org/10.1103/PhysRevLett.118.010501 arXiv:1606.02685v2 Phys. Rev. Lett. **118**, 010501 (2017). + +[51] Michael Lubasch, Jaewoo Joo, Pierre Moinier, Martin Kiffner, and Dieter Jaksch. 2019. 
Variational Quantum Algorithms for Nonlinear Problems. (Jul 2019). arXiv:1907.09032v2 http://arxiv.org/abs/1907.09032v2 + +[52] Juan M. Pino, Joan M. Dreiling, Caroline Figgatt, John P. Gaebler, Steven A. Moses, Charles H. Baldwin, Michael Foss-Feig, David Hayes, K. Mayer, Ciarán Ryan-Anderson, and Brian Neyenhuis. 2020. Demonstration of the QCCD trapped-ion quantum computer architecture. (Mar 2020). arXiv:2003.01293v2 http://arxiv.org/abs/2003.01293v1 + +[53] Neil J Ross and Peter Selinger. 2014. Optimal ancilla-free Clifford+ T approximation of z-rotations. *arXiv preprint* arXiv:1403.2975 (2014). + +[54] Artur Scherer, Benoît Valiron, Siun-Chuon Mau, Scott Alexander, Eric van den Berg, and Thomas E. Chapuran. 2017. Concrete resource analysis of the quantum linear-system algorithm used to compute the electromagnetic scattering cross section of a 2D target. *Quantum Information Processing* **16** (3 2017). Issue 3. https://doi.org/10.1007/s11128-016-1495-5 arXiv:1505.06552v2 Quantum Inf Process (2017) 6: 60. + +[55] Changpeng Shao and Hua Xiang. 2020. Row and column iteration methods to solve linear systems on a quantum computer. +*Phys. Rev.* A **101** (Feb 2020), 022322. +Issue 2. +https://doi.org/10.1103/PhysRevA.101.022322 + +[56] Vivek V. Shende and Igor L. Markov. +On the CNOT-cost of TOFFOLI gates. +(2008). +arXiv:quant-ph/0803.2316 +---PAGE_BREAK--- + +[57] David Shmoys, Dominic W. Berry, Andrew M. Childs, Richard Cleve, Robin Kothari, and Rolando D. Somma. 2014. Exponential improvement in precision for simulating sparse Hamiltonians. In Proceedings of the 46th Annual ACM Symposium on Theory of Computing - STOC 14. 283-292. https://doi.org/10.1145/2591796.2591854 arXiv:1312.1414v2 + +Proceedings of the 46th ACM Symposium on Theory of Computing (STOC 2014), pp. 283-292 (2014). + +[58] Siddhartha Srivastava and Veera Sundararaghavan. 2018. Box algorithm for the solution of differential equations on a quantum annealer. (12 2018). 
arXiv:1812.10572v2 http://arxiv.org/abs/1812.10572v2 + +[59] Masuo Suzuki. 1986. Quantum statistical monte carlo methods and applications to spin systems. Journal of Statistical Physics 43 (6 1986), 883-909. Issue 5-6. https://doi.org/10.1007/BF02628318 + +[60] Masuo Suzuki. 1990. Fractal decomposition of exponential operators with applications to many-body theories and Monte Carlo simulations. Physics Letters A 146 (6 1990), 319-323. Issue 6. https://doi.org/10.1016/0375-9601(90)90962-N + +[61] Himanshu Thapliyal and Nagarajan Ranganathan. 2017. Design of Efficient Reversible Logic Based Binary and BCD Adder Circuits. (12 2017). https://doi.org/10.1145/2491682 arXiv:1712.02630v1 J. Emerg. Technol. Comput. Syst. 9 (2013) 17:1-17:31. + +[62] Blaga N. Todorova and René Steijl. 2020. Quantum algorithm for the collisionless Boltzmann equation. J. Comput. Phys. 409 (5 2020), 109347. https://doi.org/10.1016/j.jcp.2020.109347 + +[63] Almudena Carrera Vazquez. 2018. *Quantum Algorithm for Solving Tri-Diagonal Linear Systems of Equations*. Master's thesis. ETH Zürich. + +[64] Vlatko Vedral, Adriano Barenco, and Artur Ekert. 1996. Quantum networks for elementary arithmetic operations. Physical Review A 54, 1 (Jul 1996), 147-153. https://doi.org/10.1103/physreva.54.147 + +[65] Tao Xin, Shijie Wei, Jianlian Cui, Junxiang Xiao, Iñigo Arrazola, Lucas Lamata, Xiangyu Kong, Dawei Lu, Enrique Solano, and Guilu Long. 2018. A Quantum Algorithm for Solving Linear Differential Equations: Theory and Experiment. (07 2018). arXiv:1807.04553v1 http://arxiv.org/abs/1807.04553v1 + +[66] Xiaosi Xu, Jinzhao Sun, Suguru Endo, Ying Li, Simon C. Benjamin, and Xiao Yuan. 2019. Variational algorithms for linear algebra. (09 2019). arXiv:1909.03898v1 http://arxiv.org/abs/1909.03898v1 + +# Appendix A: Product-formula implementation details + +## 1. 
Hamiltonian simulation + +Hamiltonian simulation is the problem of constructing a quantum circuit that will evolve a quantum state according to a Hamiltonian matrix, following the Schrödinger equation. In other words, Hamiltonian simulation algorithms generate a quantum circuit performing the unitary transformation $U$ such that $||U - e^{-iHt}|| < \epsilon$, $H$ being a given Hamiltonian matrix, $t$ a time of evolution and $\epsilon$ a precision with respect to $||\cdot||$, the spectral norm. + +Several quantum algorithms have been developed in the last few years to solve the problem of s-sparse Hamiltonian simulation [17–20, 24, 29, 44, 47–50, 57]. Among these algorithms we decided to implement the product-formula approach [10, 17], for the reasons presented in section III A. + +The product formula algorithm has three main steps: decompose, simulate, recompose. It works by first decomposing the s-sparse Hamiltonian matrix $H$ that should be simulated as a sum of Hermitian matrices $H_j$ that are considered easy to simulate + +$$ H = \sum_{j=0}^{m-1} H_j. \qquad (A1) $$ + +The second step is then to simulate each $H_j$ separately, i.e. to create quantum circuits implementing $e^{-iH_j t}$ for all the $H_j$ in the decomposition in eq. (A1). The last step uses the simulations computed in step two to approximate $e^{-iHt}$. + +The very first questions that should be answered before starting any implementation of the product-formula algorithm are “What is an easy to simulate matrix?” and “What kind of Hermitian matrices are easy to simulate?”. + +## 2. Easy to simulate matrices + +One of the most desirable properties for an “easy to simulate” matrix is the possibility to simulate it exactly, i.e. to construct a quantum circuit that will perfectly implement $e^{-iHt}$. This property becomes a requirement when one wants rigorous bounds on the error of the final simulation. 
Another enviable property of these matrices is that they can be simulated with a low gate number and only a few calls to the matrix oracle. + +**Definition 3** (Easy to simulate matrix). A Hermitian matrix $H$ can be qualified as “easy to simulate” if there exists an algorithm that takes as input a time $t$ and the matrix $H$ and outputs a quantum circuit $C(H)_t$ such that +---PAGE_BREAK--- + +1. The quantum circuit $C(H)_t$ implements exactly the unitary transformation $e^{-iHt}$, i.e. + +$$||e^{-iHt} - C(H)_t|| = 0.$$ + +2. The algorithm only needs $\mathcal{O}(1)$ calls to the oracle of $H$ and $\mathcal{O}(\log N)$ additional gates, $N$ being the dimension of the matrix $H$. + +With this definition of an “easy to simulate” matrix, we can now search for matrices or groups of matrices that satisfy this definition. + +### a. Multiples of the identity + +The first and easiest matrices that fulfil the easy to simulate matrix requirements are the multiples of the identity matrix $\{\alpha I, \alpha \in \mathbb{R}\}$ with $I$ the identity matrix. The quantum circuit to simulate this class of matrices can be found in [63]. + +### b. 1-sparse Hermitian matrices + +A larger class of matrices that can be efficiently and exactly simulated are the 1-sparse, integer weighted, Hermitian matrices. Quantum circuits simulating exactly 1-sparse matrices with integer weights can be found in [10]. + +**Note 3.** Procedures simulating 1-sparse matrices with real (non-integer) weights are also described in the paper, but these matrices do not fall in the “easy to simulate” category because the procedures explained are exact only if all the matrix weights can be represented exactly with a fixed-point representation, which is not always verified. + +**Note 4.** Multiples of identity matrices presented in appendix A 2a are a special case of 1-sparse matrices. The two classes have been separated because more efficient quantum circuits exist for $\alpha I$ matrices. + +## 3. 
Decomposition of H + +Once the set of “easy to simulate” matrices has been established, the next step of the algorithm is to decompose the s-sparse matrix $H$ as a sum of matrices in this set. + +There are two possible ways of performing this decomposition, each one with its advantages and drawbacks: applying a procedure computing the decomposition automatically, or decompose the matrix $H$ beforehand and provide the decomposition to the algorithm. + +The first solution, which is to automatically construct the oracles of the $H_j$ matrices from the oracle of the $H$ matrix has been studied in [10] and [24]. Thanks to this automatic decomposition procedure, we only need to implement one oracle. This simplicity comes at the cost of a higher gate count: each call to the automatically constructed oracles of the matrices $H_j$ will require several calls to the oracle of $H$ along with additional gates. + +On the other hand, the second solution offers more control at the cost of less abstraction and more work. The decomposition of $H$ is not automatically computed and should be performed beforehand. Once the matrix $H$ has been decomposed as in eq. (9), the oracles for the matrices $H_j$ should be implemented. This means that we should now implement $m$ oracles instead of only 1 for the first solution. The main advantage of this method over the one using automatic-decomposition is that it gives us more control, a control that can be used to optimize even more the decomposition of eq. (A1) (less $H_j$ in the decomposition, $H_j$ matrices that can be simulated more efficiently, ...). + +All the advantages and drawbacks weighted, we chose to implement the second option for several reasons. First, the implementation of the automatic decomposition procedure adds a non-negligible implementation complexity to the whole Hamiltonian simulation procedure. 
Moreover, the automatic decomposition procedure can be implemented afterwards and plugged effortlessly into the non-automatic implementation. Finally, our use-case only required to simulate a 2-sparse Hamiltonian that can be decomposed as the sum of two 1-sparse, easy to simulate, Hermitian matrices, which makes the manual decomposition step manageable. + +## 4. Simulation of the $H_j$ + +Once the matrix $H$ has been decomposed following eq. (A1) with each $H_j$ being an “easy to simulate” matrix, the simulation of $H_j$ becomes a straightforward application of the procedures described in appendix A 2. + +After this step, we have access to quantum circuits implementing $e^{-iH_j t}$ for $j \in [0, m-1]$ and $t \in \mathbb{R}$. +---PAGE_BREAK--- + +FIG. 5. Graph $G_{\delta x}$ built from the discretisation of the 1-dimensional line $[0, 1]$ with $N_d$ discretisation points (i.e. $\delta x = \frac{1}{N_d-1}$). + +**5. Re-composition of the $e^{-iH_j t}$** + +The ultimate step of the algorithm is to approximate the desired evolution $e^{-iHt}$ with the evolutions $e^{-iH_j t}$. In the special case of mutually commuting $H_j$, this step is trivial as it boils down to using the properties of the exponential function on matrices and write $e^{iHt} = e^{i\sum_j H_j t} = \prod_j e^{iH_j t}$. But in the more realistic case where the matrices $H_j$ do not commute, a more sophisticated method should be used to approximate the evolution $e^{-iHt}$. To this end, we used the first-order Lie-Trotter-Suzuki product formula defined in definition 4. + +**Definition 4** (Lie-Trotter-Suzuki product formula [27, 59, 60]). 
The Lie-Trotter-Suzuki product formula approximates + +$$ \exp \left( \lambda \sum_{j=0}^{m-1} \alpha_j H_j \right) \qquad (A2) $$ + +with + +$$ S_2(\lambda) = \prod_{j=0}^{m-1} e^{\alpha_j H_j \lambda/2} \prod_{j=m-1}^{0} e^{\alpha_j H_j \lambda/2} \qquad (A3) $$ + +and can be generalized recursively to higher orders + +$$ S_{2k}(\lambda) = [S_{2k-2}(p_k\lambda)]^2 \times S_{2k-2}((1-4p_k)\lambda) [S_{2k-2}(p_k\lambda)]^2 \qquad (A4) $$ + +with $p_k = (4 - 4^{1/(2k-1)})^{-1}$ for $k > 1$. Using this formula, we have the approximation + +$$ e^{\lambda H} = \left[ S_{2k} \left( \frac{\lambda}{n} \right) \right]^n + O \left( \frac{|\lambda|^{2k+1}}{n^{2k}} \right). \qquad (A5) $$ + +We used the Lie-Trotter-Suzuki product formula with $\lambda = -it$ to approximate the operator $e^{-iHt}$ up to an error of $\epsilon \in O(\frac{t^{2k+1}}{n^{2k}})$. + +## Appendix B: Hermitian matrix construction and decomposition + +One of the main challenges in implementing a quantum wave equation solver lies in the construction and implementation of the needed oracles. This appendix describes the first step of the implementation process: the construction and decomposition of the Hamiltonian matrix that will be simulated using the Hamiltonian simulation procedure introduced in appendix A. + +This appendix follows the analysis performed in [32] and adds details and observations that will be referred to in appendix C when dealing with the actual oracle implementation. + +### 1. Hamiltonian matrix description + +In order to devise the Hamiltonian matrix that should be simulated to solve the wave equation, the first step is to discretise eq. (1) with respect to space. Such a discretisation can be seen as a graph $G_{\delta x}$ whose vertices are the discretisation points and with edges between nearest neighbour vertices. The graph $G_{\delta x}$ is depicted in fig. 5. 
+ +The graph Laplacian of $G_{\delta x}$, defined as + +$$ L(G_{\delta x})_{i,j} := \begin{cases} \deg(v_i) & \text{if } i=j \\ -1 & \text{if } (i \neq j) \land (v_i \text{ adjacent to } v_j) \\ 0 & \text{otherwise} \end{cases} \qquad (B1) $$ +---PAGE_BREAK--- + +can then be used to approximate the differential operator $\frac{\partial^2}{\partial x^2}$. By using the discretisation approximation + +$$ \frac{\partial^2 \phi}{\partial x^2}(i\delta x, t) \approx \frac{\phi_{i-1,t} - 2\phi_{i,t} + \phi_{i+1,t}}{\delta x^2} \qquad (B2) $$ + +with $\phi_{i,t} = \phi(i\delta x, t)$, and approximating $\phi(x, t)$ with a vector $\phi = [\phi_{i,t}]_{0\le i
GateToffoli countCNOT count1-qubit gate count# ancillasnotes
or1050
QFT03(2n2 - 2n + ⌊n/2⌋)2(n2 + n) H
4(n2 - n) T
3(n2 - n)/2 Rn
1 |0⟩-initRn gates might need to be decomposed [53].
add_arith20n - 1022n0n - 1 |0⟩-initSee [64].
add_qft06(2n2 - 2n + ⌊n/2⌋)2(n2 + n) H
4(n2 - n) T
3(n2 - n)/2 Rn
1 |0⟩-initSee QFT note on Rn. fig. 11.
sub_qft06(2n2 - 2n + ⌊n/2⌋)2(n2 + n) H
4(n2 - n) T
3(n2 - n)/2 Rn
1 |0⟩-initSee QFT note on Rn. fig. 9.
CARRY2(n - 1)2 + ⌊0, n - 1⌋2n + ⌊0, n - 1⌋ Xn - 1 borrowedSee [39].
n-contr. CNOT4n00n borrowedSee [1].
eq4n02[0, n] Xn borrowedfig. 13.
cmp2(n - 1)2 + ⌊0, n - 1⌋4n + ⌊0, n - 1⌋ Xn - 1 borrowedSee CARRY and appendix C 3 c.
A2n4n3n H 3n S
2n T 2n X
0See [10, Fig. 4.3].
e-iZ⊗Z⊗Ft8n24n36n Ph
8 X
0Adapted from [10, Fig. 4.6]
1-sparse HS10n28n3n H 3n S
2n T 2n + 8 X
36n Ph
0Oracle implementation cost not included. 2 calls to the oracle are required. fig. 7.
M14(n - 1)5 + 2[0, n - 1]10n + 2 + ⌊0, n - 1⌋ X1 |0⟩-init
n - 1 borrowed
add implementation cost not included. 2 calls to add are required. fig. 14.
V12(n - 1)2 + ⌊0, n - 1⌋4n + ⌊0, n - 1⌋ Xn - 1 borrowedfig. 15.
S10000eq. (C12).
M-14(n - 1)5 + 2[0, n - 1]10n + 2 + ⌊0, n - 1⌋ X1 |0⟩-init
n - 1 borrowed
add implementation cost not included. 2 calls to add are required. fig. 16.
V-12(n - 1)2 + ⌊0, n - 1⌋4n + ⌊0, n - 1⌋ Xn - 1 borrowedfig. 17.
S-116n + 105 + 8[0, n] Xn borrowedfig. 18.
+ +TABLE I. Precise gate count for the most important subroutines used in the quantum implementation of the wave equation solver. $n$ always represents the size of the input(s), except for the $n$-controlled CNOT where $n$ is the number of controls. When the number of gates depends on a generation-time value, the range of all the integer values possible is shown with square brackets. For example, `[0, n-1]` means that, depending on the generation-time value provided, the number of gates will be an integer between 0 and $n-1$ included. `$|0⟩$-init` ancillas represent the standard ancilla-type: qubits that are given in the state `$|0⟩$` and should be returned in that exact same state. On the other side, borrowed ancillas can be given in any state and should be returned in the exact same state they were borrowed in. +---PAGE_BREAK--- + +
UnitaryToffoli countCNOT count1-qubit gate count# ancillasnotes
$e^{-iH_1t}$$22n-12$$28n+7+3[0,n-1]$$3n H \quad 3n S \quad 2n T \quad 36n P_h \quad 30n + 10 + 2[0,n-1] X$$1|0\rangle\text{-init } n-1$ borrowedadd implementation cost not included. 4 calls to add are required.
$e^{-iH_{-1}t}$$38n-11$$28n+7+3[0,n-1]$$3n H \quad 3n S \quad 2n T \quad 36n P_h \quad 30n + 15 + 10[0,n] X$$1|0\rangle\text{-init } n-1$ borrowedadd implementation cost not included. 4 calls to add are required.
$e^{-iHt}$$82n-35$$84n+21+9[0,n-1]$$9n H \quad 9n S \quad 6n T \quad 108n P_h \quad 90n + 35 + 14[0,n] X$$1|0\rangle\text{-init } n-1$ borrowedadd implementation cost not included. 12 calls to add are required.
+ +TABLE II. Number of gates and ancillas needed to simulate the easy-to-simulate Hamiltonians $H_1$ and $H_{-1}$ that are part of the decomposition of $H$ as well as $e^{-iHt}$. It is important to realise that the gate counts for $e^{-iHt}$ are only valid up to a given $t$ or $\epsilon$ (once one is fixed, the value of the other can be computed). In order to make the gate count generic for any $t$ and $\epsilon$, the number of repetitions should be computed (see $n$ in eq. (A5)). Note that some of the $[0, n-1]$ ranges have been simplified to $[0, n]$ for conciseness. + +
Adder usedToffoli countCNOT count1-qubit gate count# ancillas
add_qft$82n - 35$$144n^2 - 60n$$24n^2 + 25n H \quad 9n S \quad 48n^2 - 42n T \quad 108n P_h \quad 18n^2 - 18n R_n \quad 114n + 35 + 14 [0, n] X$$2 |0\rangle\text{-init } n-1$ borrowed
add_arith$222n - 175$$348n + 21 + 9 [0, n-1]$$9n H \quad 9n S \quad 6n T \quad 108n P_h \quad 90n + 35 + 14 [0, n] X$$n |0\rangle\text{-init } n-1$ borrowed
+ +TABLE III. Number of gates and ancillas needed to simulate the Hamiltonian used to solve the 1-dimensional wave equation depending on the adder implementation used. It is important to realise that the gate counts for $e^{-iHt}$ reported in this table are only valid up to a given $t$ or $\epsilon$ (once one is fixed, the value of the other can be computed). In order to make the gate count generic for any $t$ and $\epsilon$, a number of repetitions $r$ should be computed (named $n$ in eq. (A5) and studied in [27, arXiv: Appendix F] and [28]). Note that the gate counts have been simplified by removing negligible terms when possible. + +The first bound has been devised by analytically bounding the error of simulation due to the Trotter-Suzuki formula approximation by $\epsilon_0$ + +$$ \left\| \exp \left[ -it \sum_{j=0}^{m-1} H_j \right] - \left[ S_{2k} \left( -\frac{it}{r} \right) \right]^r \right\| \le \epsilon_0 \qquad (\text{F1}) $$ + +and then let $\epsilon_0 \le \epsilon$ for a given desired precision $\epsilon$. If we let $\Lambda = \max_j ||H_j||$ and + +$$ \tau = 2m5^{k-1}\Lambda|t| \qquad (\text{F2}) $$ + +then + +$$ r_{2k}^{ana} = \max \left\{ \tau, \left( \frac{e\tau^{2k+1}}{3\epsilon} \right)^{\frac{1}{2k}} \right\} \qquad (\text{F3}) $$ + +This bound is called the *analytic bound*. +---PAGE_BREAK--- + +A better bound called the *minimised bound* can be devised by searching for the smallest possible $r$ that satisfies the conditions detailed in [27, Propositions F.3 and F.4]. This bound is rewritten in Equation (F4). 
+ +$$ r_{2k}^{min} = \min \left\{ r \in \mathbb{N}^* : \frac{\tau^{2k+1}}{3r^{2k}} \exp\left(\frac{\tau}{r}\right) < \epsilon \right\} \quad (F4) $$ + +Another bound involving nested commutators of the $H_i$ is described in [28] and gives + +$$ r_{2k}^{\text{comm}} \in \mathcal{O} \left( \frac{\alpha_{\text{comm}}^{\frac{1}{2k}} t^{1+\frac{1}{2k}}}{\epsilon^{\frac{1}{2k}}} \right) \quad (F5) $$ + +where $k$ is the order of the product-formula used, $t$ the time of simulation, $\epsilon$ the error and + +$$ \alpha_{\text{comm}} = \sum_{i_0, i_1, \ldots, i_p=0}^{m-1} ||[H_{i_p}, [\cdots [H_{i_1}, H_{i_0}] \cdots ]]|| \quad (F6) $$ + +Once the value of $r$ has been computed, the quantum circuit simulating the matrix $H$ for a time $\frac{t}{r}$ should be repeated $r$ times. This adds a factor of $r$ in front of all the gate counts computed in table I, table II and table III. + +### 3. Impact of error-correction + +When error-correction is studied, two gates are particularly important: $T$ and Toffoli gates. The $T$ gate has a prohibitive cost when compared to the Clifford quantum gates and implementing a Toffoli gate requires 7 such $T$ gates as noted in [36] and [56, Fig. 1]. + +Table IV summarises the cost of the non-Clifford quantum gates used in the implementation of the 1-dimensional wave equation solver. The rotation gates need to be approximated. One solution to approximate the $R_n$ and $P_h$ gates is given in [53]. In order to obtain practical results as opposed to theoretical ones, we chose to use the number computed in [45, Table 1]. + +The final $T$-count is summarised in fig. 20. From fig. 20(b) it is clear that the **add_arith** implementation is more efficient than the **add_qft** one. + +
GateT countNotes
T1
S2
CCNOT7See [36].
Ph379ε = 10-15, approximated from [45].
Rn379ε = 10-15, approximated from [45].
+ +TABLE IV. $T$-gate cost of the non Clifford quantum gates used in the wave equation solver implementation. +---PAGE_BREAK--- + +
Adder usedT-count
add_qft6870n² + 34660n - 245
add_arith42510n - 1225
+ +(a) Number of T-gates needed to simulate the Hamiltonian used to solve the 1-dimensional wave equation depending on the adder implementation used. Based on table III and table IV. + +(b) Plot of the T-count devised in fig. 20(a) for the two different adder implementations. + +FIG. 20. Analysis of the *T*-count of the 1-dimensional wave equation solver quantum implementation with respect to the adder implementation used. \ No newline at end of file diff --git a/samples_new/texts_merged/6218816.md b/samples_new/texts_merged/6218816.md new file mode 100644 index 0000000000000000000000000000000000000000..16727583595ce7dbe59f1b4a2a30466827f30b83 --- /dev/null +++ b/samples_new/texts_merged/6218816.md @@ -0,0 +1,379 @@ + +---PAGE_BREAK--- + +0020-7683(94)00077-8 + +FORMULAS FOR THE STIFFNESS OF COMPOSITES +WITH PERIODIC MICROSTRUCTURE + +R. LUCIANO + +University of Cassino, via Zamosh 43, Cassino, Italy† + +and + +E. J. BARBERO + +West Virginia University, Morgantown, WV 26506-6101, U.S.A. + +*(Received 19 October 1993; in revised form 23 April 1994)* + +**Abstract**—In this paper, the mechanical behavior of composite materials with periodic microstructure is analysed. The corresponding elastic problem is solved by using the Fourier series technique and assuming the homogenization eigenstrain to be piecewise constant. Then, the coefficients of the overall stiffness tensor of the composite material are expressed analytically in terms of the elastic properties of the constituents (fibers and matrix) and as a function of nine triple series which take into account the geometry of the inclusions. In the case of composite materials reinforced by long fibers, simple formulas for evaluating these series are proposed. Close-form expressions for the elastic moduli of the fiber reinforced composite with periodic microstructure and for the equivalent transversely isotropic material are obtained. Finally, several comparisons with experimental results are presented. + +# I. 
INTRODUCTION + +Micromechanical models represent an efficient tool to estimate the overall stiffness of composite materials and a large number of results and comparisons with experimental data are available (Aboudi, 1991; Mura, 1987; Nemat-Nasser and Hori, 1993). The simplest model is the composite sphere or cylinder scheme which was proposed by Hashin (1962). In this method the composite material is modeled as a gradation of sizes of spherical or cylindrical inclusions embedded in a continuous matrix phase. Otherwise, in the self-consistent scheme (S-C), formulated by Budiansky (1965) and Hill (1965a, b), the fibers or the defects are considered as a typical micro inclusion embedded in an unbounded homogeneous elastic solid characterized by the unknown moduli of the composite. Then the overall elastic properties are computed by an iterative numerical procedure to take into account the interaction effects between the phases. For example, Budiansky and O'Connell (1976), Laws (1977), Laws et al. (1983), Laws and Brockenbrough (1987), Laws and Dvorak (1987) and Hoening (1979) used the S-C method to estimate the elastic properties of cracked composite materials. They analysed several cases such as: different geometries of cracks (ellipsoidal or cylindrical), isotropic and orthotropic matrix, two or more phase composite materials and obtained closed form solutions useful for engineering applications. Hori and Nemat-Nasser (1983) applied the S-C method for materials damaged by open and closed cracks and obtained the anisotropic response of the composite as a function of the load conditions and of loading path. Although the self-consistent method is simple to use, in the case of high volume fraction of the inclusions, it cannot be always applied for the analysis of multi-phase composite materials (Christensen, 1990). Conversely, the generalized self-consistent method, proposed by Christensen and Lo (1979, 1986), gives good results also in this case. 
They used this scheme to estimate the effective shear modulus and obtained physically realistic results for both spherical and cylindrical inclusions. On the other hand, for different geometries of the inclusions, many authors employed another micromechanical model, based on the Mori-Tanaka's theory (Mori and Tanaka, 1973). + +†Presently at Department of Mechanical and Aerospace Engineering, West Virginia University, Morgantown, WV 26506-6101, U.S.A. +---PAGE_BREAK--- + +Fig. 1. Geometry of the unit cell D. + +They considered isotropic, transversely isotropic and orthotropic matrix and ellipsoidal, cylindrical and ribbon fibers or cracks [see for example, Taya and Chou (1981), Weng (1984), Zhao *et al.* (1989), Tandon and Weng (1984) and Taya (1981)]. Recently, Benveniste proposed a mathematical justification of the Mori-Tanaka's method and, for composites with or without cracks, obtained estimates for the overall stiffness and compliance tensor (Benveniste, 1987). Finally the behavior of the advanced composites was analysed by Aboudi (1991), who proposed the method of cells, and by Iwakuma and Nemat-Nasser (1983) who formulated the linear elastic problem of composites with periodic microstructure. In Nemat-Nasser and Taya (1981, 1985) and Nemat-Nasser *et al.* (1982) the concept of a unit cell was introduced and the Fourier series technique was applied to estimate the overall elastic properties of materials with periodic distributed voids. Several approximations to the distribution of the homogenization eigenstrains were considered to solve the problem and in the hypothesis of piecewise constant eigenstrains, Nemat-Nasser *et al.* (1993) proposed analytical expressions to evaluate the coefficients of the stiffness tensor of cracked solids. Otherwise, for composites with periodic elastic inclusions, they proposed a procedure which entails considerable numerical efforts [see Iwakuma and Nemat-Nasser (1983)]. 
In the present paper, close-form expressions for the coefficients of the stiffness tensor and for technical elastic moduli of composites materials with periodically distributed elastic inclusions or voids are proposed. Moreover, analytical expressions are given for the elastic moduli of the transversely isotropic material equivalent to the solid reinforced by periodic long fibers. Finally, comparisons with available experimental data, numerical results obtained by Aboudi (1991) and results of the generalized self-consistent method (Christensen and Lo, 1979) are presented. + +## 2. RELATION BETWEEN THE EIGENSTRAIN AND THE STRAIN INSIDE THE INCLUSION + +Consider an infinitely extended linearly elastic solid represented by an assembly of unit cells. For simplicity, let each cell $D$ be a parallelepiped with dimensions $a_j$ (Fig. 1) in the direction of the coordinate axes $x_j$ where $j = 1, 2, 3$, and let $V$ be its volume. Then denote by $\Omega$ the part of $D$ occupied by the inclusions, let $D - \Omega$ denote the matrix and let $f$ be the volume fraction of $\Omega$. + +Next, the relation between the eigenstrain and the strain inside the inclusion is introduced. In order to simulate the inclusions inside the body, consider the homogenization eigenstrain $\epsilon^*$ defined in all $D$, which must be periodic for the particular geometry of the problem and different to zero only in $\Omega$. Since the material is linear elastic, the actual stress tensor $\sigma$ inside the unit cell can be expressed in terms of $\epsilon^*$ and the actual strain tensor $\epsilon$ in the following way: +---PAGE_BREAK--- + +$$ \sigma = C(\epsilon - \epsilon^*) \text{ in } D \quad (1) $$ + +where C is the elasticity tensor of the matrix. Then, assuming the body forces equal to zero, the tensor $\sigma$ must satisfy the following equilibrium conditions: + +$$ \operatorname{div} \sigma = 0 \text{ in } D \quad (2) $$ + +where div denotes the divergence of a tensor field. 
Furthermore, since in a solid with periodic structure and suitable boundary conditions the displacement $\mathbf{u}$ are periodic, the following Fourier series representation of $\mathbf{u}$, $\epsilon$ and $\epsilon^*$ can be considered: + +$$ \mathbf{u}(x) = \sum_{\xi}^{\pm\infty} \bar{\mathbf{u}}(\xi) \exp(i\xi x) \quad (3) $$ + +$$ \varepsilon(x) = \operatorname{sym} (\nabla \mathbf{u}(x)) = \sum_{\xi}^{\pm\infty} \bar{\varepsilon}(\xi) \exp(i\xi x) \quad (4) $$ + +$$ \varepsilon^*(x) = \sum_{\xi}^{\pm\infty} \bar{\varepsilon}^*(\xi) \exp(i\xi x) \quad (5) $$ + +where $\xi = (\xi_1, \xi_2, \xi_3)$ with $\xi_j = 2\pi n_j / a_j$ ($n_j = 0, \pm 1, \pm 2, \dots$, $j$ not summed, $j = 1, 2, 3$) and: + +$$ \bar{\mathbf{u}}(\xi) = \int_D \mathbf{u}(x) \exp(-i\xi x) dx \quad (6) $$ + +$$ \bar{\varepsilon}(\xi) = \frac{i}{2} [\xi \otimes \bar{\mathbf{u}}(\xi) + \bar{\mathbf{u}}(\xi) \otimes \xi] \quad (7) $$ + +$$ \bar{\varepsilon}^*(\xi) = \int_D \varepsilon^*(x) \exp(-i\xi x) dx. \quad (8) $$ + +Combination of eqns (1) and (2) gives: + +$$ \operatorname{div} (C(\epsilon - \epsilon^*)) = 0 \text{ in } D \quad (9) $$ + +then by using eqns (4), (7) and (5) in (9) the following expressions are obtained: + +$$ -\xi \cdot C(\xi \otimes \bar{\mathbf{u}}(\xi)) = i\xi \cdot C\bar{\varepsilon}^*(\xi) \text{ for every } \xi \neq 0 \quad (10) $$ + +where the symbols $\otimes$ and $\cdot$ represent the outer and the inner products, respectively (Spiegel, 1959). 
Thus, since C represents the elastic tensor of the matrix, the coefficients $\bar{\mathbf{u}}(\xi)$ are obtained uniquely in terms of the $\bar{\varepsilon}^*(\xi)$ in the following way: + +$$ \bar{\mathbf{u}}(\xi) = -i(\xi \cdot C \circ \xi)^{-1} \circ \xi \cdot C\bar{\varepsilon}^*(\xi) \text{ for every } \xi \neq 0 \quad (11) $$ + +and from eqn (7) the Fourier coefficients of the corresponding strain are: + +$$ \bar{\varepsilon}(\xi) = \operatorname{sym} (\xi \otimes (\xi \cdot C \circ \xi)^{-1} \otimes \xi): C\bar{\varepsilon}^*(\xi) \text{ for every } \xi \neq 0. \quad (12) $$ + +Finally denoting: +---PAGE_BREAK--- + +$$P'(\xi) = \operatorname{sym} (\xi \otimes (\xi \cdot C \cdot \xi)^{-1} \otimes \xi) \quad (13)$$ + +obtain the actual strain inside the inclusion from eqn (12) using eqns (4) and (8) as: + +$$\epsilon(x) = \frac{1}{V} \sum_{\xi} P'(\xi) : C \int_D \epsilon^*(x) \exp(-i\xi(x'-x)) dx' \quad (14)$$ + +where a prime on the sum indicates that $\xi = 0$ is excluded in the summation. + +Now, note that the exact expression of the strain tensor $\epsilon(x)$ is not necessary to obtain the overall elastic tensor $C^*$ but only its volume average on $\Omega$ denoted by $(\bar{\epsilon} = \int_{\Omega} \epsilon(x) dx / V_{\Omega})$: + +$$\bar{\epsilon} = \frac{1}{V} \sum_{\xi} P'(\xi) : C \left( \frac{g_0(\xi)}{V_{\Omega}} \right) \int_D \epsilon^*(x) \exp(-i\xi x') dx' \quad (15)$$ + +where $V_{\Omega}$ is the volume of the inclusion and: + +$$g_0(\xi) = \int_{\Omega} \exp(i\xi x) dx. \quad (16)$$ + +A good approximation of eqn (15) is obtained when a constant $\epsilon^*$ is considered in $\Omega$, as shown in Nemat-Nasser *et al.* (1982). 
Then, replacing $\epsilon^*$ with its volume average $\bar{\epsilon}^*$, eqn (15) becomes: + +$$\bar{\epsilon} = \frac{1}{V} \sum_{\xi} P'(\xi) : C \left( \frac{g_0(\xi)g_0(-\xi)}{V_{\Omega}} \right) \bar{\epsilon}^* \quad (17)$$ + +or + +$$\bar{\epsilon} = f \sum_{\xi}^{+\infty'} \left( \frac{g_0(\xi)}{V_{\Omega}} \right) \left( \frac{g_0(-\xi)}{V_{\Omega}} \right) P'(\xi) : C : \bar{\epsilon}^* \quad (18)$$ + +and by denoting: + +$$t(\xi) = f \left( \frac{g_0(\xi)}{V_{\Omega}} \right) \left( \frac{g_0(-\xi)}{V_{\Omega}} \right) \quad (19)$$ + +and + +$$P = \sum_{\xi}^{+\infty'} t(\xi) P'(\xi) \quad (20)$$ + +the following expression holds: + +$$\bar{\epsilon} = P : C : \bar{\epsilon}^* \text{ in } \Omega. \quad (21)$$ + +Note that eqn (21) represents the relation between the volume average of the strain inside the inclusion $\bar{\epsilon}$ and the volume average of the eigenstrain $\bar{\epsilon}^*$. + +### 3. OVERALL STIFFNESS TENSOR + +In order to obtain the homogenization eigenstrain which simulates the presence of the periodic inclusions inside the body, consider an average strain tensor $\bar{\epsilon}_0$ in the unit cell, which is arbitrarily prescribed. In this hypothesis the following average consistency condition (equivalent eigenstrain method) can be used (Nemat-Nasser and Hori, 1993): +---PAGE_BREAK--- + +$$C': (\bar{\varepsilon}_0 + P : C : \bar{\varepsilon}^*) = C : (\bar{\varepsilon}_0 + (P : C - I^{(4)}) : \bar{\varepsilon}^*) \quad (22)$$ + +where $C'$ is the elastic tensor of the inclusion and $I^{(4)}$ is the identity fourth-order tensor. Observe that the tensor $P$ takes into account the geometry of the inclusion and can be evaluated once and for all. Then from eqn (22), the equivalent average volume eigenstrain $\bar{\varepsilon}^*$ can be solved in terms of the tensors $C'$, $C$, $P$ and $\bar{\varepsilon}_0$ as: + +$$\bar{\varepsilon}^* = [((C-C')^{-1}-P)C]^{-1}\bar{\varepsilon}_0. 
\quad (23)$$ + +Furthermore, since in this case the uniform overall stress $\sigma_0$ in the unit cell is: + +$$C^*: \bar{\epsilon}_0 = C: (\bar{\epsilon}_0 - f \bar{\epsilon}^*) \quad (24)$$ + +by using eqn (23) and noting that $\bar{\epsilon}_0$ is arbitrary, the following expression of the overall stiffness tensor of the composite material is obtained: + +$$C^* = C - f((C-C')^{-1}-P)^{-1}. \quad (25)$$ + +It is worth noting that evaluation of $C^*$ [eqn (25)] involves the inversion of a symmetric tensor since $P$, $C$ and $C'$ are all symmetric tensors. In particular if the matrix is isotropic, denoting by $\bar{\xi} = \xi/|\xi|$, the tensor $P$ is (Mura, 1987; Nemat-Nasser and Hori, 1993): + +$$P = \frac{1}{\mu_0} \sum_{\xi}^{\pm\infty} t(\xi) \left( \operatorname{sym}(\bar{\xi} \otimes I^{(2)} \otimes \bar{\xi}) - \frac{1}{2(1-\nu_0)} (\bar{\xi} \otimes \bar{\xi} \otimes \bar{\xi} \otimes \bar{\xi}) \right) \quad (26)$$ + +where $\mu_0$ and $\nu_0$ are the shear modulus and the Poisson ratio of the matrix, respectively and $I^{(2)}$ is the identity second-order tensor. Hence, when the matrix and the inclusion are both isotropic, eqn (25) can be written: + +$$C^* = \lambda_0 I^{(2)} \otimes I^{(2)} + 2\mu_0 I^{(4)} - f[(\lambda_0 - \lambda_1)I^{(2)} \otimes I^{(2)} + 2(\mu_0 - \mu_1)I^{(4)}]^{-1} \\ - \frac{1}{\mu_0} \sum_{\xi}^{\pm\infty} t(\xi) \left[ \left( \operatorname{sym}(\bar{\xi} \otimes I^{(2)} \otimes \bar{\xi}) - \frac{1}{2(1-\nu_0)} (\bar{\xi} \otimes \bar{\xi} \otimes \bar{\xi} \otimes \bar{\xi}) \right) \right]^{-1} . \quad (27)$$ + +Here $\mu_0$, $\lambda_0$, $\mu_1$ and $\lambda_1$ are the Lamé constants of the matrix and the inclusion, respectively. 
Then, defining the following series $S_l$ (with $l=1-9$) as: + +$$S_1 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_1^2, \quad S_2 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_2^2, \quad S_3 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_3^2$$ + +$$S_4 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_1^4, \quad S_5 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_2^4, \quad S_6 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_3^4$$ + +$$S_7 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_2^2\bar{\xi}_3^2, \quad S_8 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_1^2\bar{\xi}_3^2, \quad S_9 = \sum_{\xi}^{\pm\infty'} t(\xi)\bar{\xi}_1^2\bar{\xi}_2^2$$ + +(28) + +the final expressions of the components of the tensor $C^*$ different from zero can be written in the following way: + +$$C_{11}^* = \lambda_0 + 2\mu_0 - f \left( \frac{S_3 S_2}{\mu_0^2} - \frac{S_5 S_3 + S_6 S_2}{\mu_0^2 g} - \frac{a(S_2 + S_3)}{2\mu_0 c} + \frac{S_6 S_5 - S_7^2}{\mu_0^2 g^2} + \frac{a(S_5 + S_6) + 2bS_7}{2\mu_0 gc} + \frac{a^2 - b^2}{4c^2} \right) / D$$ +---PAGE_BREAK--- + +$$C_{12}^* = \lambda_0 + f\left(\left(-\frac{S_9}{\mu_0^2 g} + \frac{b}{2c\mu_0}\right)S_3 + \frac{S_9 S_6 - S_8 S_7}{\mu_0^2 g^2} - \frac{b(S_6 - S_7) - bS_8 - aS_9}{2c\mu_0 g} - \frac{ba+b^2}{4c^2}\right)/D$$ + +$$C_{13}^* = \lambda_0 - f\left(\left(\frac{S_8}{\mu_0^2 g} - \frac{b}{2c\mu_0}\right)S_2 - \frac{S_8 S_5 - S_9 S_7}{\mu_0^2 g^2} + \frac{b(S_5 - S_7) - aS_8 - bS_9}{2c\mu_0 g} + \frac{ab+b^2}{4c^2}\right)/D$$ + +$$C_{22}^* = \lambda_0 + 2\mu_0 - f\left(\frac{S_3 S_1}{\mu_0^2} - \frac{S_4 S_3 + S_6 S_1}{\mu_0^2 g} - \frac{a(S_1 + S_3)}{2\mu_0 c} + \frac{S_6 S_4 - S_8^2}{\mu_0^2 g^2} + \frac{a(S_4 + S_6) + 2bS_8}{2\mu_0 gc} + \frac{a^2 - b^2}{4c^2}\right)/D$$ + +$$C_{33}^* = \lambda_0 + 2\mu_0 - f\left(\frac{S_2 S_1}{\mu_0^2} - \frac{S_4 S_2 + S_5 S_1}{\mu_0^2 g} - \frac{a(S_1 + S_2)}{2\mu_0 c} + \frac{S_5 S_4 - S_9^2}{\mu_0^2 g^2} + \frac{a(S_5 + S_4) + 2bS_9}{2\mu_0 gc} + \frac{a^2 - b^2}{4c^2}\right)/D$$ + +$$C_{23}^* = \lambda_0 + f\left(\left(-\frac{S_7}{\mu_0^2 g} + \frac{b}{2c\mu_0}\right)S_1 + \frac{S_7 
S_4 - S_9 S_8}{\mu_0^2 g^2} - \frac{b(S_4 - S_8 - S_9) - aS_7}{2c\mu_0 g} - \frac{ab+b^2}{4c^2}\right)/D$$ + +$$C_{44}^* = \mu_0 - f\left(-\frac{S_2}{\mu_0} - \frac{S_3}{\mu_0} + (\mu_0 - \mu_1)^{-1} + \frac{4S_7}{\mu_0(2-2v_0)}\right)^{-1}$$ + +$$C_{55}^* = \mu_0 - f\left(-\frac{S_1}{\mu_0} - \frac{S_3}{\mu_0} + (\mu_0 - \mu_1)^{-1} + \frac{4S_8}{\mu_0(2-2v_0)}\right)^{-1}$$ + +$$C_{66}^* = \mu_0 - f\left(-\frac{S_1}{\mu_0} - \frac{S_2}{\mu_0} + (\mu_0 - \mu_1)^{-1} + \frac{4S_9}{\mu_0(2-2v_0)}\right)^{-1} \quad (29)$$ + +where: + +$$ +\begin{aligned} +D &= -\frac{S_{3}S_{2}S_{1}}{\mu_{0}^{3}} + \frac{(S_{6}S_{2} + S_{6}S_{2} + S_{6}S_{2})S_{1}}{\mu_{0}^{3}g} + \frac{a(S_{1}S_{2} + (S_{1}+S_{2})S_{3})}{2\mu_{0}^{2}c} \\ +&\quad + \frac{(S_{5}S_{4} + S_{7}^{2})S_{1} + (S_{6}S_{4} + S_{8}^{2})S_{2} + (S_{5}S_{4} + S_{9}^{2})S_{3}}{\mu_{0}^{3}g^{2}} \\ +&\quad - \frac{(aS_{5} + aS_{6} + 2bS_{7}^{2})S_{1} + (aS_{4} + aS_{6} + 2bS_{8}^{2})S_{2} + (aS_{4} + aS_{5} + 2bS_{9}^{2})S_{3}}{2\mu_{0}^{2}gc} \\ +&\quad + \frac{(b^{2}-a^{2})(S_{1}+S_{2}+S_{3})}{4\mu_{0}c^{2}} + \frac{(S_{5}S_{6}-S_{7}^{2})S_{4}-S_{8}^{2}S_{5}-S_{9}^{2}S_{6}-2S_{8}S_{9}S_{7}}{\mu_{0}^{3}g^{3}} \\ +&\quad + \frac{(aS_{5}+aS_{6}+2bS_{7})S_{4}-(aS_{7}+2bS_{8}+2bS_{9})S_{7}+(2bS_{5}-aS_{8}+2bS_{9})S_{8}}{-aS_{9}^{2}+(2bS_{9}+aS_{5})S_{6}} \\ +&\quad + \frac{a(aS_{4}+aS_{5}+aS_{6}+2(bS_{7}+bS_{8}+bS_{9}))}{4\mu_{0}gc^{2}} + \frac{d(2(S_{7}+S_{8}+S_{9})-(S_{4}+S_{5}+S_{6}))}{4} \\ +&\quad + \frac{a^{3}-3ab^{2}-2b^{3}}{8c^{3}} +\end{aligned} +$$ + +(30) +---PAGE_BREAK--- + +and + +$$ +a = \mu_1 - \mu_0 - 2\mu_1 v_0 + 2\mu_0 v_1 +$$ + +$$ +b = -\mu_0 v_0 + \mu_1 v_1 + 2\mu_0 v_0 v_1 - 2\mu_1 v_0 v_1 +$$ + +$$ +c = (\mu_0 - \mu_1)(-\mu_0 + \mu_1 - \mu_0 v_0 - 2\mu_1 v_0 + 2\mu_0 v_1 + \mu_1 v_1 + 2\mu_0 v_0 v_1 - 2\mu_1 v_0 v_1) +$$ + +$$ +d = b^2 / (\mu_0 gc^2) +$$ + +$$ +g = (2 - 2v_0). 
\tag{31} +$$ + +Numerical values for the series $S_i$ are given by Nemat-Nasser *et al.* (1982) and Iwakuma and Nemat-Nasser (1983) for several geometries of the inclusions. It is worth noting that the stiffness values presented by Nemat-Nasser *et al.* (1982) and Iwakuma and Nemat-Nasser (1983) can be obtained by using eqns (29) to (31). + +4. UNIDIRECTIONAL COMPOSITE + +In the case of composite material reinforced by long circular cylindrical fibers, five +series are different from zero and only three are independent (Nemat-Nasser et al., 1982). +For unidirectional fibers aligned with the x₁-axis, the tensor ε*(x) is constant in the x₁- +direction, therefore the Fourier series of ε*(x) in the x₁-direction reduces to a constant. +Then, for the case of fibers aligned with the x₁-axis, we have: + +$$ +S_1 = S_4 = S_8 = S_9 = 0 \\ +S_2 = S_3, \quad S_5 = S_6. \tag{32} +$$ + +Therefore, the following formulas can be used to evaluate the stiffness tensor of a uni- +directional composite with periodic microstructure: + +$$ +C_{11}^* = \lambda_0 + 2\mu_0 - f \left[ \frac{S_3^2}{\mu_0^2} - \frac{2S_6S_3}{\mu_0^2 g} - \frac{aS_3}{\mu_0 c} + \frac{S_6^2 - S_7^2}{\mu_0^2 g^2} + \frac{aS_6 + bS_7}{\mu_0 gc} + \frac{a^2 - b^2}{4c^2} \right] / D +$$ + +$$ +C_{12}^* = \lambda_0 + f \left[ \frac{S_3}{2c\mu_0} - \frac{S_6 - S_7}{2c\mu_0 g} - \frac{a+b}{4c^2} \right] / D +$$ + +$$ +C_{23}^* = \lambda_0 + f \left[ \frac{aS_7}{2\mu_0 gc} - \frac{ba+b^2}{4c^2} \right] / D +$$ + +$$ +C_{22}^* = \lambda_0 + 2\mu_0 - f \left[ -\frac{aS_3}{2\mu_0 c} + \frac{aS_6}{2\mu_0 gc} + \frac{a^2-b^2}{4c^2} \right] / D +$$ + +$$ +C_{44}^* = \mu_0 - f \left[ -\frac{2S_3}{\mu_0} + (\mu_0 - \mu_1)^{-1} + \frac{4S_7}{\mu_0(2-2v_0)} \right]^{-1} +$$ + +$$ +C_{66}^{*} = \mu_{0} - f \left[ -\frac{S_{3}}{\mu_{0}} + (\mu_{0} - \mu_{1})^{-1} \right]^{-1} \quad (33) +$$ + +where: +---PAGE_BREAK--- + +$$D = \frac{aS_3^2}{2\mu_0^2 c} - \frac{aS_6 S_3}{\mu_0^2 gc} + \frac{a(S_6^2 - S_7^2)}{2\mu_0^2 g^2 c} + 
\frac{S_3(b^2 - a^2)}{2\mu_0 c^2} \\ + \frac{S_6(a^2 - b^2) + S_7(ab + b^2)}{2\mu_0 gc^2} + \frac{(a^3 - 2b^3 - 3ab^2)}{8c^3} \quad (34)$$ + +and + +$$a = \mu_1 - \mu_0 - 2\mu_1 v_0 + 2\mu_0 v_1$$ + +$$b = -\mu_0 v_0 + \mu_1 v_1 + 2\mu_0 v_0 v_1 - 2\mu_1 v_0 v_1$$ + +$$c = (\mu_0 - \mu_1)(-\mu_0 + \mu_1 - \mu_0 v_0 - 2\mu_1 v_0 + 2\mu_0 v_1 + \mu_1 v_1 + 2\mu_0 v_0 v_1 - 2\mu_1 v_0 v_1)$$ + +$$g = (2 - 2v_0) \qquad (35)$$ + +where the series $S_3$, $S_6$, $S_7$ are given by Nemat-Nasser *et al.* (1982) in tabular form for several values of the volume fraction of the inclusions. However, the tabular data can be fitted with parabolic expressions using a least-square method. In the case of long fibers, the following expressions fit the data with a correlation coefficient $R = 1$: + +$$S_3 = 0.49247 - 0.47603f - 0.02748f^2$$ + +$$S_6 = 0.36844 - 0.14944f - 0.27152f^2$$ + +$$S_7 = 0.12346 - 0.32035f + 0.23517f^2. \quad (36)$$ + +This procedure avoids the numerical evaluation of the series for each value of the fiber volume fraction, which entails significant computational effort, and also allows us to arrive at algebraic expressions for the elastic moduli. + +## 5. TRANSVERSELY ISOTROPIC MATERIAL + +Because of the periodicity of the microstructure, the stiffness tensor $C^*$ for uni-directional composite represents an orthotropic material with square symmetry. In the case considered in the previous section, the directions $x_2$ and $x_3$ are equivalent and the stiffness tensor is unchanged by a rotation about $x_1$ of $n\pi/2$ ($n = 0, \pm 1, \pm 2, ...$). This implies that only six components are required to describe the tensor completely. + +In order to obtain a transversely isotropic stiffness tensor, equivalent in average sense to the stiffness tensor with square symmetry, the following averaging procedure (Aboudi, 1991) is used. 
A rotation $\theta$ about the $x_1$-axis of the tensor $C^*$ produces + +$$B(\theta) = Q(\theta) C^* Q^T(\theta) \quad (37)$$ + +where $Q(\theta)$ is the fourth-order orthogonal rotation tensor. Then the equivalent transversely isotropic tensor is obtained as: + +$$\vec{B} = \frac{1}{\pi} \int_{0}^{\pi} B(\theta) d\theta. \quad (38)$$ + +Then, using the relations between the engineering constants and the components of the $\vec{B}$ tensor, the following expressions are obtained explicitly in terms of the coefficients of the tensor $C^*$ [eqns (33)-(36)]: +---PAGE_BREAK--- + +Fig. 2. Comparison with experimental results of transverse modulus $E_T$ normalized with respect to the matrix modulus $E_0$. + +$$ +\begin{align*} +E_A &= C_{11}^* - \frac{2C_{12}^{*2}}{C_{22}^* + C_{23}^*} \\ +E_T &= \frac{(2C_{11}^* C_{22}^* + 2C_{11}^* C_{23}^* - 4C_{12}^{*2})(C_{22}^* - C_{23}^* + 2C_{44}^*)}{3C_{11}^* C_{22}^* + C_{11}^* C_{23}^* + 2C_{11}^* C_{44}^* - 4C_{12}^{*2}} \\ +G_A &= C_{66}^* \\ +G_T &= \frac{C_{22}^*}{4} - \frac{C_{23}^*}{4} + \frac{C_{44}^*}{2} = \frac{E_T}{2(1+v_T)} \\ +v_A &= \frac{C_{12}^*}{C_{22}^* + C_{23}^*} +\end{align*} +$$ + +$$ v_T = \frac{C_{11}^* C_{22}^* + 3C_{11}^* C_{23}^* - 2C_{11}^* C_{44}^* - 4C_{12}^{*2}}{3C_{11}^* C_{22}^* + C_{11}^* C_{23}^* + 2C_{11}^* C_{44}^* - 4C_{12}^{*2}} \quad (39) $$ + +In particular the transverse shear modulus $G_T$ can be written in the following way: + +$$ G_T = \mu_0 - \frac{f}{4} \left[ \left( -\frac{aS_3}{2\mu_0 c} + \frac{a(S_7+S_6)}{2\mu_0 gc} - \frac{ba+2b^2-a^2}{4c^2} \right) / D + 2 \left( -\frac{2S_3}{\mu_0} + (\mu_0-\mu_1)^{-1} + \frac{4S_7}{\mu_0(2-2v_0)} \right)^{-1} \right] \quad (40) $$ + +where a, b, c, D and g are given in eqn (35) and $S_3$, $S_6$ and $S_7$ can be evaluated by eqn (36). + +## 6. COMPARISONS WITH EXPERIMENTAL RESULTS + +Comparisons with experimental results and the expressions proposed by other authors are presented in this section. 
Tsai and Hahn (1980) measured the transverse Young's modulus $E_T$ and the axial shear modulus $G_A$ of glass-epoxy composite for several values of the fiber volume fraction. The properties of the constituents are $v_0 = 0.38$, $v_1 = 0.22$, and $E_1/E_0 = 21.19$. The results obtained with eqns (39) and (36) (present result) are compared to the experimental data and to predictions using the method of cells (Aboudi, 1991) in Figs 2 and 3. Predicted values of the axial and transverse Poisson ratios are shown in Fig. 4. For the same properties of the constituents, the axial modulus predicted by the first of eqns (39) coincides with the rule of mixture estimate and the axial shear modulus obtained +---PAGE_BREAK--- + +Fig. 3. Comparison with experimental results of axial shear modulus $G_A$ normalized with respect to the matrix modulus $\mu_0$. + +Fig. 4. Transverse and axial Poisson ratio ($v_T$ and $v_A$) as a function of the fiber volume fraction for glass-epoxy composite. + +Fig. 5. Comparison with general self-consistent method of transverse shear modulus $G_T$ normalized with respect to the matrix modulus $\mu_0$. + +from the third of eqns (39) gives the same values of the expressions proposed by Christensen and Lo (1979), for all values of the fiber volume fraction. Then, in Fig. 5 the transverse shear modulus obtained by eqn (40) is compared with the analytical expression proposed +---PAGE_BREAK--- + +by Christensen and Lo (1979) and with the transverse shear modulus in the material with square symmetry $C_{44}^*$ [given by eqn (33)]. + +## 7. CONCLUSIONS + +Simple formulas for the coefficients of the stiffness tensor of composite materials with general types of elastic inclusions or voids with periodic microstructure are presented. These formulas are reduced for the particular case of long fiber composites and the engineering properties of equivalent transversely isotropic materials are proposed. Good agreement with available experimental data is obtained. 
The interaction effects between the constituents are fully accounted for. + +*Acknowledgements*—This work was supported by the Italian National Council of Research (CNR) and the Constructed Facilities Center (CFC) at West Virginia University. + +## REFERENCES + +- Aboudi, J. (1991). *Mechanics of Composite Materials*. Elsevier Science Publishers, Netherlands. +- Benveniste, Y. (1987). A new approach to the application of Mori-Tanaka's theory in composite materials. *Mech. Mater.* **6**, 147–157. +- Budiansky, B. (1965). On the elastic moduli of heterogeneous materials. *J. Mech. Phys. Solids* **13**, 213–227. +- Budiansky, B. and O'Connell, R. J. (1976). Elastic moduli of a cracked solid. *Int. J. Solids Structures* **12**, 81–97. +- Christensen, R. M. (1990). A critical evaluation for a class of micromechanics models. *J. Mech. Phys. Solids* **38**(3), 379–404. +- Christensen, R. M. and Lo, K. H. (1979). Solutions for effective shear properties in three phase sphere and cylinder models. *J. Mech. Phys. Solids* **27**, 315–330. +- Christensen, R. M. and Lo, K. H. (1986). Erratum: solutions for effective shear properties in three phase sphere and cylinder models. *J. Mech. Phys. Solids* **34**(6), 639. +- Hashin, Z. (1962). The elastic moduli of heterogeneous materials. *J. Appl. Mech.* **29**, Trans. ASME **84**(E), 143–150. +- Hill, R. (1965a). Continuum micromechanics of elasto-plastic polycrystal. *J. Mech. Phys. Solids* **13**, 89–100. +- Hill, R. (1965b). A self-consistent mechanics of composite materials. *J. Mech. Phys. Solids* **13**, 227–240. +- Hoening, A. (1979). Elastic moduli of a non-randomly cracked body. *Int. J. Solids Structures* **15**, 137–154. +- Hori, H. and Nemat-Nasser, S. (1983). Overall moduli of solids with microcracks: load-induced anisotropy. *J. Mech. Phys. Solids* **31**(2), 155–171. +- Iwakuma, T. and Nemat-Nasser, S. (1983). Composites with periodic microstructure. *Comput. Structures* **16**(1–4), 13–19. +- Laws, N. (1977). 
A note on interaction energies associated with cracks in anisotropic media. *Phil. Mag.* **36**, 367–372. +- Laws, N. and Brockenbrough, J. R. (1987). The effect of micro-crack system on the loss of stiffness of brittle solids. *Int. J. Solids Structures* **23**(9), 1247–1268. +- Laws, N. and Dvorak, G. J. (1987). The effect of fiber breaks and aligned penny-shaped cracks on the stiffness and energy release rates in unidirectional composites. *Int. J. Solids Structures* **23**(9), 1269–1283. +- Laws, N., Dvorak, G. J. and Hejazi, M. (1983). Stiffness changes in unidirectional composites caused by crack systems. *Mech. Mater.* **2**, 123–137. +- Mori, T. and Tanaka, K. (1973). Average stress in matrix and average elastic energy of materials with misfitting inclusions. *Acta Metall.* **21**, 571–574. +- Mura, T. (1987). *Micromechanics of Defects in Solids* (2nd edn, rev). Dordrecht, The Netherlands. +- Nemat-Nasser, S. and Hori, M. (1993). *Micromechanics: Overall Properties of Heterogeneous Solids*. Elsevier Science Publishers, Amsterdam. +- Nemat-Nasser, S., Iwakuma, T. and Hejazi, M. (1982). On composites with periodic structure. *Mech. Mater.* **1**, 239–267. +- Nemat-Nasser, S and Taya, M. (1981). On effective moduli of an elastic body containing periodically distributed voids. *Q. Appl. Math.* **39**, 43–59. +- Nemat-Nasser, S. and Taya, M. (1985). On effective moduli of an elastic body containing periodically distributed voids: comments and corrections. *Q. Appl. Math.* **43**, 187–188. +- Nemat-Nasser, S., Yu, N. and Hori, M. (1993). Solids with periodically distributed cracks. *Int. J. Solids Structures* **30**, 2071–2095. +- Spieget, M. R. (1959). *Vector Analysis*. Schum's Outline Series. McGraw-Hill, New York. +- Tandon, G. P. and Weng, G. J. (1984). The effect of aspect ratio of inclusions on the elastic properties of unidirectional aligned composites. *Polymer Compos.* **5**, 327–333. +- Taya, M. (1981). 
On stiffness and strength of an aligned short-fiber reinforced composite containing penny-shaped cracks in the matrix. *J. Compos. Mater.* **15**, 198–210. +- Taya, M. and Chou, T. W. (1981). On two kinds of ellipsoidal inhomogeneities in an infinite elastic body: an application to a hybrid composite. *Int. J. Solids Structures* **17**, 553–563. + + +---PAGE_BREAK--- + +Tsai, S. W. and Hahn, H. T. (1980). *Introduction to Composite Materials*. Technomic, Lancaster, PA. + +Weng, G. J. (1984). Some elastic properties of reinforced solids, with special reference to isotropic ones containing spherical inclusions. *Int. J. Engng Sci.* **22**(7), 845-856. + +Zhao, Y. H., Tandon, G. P. and Weng, G. J. (1989). Elastic moduli for a class of porous materials. *Acta Mechanica* **76**, 105-130. \ No newline at end of file diff --git a/samples_new/texts_merged/6293016.md b/samples_new/texts_merged/6293016.md new file mode 100644 index 0000000000000000000000000000000000000000..76fd63a9d080e7373c286dafd71424d72e4306d9 --- /dev/null +++ b/samples_new/texts_merged/6293016.md @@ -0,0 +1,525 @@ + +---PAGE_BREAK--- + +On the Relation Between Primitive Recursion, +Schematization, and Divergence + +Miki HERMANN* + +CRIN (CNRS) and INRIA-Lorraine +Campus Scientifique, BP 239, +54506 Vandœuvre-lès-Nancy, France + +e-mail: Miki.Hermann@loria.fr + +Abstract + +The paper presents a new schematization of infinite families of terms called the primal grammars, based on the notion of primitive recursive rewrite systems. This schematization is presented by a generating term and a canonical rewrite system. It is proved that the class of primal grammars covers completely the class of crossed rewrite systems. This proof contains a construction of a primal grammar from a crossed rewrite system. 
+ +# 1 Introduction + +Infinite sequences of terms, equations, rules or substitutions of common origin (sometimes called *infinite families of*) appear frequently at different moments within equational reasoning, automated deduction, and logic programming. One of these moments is e.g. the divergent behavior of the completion procedure when it is applied to certain rewrite systems. There exists sufficient conditions, presented in the form of patterns called *crossed rewrite systems*, whose presence guarantees the divergence. Unfortunately, there exist finitely presented decidable equational theories which imply a divergent behavior of the completion procedure. Nevertheless, sometimes there is a need to use even this infinite canonical rewrite system. Therefore one may want to capture by finite means the infinite family of rules originating from a crossed system. Other possibility for the use primal grammars presents equational unification when an infinite set of (most general) unifiers is generated. + +*Schematizations* present a suitable formalism to cope directly, by finite means, with infinite families. To our knowledge, so far there are four schematizations of infinite fam- ilies. These are the *meta-rules* [Kir89], the *term schemes* [Gra88], the *recurrence do- mains* [CllK90], with their subclass *ω-terms* [Cll91] called also *ρ-terms*, and the rewrite + +*Partially supported by Institut National Polytechnique de Lorraine grant 910 0146 R1. +---PAGE_BREAK--- + +tization of infinite families of terms, but on the contrary to other schematizations (which usually exploit a more complicated notion, such as higher order terms or some sort of constraints) they are presented by a generating terms plus a canonical rewrite system. As we will see later, primal grammars correspond exactly with the class of crossed systems. + +The idea of this paper originated from two different sources. 
On the one hand, this paper develops further the type of schematization introduced by Chen, Hsiang, and Kong [C11K90, C1191]. The second source was the paper of Sattler-Klein [SK91]. + +## 2 Basic notation and definitions + +It is supposed that the reader is familiar with the theory of rewrite systems. For reviews see e.g. [D.J90, Bac91]. The used notation is conform with that of [D.J91]. + +Denote by $\mathcal{T}(\mathcal{F}, \mathcal{X})$ the set of all terms over variables $\mathcal{X}$ and symbols $\mathcal{F}$. $\mathrm{Var}(t)$ denotes the set of all variables in the term $t$. $\mathrm{Head}(t)$ denotes the function symbol heading term $t$. + +$\mathcal{Pos}(t)$ denotes the set of positions of the term $t$. The subset of variable positions of $t$ is denoted by $\mathcal{V}\mathcal{Pos}(t)$, the subset of non-variable positions of $t$ by $\mathcal{F}\mathcal{Pos}(t)$. The expression $a \leq b$ denotes a position *a* above the position *b*. The expression $a \parallel b$ denotes that the positions *a* and *b* are parallel (incomparable). A subterm of $t$ at a position $a \in \mathcal{Pos}(t)$ is denoted by $t|_a$. Denote by $s[t]_a$ a new term obtained from the term $s$ after replacing its subterm $s|_a$ by $t$. Denote by $s[\cdot]_a$ a context of $s$ with a hole at the position $a$. + +Denote a substitution $\sigma: \mathcal{X} \to \mathcal{T}(\mathcal{F}, \mathcal{X})$ by $[x_1 \mapsto t_1, \ldots, x_n \mapsto t_n]$ when the terms $t_i$ are substituted for the variables $x_i$. A term $t$ instantiated by a substitution $\sigma$ is denoted by $\mathit{ta}_\sigma$. Denote by $\mathit{Dom}(\sigma)$, $\mathcal{V}\mathit{Ran}(\sigma)$, and $\mathcal{V}\mathit{ar}(\sigma)$ the variable domain, variable range, and all variables (union of variable domain and variable range) of a substitution $\sigma$, respectively. + +A *rewrite rule* is an ordered pair of terms $s \to t$ such that $\mathcal{V}\mathit{ar}(t) \subseteq \mathcal{V}\mathit{ar}(s)$. 
A term *rewriting system* (or *rewrite system*) is a finite set of rules $R = \{s \to t \mid s, t \in \mathcal{T}(\mathcal{F}, \mathcal{X})\}$. A *rewriting relation* $\to_R$ is the smallest relation containing $R$, closed under substitution and replacement. The relation $\stackrel{*}{\to}_R$ denotes the reflexive and transitive closure of $\to_R$: the relation $\leftarrow_R$ denotes the converse of $\to_R$, the equivalence relation $\leftarrow^*_{R}$ denotes the reflexive, symmetric, and transitive closure of $\to_R$. The normal form of a term $t$ wrt a terminating rewrite relation $\to_R$ is denoted by $t\downarrow_R$. + +Denote by $\vec{a}$ ambiguously either the vector of distinct objects $\langle a_1, \ldots, a_n \rangle$, or the sequence of distinct objects $a_1, \ldots, a_n$, or else the set $\{a_1, \ldots, a_n\}$. Therefore the expression $\tilde{f}(\vec{x})$ means $f_1(x_1, \ldots, x_k), \ldots, f_n(x_1, \ldots, x_k)$. + +Suppose that $\succ$ is a precedence on $\mathcal{F}$. A lexicographic path ordering $\succ_{lpo}$ on $\mathcal{T}(\mathcal{F}, \mathcal{X})$ is defined by $s = f(\vec{s}) \succ_{lpo} g(\vec{t}) = t$ if one of the following holds: $\exists s_i \in \vec{s}^\top$ such that $s_i \supseteq_{lpo} t$, or $f \succ g$ and $\forall t_i \in \vec{t}^\top$ we have $s \succ_{lpo} t_i$, or $f \equiv g$ and $\vec{s}^\top \succ_{lpo}^\text{lex} \vec{t}^\top$, where $\succ_{lpo}^\text{lex}$ is the lexicographic extension of the ordering $\succ_{lpo}$. + +### 2.1 Crossed systems + +The sum [ller90a] of $\varphi$ and $\psi$ is the substitution $\varphi \triangleq \psi$ defined as $[x \mapsto x\varphi\psi | x \in \mathit{Dom}(\varphi), x\varphi\psi \neq x]$. The iterative operator **turtle** [ller90a] on $\sigma, \psi$, and $\varphi$ is defined +---PAGE_BREAK--- + +Recall that the crossed rewrite systems present a sufficient pattern for description and recognition of divergent rewrite systems. 
For crossed systems see Examples 3.1, 3.2 and 5.3, or the paper [ller90b]. + +**Definition 2.1** [Kl190] The rewrite rules $s_1 \rightarrow t_1$ and $s_2 \rightarrow t_2$ (with supposed disjoint variables) form a forward [... a backward] crossed rewrite system if $\int t_1$ is not a variable, there are substitutions $\sigma_2$ [... substitutions $\sigma_1$], $\varphi_1, \varphi_2$ in own variables of $s_2$ [... of $s_1$], an idempotent substitution $\sigma_1$ [... substitution $\sigma_2$], and positions $a \in \mathcal{F}Pos(s_1)$, $b \in \mathcal{F}Pos(t_2)$ [... and a position $b \in \mathcal{F}Pos(s_1)$] such that + +1. $\langle\sigma_1, \sigma_2\rangle$ is the most general semi-unifier of $s_1|_a$ [... of $s_1|_b$] and $s_2$: $$ s_1|_a\sigma_1 = s_2\sigma_2 \text{ [...] } s_1|_b\sigma_1 = s_2\sigma_2. $$ + +2. $\langle\varphi_1, \varphi_2\rangle$ is the most general semi-unifier of $t_2|_b$ and $s_2$ [... of $t_1$ and $s_1|_b$]: $$ t_2|_b\varphi_1 = s_2\varphi_2 \text{ [...] } t_1\varphi_1 = s_1|_b\varphi_2. $$ + +3. $\mathrm{Dom}(\varphi_1) \cap (\mathrm{Var}(\varphi_2) \cup \mathrm{Var}(\sigma_2)) = \emptyset$ or $\mathrm{Var}(\varphi_1) \cap (\mathrm{Dom}(\varphi_2) \cup \mathrm{Dom}(\sigma_2)) = \emptyset$ \\ [... $\mathrm{Dom}(\varphi_1) \cap (\mathrm{Var}(\varphi_2) \cup \mathrm{Var}(\sigma_1)) = \emptyset$ or $\mathrm{Var}(\varphi_1) \cap (\mathrm{Dom}(\varphi_2) \cup \mathrm{Dom}(\sigma_1)) = \emptyset$.] + +This definition is a simplified and cumulated version of those given in [Kl190]. The latter, more general, definitions treat the case of crossed systems consisting of more than two rules, exploiting the notion of an *overlap closure* [GKM83] $s_2 \dashv\vdash t_2$ ($s_1 \dashv\vdash t_1$) instead of a simple rewrite rule $s_2 \rightarrow t_2$ ($s_1 \rightarrow t_1$). From the formal point of view the closure is treated in the same way as the rule, therefore we use the simplified definition(s) for our purposes. 
+ +It is evident from Definition 2.1 of crossed systems that $\mathrm{Dom}(\varphi_1) \cap \mathrm{Dom}(\varphi_2) = \emptyset$. + +**Theorem 2.2** [Kl190] Let $S = \{s_1 \to t_1, s_2 \to t_2\}$ form a forward (... a backward) crossed system. Assume that each nontrivial critical pair $\langle s\sigma[t'\sigma]_c, t\sigma \rangle$ computed by the completion procedure from $S$ and an ordering $\succcurlyeq$ satisfies $s\sigma[t'\sigma]_c \succcurlyeq t\sigma$ (... satisfies $t\sigma$ $\succcurlyeq$ $s\sigma[t'\sigma]_c$). A fair completion procedure without interreduction produces from $S$ the sequence of rules + +$$ +\begin{array}{l@{\hspace{4em}}l} +\begin{array}{rcl} +\text{forward case} & & \\ +u_1 & \to & v_1 = (s_1\sigma_1[t_2\sigma_2]_a)\rho_1 \to t_1\sigma_1\rho_1 \\ +u_{n+1} & \to & v_{n+1} = u_n\omega_n[t_2\omega_n]_{ab^n} \to v_n\omega_n +\end{array} +& +\begin{array}{rcl} +\text{backward case} & & \\ +u_1 & \to & v_1 = t_1\sigma_1\rho_1 \to (s_1\sigma_1[t_2\sigma_2]_b)\rho_1 \\ +u_{n+1} & \to & v_{n+1} = t_1\omega_n \to s_1\omega_n[v_n\omega_n]_b +\end{array} +\end{array} +$$ + +called the iterated family $\mathcal{I}(S)$, where + +$$ \omega_n = ((\pi_n \Delta (\varphi_1 \Delta T_{n-1}(\psi, \varphi_2, \varphi_1))) \cup (\varphi_2 \Delta T_{n-1}(\psi, \varphi_2, \varphi_1)))\rho_{n+1} $$ + +with $\psi = \sigma_2$ in forward case and $\psi = \sigma_1$ in backward case, is the iterative substitution and + +$$ +\begin{array}{l@{\hspace{4em}}l} +\begin{array}{l} +\text{forward case} \\ +\pi_n = [x_n \mapsto x \mid x_n \in \mathcal{V}\mathrm{ar}(u_n|_{ab^n})] \\ +\rho_n = [x \mapsto x_n \mid x \in \mathcal{V}\mathrm{ar}(s_2)] +\end{array} +& +\begin{array}{l} +\text{backward case} \\ +\pi_n = [x_n \mapsto x \mid x_n \in \mathcal{V}\mathrm{ar}(u_n|_b)] \\ +\rho_n = [x \mapsto x_n \mid x \in \mathcal{V}\mathrm{ar}(s_1)] +\end{array} +\end{array} +$$ + +is a pair of fold/unfold substitutions for explicit variable renaming. 
+---PAGE_BREAK--- + +In addition to the signature of *plain* symbols $\mathcal{F}$, we consider also another signature of auxiliary symbols $\mathcal{H}$, where $\mathcal{F} \cap \mathcal{H} = \emptyset$, plus the special symbols successor $s$ and the zero constant 0, both not included neither in $\mathcal{F}$ nor in $\mathcal{H}$. The auxiliary symbols from $\mathcal{H}$ will be denoted by a hat to distinguish them from the 'bare headed' plain symbols from $\mathcal{F}$. + +The arguments of the function symbols $\hat{f} \in \mathcal{H}$ are divided into two parts by a semi-colon. Those before the semicolon are called *counters*, or *counter variables* if they consist just of a variable. Each auxiliary symbol $\hat{f}$ has a *counter arity*, denoted by $ar_c(\hat{f})$, indicating its number of counters. The set $CPos(t) = \{a.n \mid Head(t|_a) = f \in \mathcal{H}, n \le ar_c(f)\}$ is called the set of *counter positions* in a term $t \in T(\mathcal{F} \cup \mathcal{H}, \mathcal{X})$. These are the positions in $t$ immediately below an auxiliary symbol $\hat{f}$, before the semicolon. The set of counter variables of a term $t$ is denoted by $CVar(t) = \{t|_a \mid a \in CPos(t) \cap VPos(t)\}$. + +The auxiliary positions of the term $t \in T(\mathcal{F} \cup \mathcal{H}, \mathcal{X})$ are denoted by + +$$ \mathrm{Pos}_{\mathcal{H}}(t) = \{a \in \mathcal{F} \mathrm{Pos}(t) \mid \mathrm{Head}(t|_a) \in \mathcal{H}\} $$ + +The outermost auxiliary positions of $t \in T(\mathcal{F} \cup \mathcal{H}, \mathcal{X})$ are denoted by + +$$ \mathrm{OPos}_{\mathcal{H}}(t) = \{a \in \mathrm{Pos}_{\mathcal{H}}(t) \mid a \le b \text{ or } a \parallel b \text{ for all } b \in \mathrm{Pos}_{\mathcal{H}}(t)\} = \liminf_{\le} \mathrm{Pos}_{\mathcal{H}}(t) $$ + +**Definition 3.1** Suppose there exists a precedence $\succ$ on the auxiliary symbols $\mathcal{H}$. 
The prime rewrite system $P_\mathcal{H}$ upon $\mathcal{H}$ contains for each symbol $\hat{f} \in \mathcal{H}$ the pair of rewrite rules + +$$ (\hat{f}(0, \vec{x}; \vec{y}) \rightarrow t_1) \quad (\hat{f}(s(z), \vec{x}; \vec{y}) \rightarrow t_2[\hat{f}(z, \vec{x}\delta(x); \vec{y})]_\Lambda) $$ + +where $\Lambda \subseteq Pos(t_2)$ is a finite set of mutually parallel positions incomparable with the auxiliary positions $Pos_H(t_2)$. $\vec{x}$ and $\vec{y}$ are variable vectors, $\delta(x)$ is the substitution $\delta(x) = [x \mapsto s(x)]$, and $t_1, t_2$ are terms from $T(\mathcal{F} \cup \mathcal{H} \cup \{s\}, \mathcal{X})$, such that for both $i=1,2$ + +• for all auxiliary positions $a \in Pos_H(t_i)$ there exists an auxiliary symbol $\hat{g} \in \mathcal{H}$ and a subsequence $\vec{w}$ of $\vec{x}$, such that $\hat{f} \succ \hat{g}$ and $t_i|_a = \hat{g}(\vec{w}; \vec{y})$; + +• for all variable positions $a \in VPos(t_i)$, which are incomparable with all auxiliary positions $Pos_H(t_i)$, we have $t_i|_a = y$ or $t_i|_a = y_m$ where $y \in \vec{y}$ is a variable and $m$ is its mark, with either $m \in \{0\} \cup \vec{x}$ if $i=1$ or $m \in \{s(z)\} \cup \vec{x}$ if $i=2$. + +Prime rewrite systems are primitive recursive rewrite systems of special type. The meaning of $\vec{x}\delta(x)$ is to transform the variable $x$ into $s(x)$ if $x$ belongs to the variable sequence $\vec{x}$. Prime rewrite systems violate the requirement $\mathrm{Var}(r) \subseteq \mathrm{Var}(l)$ for rewrite rules $l \to r$ of classic rewrite systems, because there may exist variables $\mathcal{V} \subseteq \mathrm{Var}(r) - \mathrm{Var}(l)$ for rules $l \to r \in P_\mathcal{H}$, and therefore they should be considered as production systems. 
If $y_m \in \mathrm{Var}(r) - \mathrm{Var}(l)$ is such a variable in a rule $l \to r \in P_\mathcal{H}$ of a prime rewrite system $P_\mathcal{H}$, then the mark $m$ is the counter subterm $l|_a$ for a counter position $a \in CPos(l)$ and the original variable is $y \in \mathrm{Var}(l) - CVar(l)$. For $y_m$ we say that the variable $y$ is marked by the counter expression $m$. +---PAGE_BREAK--- + +rules are called flat. + +**Example 3.2** Suppose that $\mathcal{H} = \{\hat{f}, \hat{g}, \hat{h}\}$ and $\hat{f} \succ \hat{g} \succ \hat{h}$. The rewrite system + +$$ +\begin{align*} +\hat{f}(0, v, w; x, y) &\rightarrow \hat{g}(v, w; x, y) \\ +\hat{f}(s(u), v, w; x, y) &\rightarrow \hat{f}(u, v, w; x, y) + (\hat{f}(u, v, w; x, y) + \hat{f}(u, v, w; x, y)) \\ +\hat{g}(0, w; x, y) &\rightarrow \hat{h}(w; x, y) \\ +\hat{g}(s(v), w; x, y) &\rightarrow \hat{g}(v, w; x, y) * \hat{g}(v, w; x, y) +\end{align*} +\quad +\begin{align*} +\hat{h}(0; x, y) &\rightarrow A(x) \\ +\hat{h}(s(w); x, y) &\rightarrow B(y_w). \hat{h}(w; x, y) +\end{align*} +$$ + +is prime, whereas each of the following systems contains a counterexample to the Definition 3.1: + +• $\hat{f}(s(u), v, w) \to \hat{f}(u, s(v), w) * \hat{f}(u, v, s(w))$ does not match the right-hand side of prime rewrite systems because $\hat{f}(u, s(v), w)$ and $\hat{f}(u, v, s(w))$ are different. + +• $\hat{f}(s(u); x) \to F(\hat{g}(u; \hat{f}(u; x)))$ is contrary to the fact that auxiliary symbols cannot be encapsulated. + +• $\{\hat{f}(s(u)) \to \hat{g}(u) * \hat{f}(u), \hat{g}(s(u)) \to \hat{f}(u) + \hat{g}(u)\}$ violates the precedence requirement on the auxiliary symbols: these two rules would imply $\hat{f} \succ \hat{g} \succ \hat{f}$. + +All prime rewrite systems are confluent because they are orthogonal and left-linear. Prime rewrite systems are terminating since we can construct a lexicographic path ordering $\succ_{lpo}$ for each prime system. 
The precedence $\succ$ on auxiliary symbols $\mathcal{H}$ can be enlarged to plain symbols $\mathcal{F}$ in the following way: $\forall \hat{f} \in \mathcal{H} \ \forall g \in \mathcal{F}$ we define $\hat{f} \succ g$. This enlarged precedence, together with the left-to-right status of all auxiliary symbols, defines the required ordering. + +# 4 Generators and folded forms + +If all counter positions of a term $t \in T(\mathcal{F} \cup \mathcal{H}, \mathcal{N})$ are occupied by variables, i.e. $CPos(t) \subseteq VPos(t)$, then the term $t$ is called a **generator**. We say also that a generator is a term with *open counters*. + +Denote by $\mathcal{N} = \{s^i(0) | i \in \mathcal{N}\}$ the infinite set of terms representing *natural numbers*. A (partial) **enumerator** for a generator $t$ is a ground substitution $\xi: \mathcal{X} \to \mathcal{N}$ such that $Dom(\xi) = CVar(t)$ ($Dom(\xi) \subset CVar(t)$). A (partial) **enumerator** $\xi$ is called *basic* if for all variables $x \in Dom(\xi)$ we have $x\xi = 0$. Denote by $\Xi(t)$ ($\pi\Xi(t)$) the set of all possible (partial) enumerators for the generator $t$, called the (*partial enumeration of t*). + +Speaking about the normal form $t\xi\downarrow_{P_i}$ makes sense only for *flat* prime rewrite systems $P_\mathcal{H}$. Otherwise the prime rewrite systems may introduce new variables. + +## 4.1 Production of fresh variables + +A difficult problem in describing an infinite sequence of rewrite rules produced during divergence or an infinite sequence of unifiers as a solution of an equational unification problem is how to create fresh variables and how to manage properly this creation. This +---PAGE_BREAK--- + +of a prime rewrite system for rewriting, not only the variables but also their marks get instantiated. This allows us to obtain richer structures as normal forms of enumerated generators using the prime rewrite systems. This is the case e.g. 
if the divergence makes new variables to appear originating from variable renamings during superpositions (see Theorem 2.2), or if an infinite sequence of unifiers in an equational unification problem creates new variables for the same reason. + +**Example 4.1** Consider an equational unification [F1186] with the symbols $\mathcal{F}_0 = \{a, b\}$, $\mathcal{F}_1 = \{g\}$, $\mathcal{F}_2 = \{f\}$, and the set of equations $E = \{f(b, x) = x, g(f(x, y)) = g(y)\}$. The unification problem $g(x) =_E g(a)$ has the infinite sequence of unifiers + +$$[x \mapsto a], [x \mapsto f(y_0, a)], [x \mapsto f(y_1, f(y_0, a))], \dots, [x \mapsto f(y_n, \dots, f(y_0, a)\dots)], \dots$$ + +This sequence can be produced from the generator $x \mapsto \hat{h}(z; y)$ using the prime system + +$$\hat{h}(0; y) \rightarrow a \qquad \hat{h}(s(z); y) \rightarrow f(y_s, \hat{h}(z; y))$$ + +under the condition that we know to rename the variable $y_s$, marked by the counter expression $z$, in the term $f(y_s, \cdot)$ into the variables $y_0, y_1, \dots, y_n$. + +Assume that a term $t \in T(\mathcal{F} \cup \mathcal{H}, \mathcal{X})$, with all counter variables enumerated, contains variables in a redex of $t$ headed by an auxiliary symbol $\hat{h}$ and suppose that these variables, marked by a counter, appear in the right-hand side $r$ of a rewrite rule $l \to r \in P_{\mathcal{H}}$, where $\text{Head}(l) = \hat{h}$, at a position not below $\hat{h}$ (we say that these variables get unfolded by the rule $l \to r$), exactly as the variable $y$ in the Example 4.1. During a rewrite step, these variables must be renamed, which is done by "marking" them, and which means they receive a subscript created according to the rule $l \to r$ being applied. Actually, this mark is the value of one counter expression of $\hat{h}$, in Example 4.1 it is the counter variable $z$. The rewriting relation coupled with the marking process is called *marked rewriting*. 
+ +Marking a term means the application of a substitution at positions not below an auxiliary symbol $\tilde{f} \in \mathcal{H}$ and also the evaluation of the counter expressions as marks by the same substitution. Let us denote by $t \bullet_{\mathcal{H}} \sigma$ such an application of a substitution $\sigma$, formally defined as + +$$ +\begin{array}{lll} +f(\vec{u}) \bullet_{\mathcal{H}} \sigma & = & f(\vec{u} \bullet_{\mathcal{H}} \sigma) \\ +f(\vec{u}) \bullet_{\mathcal{H}} \sigma & = & f(\vec{u}) \\ +y_m \bullet_{\mathcal{H}} \sigma & = & y_{\sigma m\sigma} \\ +y \bullet_{\mathcal{H}} \sigma & = & y\sigma +\end{array} +\quad +\begin{array}{l} +\text{if } f \notin \mathcal{H}, \\ +\text{if } f \in \mathcal{H}, \\ +\text{if } y_m \text{ is a marked variable,} \\ +\text{if } y \text{ is an unmarked variable.} +\end{array} +$$ + +for each term vector $\vec{u}$. + +**Definition 4.2 (Marked rewriting)** Let $t, t' \in T(\mathcal{F} \cup \mathcal{H}, \mathcal{X})$ be two enumerated terms and $P_{\mathcal{H}}$ be a prime rewrite system. We write $t \implies_{P_{\mathcal{H}}} t'$ iff + +* there exist an outermost position $a \in OP_{\mathcal{H}}(t)$, a rewrite rule $l \to r \in P_{\mathcal{H}}$ and a substitution $\sigma$, such that $t|_a = l\sigma$; and + +* $t' = t[r \bullet_{\mathcal{H}} \sigma]_a$ +---PAGE_BREAK--- + +The expression $m \bullet_H \sigma$ yields the value of the mark $m$, determined by the match $\sigma$, for each marked variable $y_m \in Var(r)$. According to the choice of the mark of a variable, we get decreasing, increasing or stable markings of the variables within the marked rewriting relation $\Rightarrow_{P_{\mathcal{H}}}$. + +**Example 4.3** Let us take the enumerated term $t = a + \hat{f}(s^3(0), s^2(0), 0; x)$. 
If we apply the prime rewrite system $P_H$ consisting of the rules + +$$ \hat{f}(0, u, v; x) \rightarrow b \qquad \hat{f}(s(z), u, v; x) \rightarrow x_{s(z)} * \hat{f}(z, s(u), v; x) $$ + +on it, then we get $t' = a + (x_3 * \hat{f}(s^2(0), s^3(0), 0; x))$. If we change the second rule of the prime system to + +$$ \hat{f}(s(z), u, v; x) \rightarrow x_u * \hat{f}(z, s(u), v; x) $$ + +we get $t' = a + (x_2 * \hat{f}(s^2(0), s^3(0), 0; x))$. Finally changing the second rule into + +$$ \hat{f}(s(z), u, v; x) \rightarrow x_v * \hat{f}(z, s(u), v; x) $$ + +we get $t' = a + (x_0 * \hat{f}(s^2(0), s^3(0), 0; x))$ in the marked rewrite relation $t \Rightarrow_{P_{\mathcal{H}}} t'$. The normal form $\tau_{P_{\mathcal{H}}}$ of the term $t$ will be + +$$ +\begin{align*} +a + (x_3 * (x_2 * (x_1 * b))) & \quad \text{for } \hat{f}(s(z), u, v; x) \to x_{s(z)} * \hat{f}(z, s(u), v; x) & (\text{decreasing}), \\ +a + (x_2 * (x_3 * (x_4 * b))) & \quad \text{for } \hat{f}(s(z), u, v; x) \to x_u * \hat{f}(z, s(u), v; x) & (\text{increasing}), \\ +a + (x_0 * (x_0 * (x_0 * b))) & \quad \text{for } \hat{f}(s(z), u, v; x) \to x_v * \hat{f}(z, s(u), v; x) & (\text{stable}). +\end{align*} +$$ + +respectively. + +## 4.2 Primal grammars + +We use generators to schematize recursive sets of terms from $\mathcal{T}(\mathcal{F}, \mathcal{N})$. For this reason we introduce the *primal term grammars*. + +**Definition 4.4** A primal term grammar (or primal grammar for short) $G$ is a 4-tuple $(\mathcal{F}, \mathcal{H}, P_H, t)$, where $\mathcal{F}$ is a signature of plain symbols, $\mathcal{H}$ is a signature of auxiliary symbols, $P_H$ is a prime rewrite system, and $t$ is a (partially basically enumerated) generator. + +The language generated by a primal term grammar $G = (\mathcal{F}, \mathcal{H}, P_H, t)$, denoted by $L(G)$, is the set of terms $L(G) = \{t\xi\downarrow_{P_{\mathcal{H}}} | \xi \in \Xi(t)\}$. The generator $t$ is called a **folded form** of $L(G)$. 
+ +The generator $t$ in Definition 4.4 extends to equations and rules just by considering them as terms in the extended signature $\mathcal{F} \cup \{\rhd\}$ and $\mathcal{F} \cup \{\rhd\}$, respectively. + +The class of $\omega$-terms ($\rho$-terms) [C1191] is included in the class of primal grammars. Let $t$ be an $\omega$-term and $\vec{a}$ be the finite sequence of all positions such that $t|_{\alpha_i} = \Phi(h_i|b_i \leftarrow$ +---PAGE_BREAK--- + +bols $\mathcal{H} = \hat{f}$, the generator $t[\hat{f}_1(z_1; \vec{x}), \dots, \hat{f}_n(z_n; \vec{x})]_\mathfrak{A}$, and the prime system $P_\mathcal{H}$ containing +the pair of rules + +$$ +\hat{f}_i(0; \vec{x}) \rightarrow l_i \qquad \hat{f}_i(s(z_i); \vec{x}) \rightarrow h_i[\hat{f}_i(z_i; \vec{x})]_{b_i} +$$ + +for each $\hat{f}_i \in \mathcal{H}$, where $\vec{x} = \bigcup_i \mathrm{Var}(h_i[l_i]_{b_i})$, such that $\Omega(t) = L(G)$. No variable treatment is defined for $\omega$-terms, therefore there are no marks. + +Like for classical terms, one may want to unify primal grammars. Since the prime +rewrite systems are canonical, the unification of two primal grammars $G_1 = (\mathcal{F}, \mathcal{H}_1, P_{\mathcal{H}_1}, t_1)$ +and $G_2 = (\mathcal{F}, \mathcal{H}_2, P'_{\mathcal{H}_2}, t_2)$ by means of narrowing becomes possible, although it is undecidable in general. This unification problem can be viewed as the unification of the two +generators $t_1$ and $t_2$ modulo the equational theory presented by the canonical system +$P_{\mathcal{H}} = P_{\mathcal{H}_1} \cup P'_{\mathcal{H}_2}$, which is equivalent to the intersection of some instances of the infinite +sets $L(G_1)$ and $L(G_2)$. In this scope, it would be interesting to know which equational +theories are presentable by prime (or iterative) rewrite systems. 
+ +If the unification by narrowing is decidable, we can complete finite primal grammar +systems $\mathcal{G} = \{\langle \mathcal{F}, \mathcal{H}_i, P_{\mathcal{H}_i}, t_i \mid i = 1, \dots, n \}\}$ just by completing the rewrite systems +$\mathcal{R}(\mathcal{G}) = \{t_i \mid (\mathcal{F}, \mathcal{H}_i, P_{\mathcal{H}_i}, t_i) \in \mathcal{G}\}$, consisting of the generators in $\mathcal{G}$ – which are usual +terms in $\mathcal{T}(\mathcal{F} \cup \mathcal{H}, \mathcal{X})$, – modulo the rewrite system $P_{\mathcal{H}} = \bigcup_{i=1}^n P_{\mathcal{H}_i}$. + +In the sequel, the partially basically enumerated generators, used in Section 5 as folded +forms for iterated families of rules, containing only one noninstantiated counter variable +are called axioms. + +5 Primal grammars for iterated families + +We show how to produce a primal grammar *G*. based on a prime rewrite system *P**H*, for +an iterated family *I*(*S*) of rules originating from a crossed system *S* during completion, +such that *L*(*G*) = *I*(*S*). The application of counters within a primal grammar *G* = +(*F*, *H*, *P**H*, *t*) becomes evident now. The supporting counters, instantiated by zeros in the +axiom *t*, serve as interconnection mechanism between dependent auxiliary symbols inside +of the rules in the prime rewrite system *P**H*. The main counter, namely the only one +remaining noninstantiated in the axiom *t*, serves as the index of elements in *I*(*S*). More +precisely, the instantiation of the main counter in the axiom *t* by *s**n*(0), followed by a +reduction to normal form under the marked rewriting relation ⇌*P**H*, results in the *n*-th +element of the iterated family *I*(*S*). + +Before presenting the theorem concerning this statement, let us consider some exam- +ples to explain the principles of the constructions developed in the sequel. 
+ +**Example 5.1** [ller90b] Consider the forward crossed system + +$$ +d(x' \ominus (x' \otimes y')) \to y' \qquad g(x) \ominus y \to g(x \ominus (x \circ y)) +$$ + +where $a = 1, b = 1$, $\sigma_1 = [x' \mapsto g(x), y' \mapsto y]$, $\sigma_2 = [y \mapsto g(x) \otimes y]$, $\varphi_1 = [x \mapsto g(x)]$, and $\varphi_2 = [y \mapsto g(x) \circ y]$. The iterated family has the form + +$$ +d(g^n(x \ominus (x \circ (g(x) \circ \dots (g^n(x) \otimes y))))) \to y \quad (1) +$$ +---PAGE_BREAK--- + +into left-hand sides of produced rules during the observed divergence. This is captured +by the first part of the prime rewrite system: + +$$ +\begin{align*} +\hat{f}(0, z_y, z_x; x, y) &\rightarrow x \ominus (x \circ \hat{f}_y(z_y, z_x; x, y)) \\ +\hat{f}(s(z), z_y, z_x; x, y) &\rightarrow g(\hat{f}(z, s(z_y), z_x; x, y)) +\end{align*} +$$ + +The folded form of the iterated family (1) is the axiom $d(g(\hat{f}(z, 0, 0; x, y))) \to y$. + +The auxiliary symbol $\hat{f}_y$, capturing the iterated instances of the variable $y$, will be +constructed from the substitutions $\varphi_2$, and $\sigma_2$. The second part of the prime rewrite +system will be + +$$ +\begin{align*} +\hat{f}_y(0, z_x; x, y) &\rightarrow g(\hat{f}_x(z_x; x)) \oslash y \\ +\hat{f}_y(s(z), z_x; x, y) &\rightarrow g(\hat{f}_x(z_x; x)) \circ \hat{f}_y(z, s(z_x); x, y) +\end{align*} +$$ + +originating from the substitutions $\varphi_2$ and $\sigma_2$. + +The same method applies on the variable $x$, producing the rewrite rules for the auxil- +iary symbol $\hat{f}_x$: + +$$ +\hat{f}_x(0; x) \rightarrow x \qquad \hat{f}_x(s(z); x) \rightarrow g(\hat{f}_x(z; x)) +$$ + +We have constructed a prime rewrite system, a mark, and a folded form for the iterated +family (1). + +The impact of marking can be nicely observed in the following example taken from a +specification of the reverse operation on lists. 
+ +**Example 5.2** The proof by consistency of the inductive theorem *rev(rev(x)) = x* within +the system + +$$ +\mathrm{rev}_1(\mathrm{nil}, y) \rightarrow \mathrm{nil} \quad \mathrm{rev}_1(\mathrm{xa.xb.y}) \rightarrow \mathrm{rev}_1(\mathrm{xb.xa.y}) \quad \mathrm{rev}(x) \rightarrow \mathrm{rev}_1(x,\mathrm{nil}) +$$ + +leads to a divergent process with the iterated family + +$$ +\begin{align*} +\text{\textit{rev}}_1(\text{\textit{rev}}_1(\text{\textit{xb}} , \text{\textit{xa}}} , \text{\textit{nil}}) &\rightarrow \text{\textit{xa.xb}} \\ +\text{\textit{rev}}_1(\text{\textit{rev}}_1(\text{\textit{xb}} , \text{\textit{xa1}}} , (\text{\textit{xa}}} , \text{\textit{nil}}) &\rightarrow \text{\textit{xa.(xa1.xb)}} \\ +\text{\textit{rev}}_1(\text{\textit{rev}}_1(\text{\textit{xb}} , \text{\textit{xa2}}} , (\text{\textit{xa1}}} , (\text{\textit{xa}}} , \text{\textit{nil}})) &\rightarrow \text{\textit{xa.(xa1.(xa2.xb))} +}\end{align*} +$$ + +originating from the forward crossed system + +$$ +\operatorname{rev}_1(\operatorname{rev}_1(x.\nil).\nil) \rightarrow x \qquad \operatorname{rev}_1(xa.xb.y) \rightarrow \operatorname{rev}_1(xb.xa.y) +$$ + +where $a = 1$, $b = \Lambda$, $\sigma_1 = [x \mapsto xa.xb]$, $\sigma_2 = [y \mapsto nil]$, $\varphi_1 = [xb \mapsto xa.xb]$, $\varphi_2 = [y \mapsto xa.y]$. 
The resulting prime system will be + +$$ +\begin{align*} +\hat{f}(0, z_y, z_a, z_b; y, xa, xb) &\rightarrow rev_1(xb, xa_{z_y}, \hat{f}_y(z_y, z_a; y, xa)) \\ +\hat{f}(s(z), z_y, z_a, z_b; y, xa, xb) &\rightarrow \hat{f}(z, s(z_y), z_a, z_b; y, xa, xb) \\ +\hat{f}_y(0, z_a; y, xa) &\rightarrow nil \\ +\hat{f}_{xb}(0, z_a; xa, xb) &\rightarrow xb \\ +\hat{f}_{yb}(s(z_b), z_a; xa, xb) &\rightarrow xa_{s(z_a)}, \hat{f}_{yb}(z_b, s(z_a); xa, xb) +\end{align*} +$$ + +and the axiom $\mathbf{\overline{rev}}_1(\mathbf{\hat{f}}(v, 0, 0; y, xa, xb)) \to xa, \mathbf{\hat{f}}_{xb}(v, 0; xa, xb).$ +---PAGE_BREAK--- + +Example 5.3 [ller90b] Consider the backward crossed system + +$$ +(x \otimes f(y)) \ominus y \rightarrow (x \ominus y) \otimes y \qquad (x' \odot y') \otimes y' \rightarrow x' +$$ + +where $b = 1$, $\sigma_1 = [x \mapsto x \circ f(y)]$, $\sigma_2 = [x' \mapsto x, y' \mapsto f(y)]$, $\varphi_1 = [y \mapsto f(y)]$, and +$\varphi_2 = [x \mapsto x \ominus f(y)]$. The iterated family of rules has the form + +$$ +(((x \circled{f}^{n+1}(y)) \not\sqsubseteq f^n(y)) \not\sqsubseteq f(y)) \not\sqsubseteq y) \circled{y} \rightarrow ((x \not\sqsubseteq f^n(y))) \not\sqsubseteq f(y)) \not\sqsubseteq y +$$ + +We have $t_2\sigma_2 = x$ and $s_1[\cdot]_b = (\cdot \ominus y)$. Iterated instances of $s_1[\cdot]_b$ are pumped onto the root of right-hand sides of produced rules during the observed divergence. This will be captured by a part of the primitive recursive rewrite system as in Example 5.1, only that $t_2$ is replaced now by $s_1$: + +$$ +\hat{g}(0, z_y, z_x; x, y) \rightarrow x \qquad \hat{g}(s(z), z_y, z_x; x, y) \rightarrow \hat{g}(z, s(z_y), z_x; x, y) \ominus \hat{g}_y(z_y; y) +$$ + +Using the previous system for $\hat{g}$, we can produce a semi-product of an axiom from the +iterated family, schematizing the right-hand sides: + +(((x ⊙ fn+1)(y) ⊙ fn(y)) ⊙ ... 
⊙ f(y) ⊙ y) ⊙ y → ĝ(sn(0), 0, 0; x ⊙ fn(y), y) + +The auxiliary symbols $\hat{g}_x$ and $\hat{g}_y$, capturing the iterated instances of the variables $x$ and $y$ respectively, are constructed from the substitutions $\varphi_1$, $\varphi_2$, and $\sigma_1$, the same way as in the forward crossed case. + +$$ +\begin{array}{rcl@{\hspace{4em}}rcl} +\hat{g}_x(0, z_y; x, y) & \to & x \circ f(\hat{g}_y(z_y; y)) & & \hat{g}_y(0; y) & \to & y \\ +\hat{g}_x(s(z); z_y; x, y) & \to & \hat{g}_x(z, s(z_y); x, y) \ominus f(\hat{g}_y(z_y; y)) & & \hat{g}_y(s(z); y) & \to & f(\hat{g}_y(z; y)) +\end{array} +$$ + +After considering the previous rewrite rules for $\hat{g}_x$ and $\hat{g}_y$, the iterated family in this example can be derived from the axiom $(\hat{g}_x(z, 0; x, y) \ominus y) \circled{y} \to \hat{g}(z, 0, 0; x \ominus \hat{g}_y(z; y), y)$. + +We have got once more a prime rewrite system, a mark, and a folded form for the +iterated family. + +**Theorem 5.4** For each iterated family *I*(S), originated from a crossed rewrite system S, +there exists a primal grammar *G* = (*F*, *H*, *P**H*, *t*) with an axiom *t*, such that *L*(*G*) = *I*(S). + +**Proof:** The basic ideas of the proof for forward crossed systems is given. The construction for backward crossed systems is similar. 
+ +First of all, let us introduce some more notation: + +$$ +\begin{align*} +\vec{w}_f &= \mathrm{Var}(t_2) & \vec{w}_b &= \mathrm{Var}(s_1) \\ +\vec{x}_1 &= \mathrm{Dom}(\varphi_1) & \vec{y}_1 &= \mathrm{VRan}(\varphi_1) \\ +\vec{x}_2 &= \mathrm{Dom}(\varphi_2) & \vec{y}_2 &= \mathrm{VRan}(\varphi_2) \\ +\vec{x}_{12} &= \vec{x}_1 \cup \vec{x}_2 & \vec{y}_{12} &= \vec{y}_1 \cup \vec{y}_2 \\ +\vec{c}_1 &= \{\mathfrak{z}_x \in \mathcal{X} \mid x \in \vec{x}_1\} & \alpha_1(\vec{u}; \vec{v}) &= [\mathfrak{x} \mapsto \hat{f}_x(\vec{u}; \vec{v}) \mid x \in \vec{x}_1] \\ +\vec{c}_2 &= \{\mathfrak{z}_x \in \mathcal{X} \mid x \in \vec{x}_2\} & \alpha_2(\vec{u}; \vec{v}) &= [\mathfrak{x} \mapsto \hat{f}_x(\vec{u}; \vec{v}) \mid x \in \vec{x}_2] \\ +\vec{c}_{12} &= \{\mathfrak{z}_x \in \mathcal{X} \mid x \in \vec{x}_{12}\} & \alpha_{12}(\vec{u}; \vec{v}) &= [\mathfrak{x} \mapsto \hat{f}_x(\vec{u}; \vec{v}) \mid x \in \vec{x}_{12}] \\ +\vec{d}_1 &= \{\mathfrak{z}_x \in \mathcal{X} \mid x \in \vec{y}_1\} & \gamma_1 &= [\mathfrak{z} \mapsto s(\mathfrak{z}) \mid z \in \vec{c}_1] \\ +\vec{d}_2 &= \{\mathfrak{z}_x \in \mathcal{X} \mid x \in \vec{y}_2\} & \gamma_2 &= [\mathfrak{z} \mapsto s(\mathfrak{z}) \mid z \in \vec{c}_2] \\ +\vec{d}_{12} &= \{\mathfrak{z}_x \in \mathcal{X} \mid x \in \vec{y}_{12}\} & \gamma_{12} &= [\mathfrak{z} \mapsto s(\mathfrak{z}) \mid z \in \vec{c}_{12}] \\ +\epsilon_1 &= [\mathfrak{z}_x \mapsto s(\mathfrak{z}_x) \mid x \in \vec{y}_1 - \vec{x}_1] & \epsilon_2 &= [\mathfrak{z}_x \mapsto s(\mathfrak{z}_x) \mid x \in \vec{y}_2 - \vec{x}_2] \\ +q_1 &= (\vec{d}_1 - \vec{c}_1)\epsilon_1 & q_2 &= (\vec{d}_2 - \vec{c}_2)\gamma \\ +V &= Var(t_2[\cdot]_b) - Var(t_2|b) & W &= (\vec{y}_1 - \vec{x}_1) \cap (\vec{y}_2 - \vec{x}_2) +\end{align*} +$$ +---PAGE_BREAK--- + +ular parameterized substitutions introducing the supporting symbols $\hat{f}_x$, and the $\gamma$-s are substitutions for advancing counters. 
The expression $\Theta_x$ means either $\Theta_1$ if $x \in Dom(\varphi_1)$ or $\Theta_2$ if $x \in Dom(\varphi_2)$, where $\Theta$ stands for one of the indexed symbols. All variables are considered to be global, e.g. $\tilde{c}_1 \cap \tilde{d}_2 = \{z_x \in \mathcal{X} \mid x \in \tilde{x}_1 \cap \tilde{y}_2\}$. + +Moreover, let $\tau_V(v) = [u \mapsto u_v \mid u \in V]$ be the marking substitution for the variables $V$ with the counter expression $z$. + +Suppose that $S = \{s_1 \to t_1, s_2 \to t_2\}$ is the forward crossed system as in Definition 2.1. + +The set $\mathcal{H}$ contains the main symbol $\hat{f}$ for keeping track of the manipulations concerning the term $t_2$, together with the supporting symbols $\hat{f}_x$ for each variable $x \in \tilde{x}_{12}$. + +The prime rewrite system $P_\mathcal{H}$ contains the rewrite rules + +$$ +\begin{aligned} +\hat{f}(0, \tilde{d}_{12}; \tilde{w}_f) &\rightarrow t_2|_b \alpha_2(\tilde{d}_2; \tilde{y}_2) \bullet_{\mathcal{H}} \tau_{V \cup W}(\tilde{c}_2 \cap \tilde{d}_{12}) \\ +\hat{f}(s(z), \tilde{d}_{12}; \tilde{w}_f) &\rightarrow t_2 \alpha_1(\tilde{d}_1; \tilde{y}_1)[\hat{f}(z, \tilde{d}_{12}\gamma_2; \tilde{w}_f)]_b \bullet_{\mathcal{H}} \tau_{V \cup W}(\tilde{c}_2 \cap \tilde{d}_{12}) +\end{aligned} + $$ + +for the main symbol $\hat{f}$ and the rewrite rules + +$$ +\begin{aligned} +\hat{f}_x(0, \tilde{d}_x - \{z_x\}; \tilde{y}_x) &\rightarrow x(\sigma_2 \Delta \alpha_1(\tilde{d}_1\epsilon_1; \tilde{y}_1)) \bullet_H \tau_{V \cup W}(q_x) \\ +\hat{f}_x(s(z_x), \tilde{d}_x - \{z_x\}; \tilde{y}_x) &\rightarrow x(((\varphi_1 \cup \varphi_2) \Delta \alpha_1(\tilde{d}_1\epsilon_1; \tilde{y}_1)) \Delta \alpha_2(\tilde{d}_2\gamma_1; \tilde{y}_2)) \bullet_H \tau_{V \cup W}(q_x) +\end{aligned} + $$ + +for each variable $x \in \tilde{x}_{12}$, and subsequently also for each supporting symbol $\hat{f}_x$. 
The union $\varphi_1 \cup \varphi_2$ is a substitution because $Dom(\varphi_1) \cap Dom(\varphi_2) = \emptyset$ from Definition 2.1. + +The axiom $t$ is the rule + +$$ s_1\sigma_1\alpha_1(z, \vec{0}; \vec{y}_1)[t_2\sigma_2\alpha_2(z, \vec{0}; \vec{y}_2)]_a[\hat{f}(z, \vec{0}; \vec{w}_f)]_{ab} \rightarrow t_1\sigma_1\alpha_1(z, \vec{0}; \vec{y}_1) $$ + +The rest is proved by induction on $n$, proving that $t[z \mapsto s^n(0)]\downarrow_{P_i}$ is the $n$-th element of $\mathcal{I}(S)$. $\square$ + +Using techniques similar to those of Sattler-Klein [SK91], it is possible to construct a divergent rewrite system for each primal grammar. + +# 6 Conclusion + +A new schematization called *primal grammars* has been introduced, which presents a generalization of *recurrence domains* [C11K90, C1191] and which has similarities with *meta-rules* [Kir89]. In the proof of Theorem 5.4 an exact method was developed on how to construct primal grammars from iterated families of rules, originating from crossed rewrite systems during completion. Such a construction was not known for the recurrence domains. + +Primal grammars can be unified via their generators by narrowing. Subsequently, if the unification by narrowing is decidable, it is possible to complete primal grammar systems. Together with the meta-rules [Kir89] and to a certain extent with the rewrite systems with membership constraints (infinite sets of ground equations are considered only) [Com91], the primal grammars represent the only known formalism permitting completion of infinite sets of rules. +---PAGE_BREAK--- + +I am grateful to Pierre Lescanne who contributed to the readability of the paper. + +References + +[Bac91] L. Bachmair. *Canonical equational proofs*. Birkhäuser, Boston, 1991. + +[C1191] Il. Chen and J. Ilsiang. Logic programming with recurrence domains. In J. Leach Albert, B. Monien, and M. 
Rodríguez Artalejo, editors, *Proceedings 18th ICALP Conference, Madrid (Spain)*, volume 510 of *Lecture Notes in Computer Science*, pages 20–34. Springer-Verlag, July 1991. + +[C11K90] Il. Chen, J. Ilsiang, and Il.-C. Kong. On finite representations of infinite sequences of terms. In S. Kaplan and M. Okada, editors, *Proceedings 2nd International Workshop on Conditional and Typed Rewriting Systems (CTRS'90), Montreal (Canada)*, volume 516 of *Lecture Notes in Computer Science*, pages 100–114. Springer-Verlag, June 1990. + +[Com91] Il. Comon. Completion of rewrite systems with membership constraints. Research report 699, Laboratoire de Recherche en Informatique, Orsay, France, 1991. + +[DJ90] N. Dershowitz and J.-P. Jouannaud. Rewrite systems. In J. van Leeuwen, editor, *Handbook of Theoretical Computer Science B: Formal Methods and Semantics*, chapter 6, pages 243–309. Elsevier, Amsterdam, 1990. + +[DJ91] N. Dershowitz and J.-P. Jouannaud. Notations for rewriting. *Bulletin of the European Association for Theoretical Computer Science*, 43:162–172, February 1991. + +[F1186] F. Fages and G. Huet. Complete sets of unifiers and matchers in equational theories. *Theoretical Computer Science*, 43(1):189–200, 1986. + +[GKM83] J.V. Guttag, D. Kapur, and D.R. Musser. On proving uniform termination and restricted termination of rewrite systems. *SIAM Journal on Computing*, 12(1):189–214, February 1983. + +[Gra88] B. Gramlich. Unification of term schemes - theory and applications. SEKI Report SR-88-18, Universität Kaiserslautern, Germany, 1988. + +[ller90a] M. Ilermann. Chain properties of rule closures. *Formal Aspects of Computing*, 2(3):207–225, 1990. + +[ller90b] M. Ilermann. Vademecum of divergent term rewriting systems. In "Avancées en Programation" – *Journées ALCET-GROPLAN, Nice (France)*, volume 70, pages 148–164. BIGRE, January 1990. +---PAGE_BREAK--- + +tems. In S. Kaplan and M. 
Okada, editors, *Proceedings 2nd International Workshop on Conditional and Typed Rewriting Systems (CTRS'90)*, Montreal (Canada), volume 516 of Lecture Notes in Computer Science, pages 143–154. Springer-Verlag, June 1990. + +[Kir89] II. Kirchner. Schematization of infinite sets of rewrite rules generated by divergent completion process. *Theoretical Computer Science*, 67(2-3):303–332, 1989. + +[SK91] A. Sattler-Klein. Divergence phenomena during completion. In R.V. Book, editor, *Proceedings 4th Conference on Rewriting Techniques and Applications (RTA'91), Como (Italy)*, volume 488 of Lecture Notes in Computer Science, pages 374–385. Springer-Verlag, April 1991. \ No newline at end of file diff --git a/samples_new/texts_merged/6426180.md b/samples_new/texts_merged/6426180.md new file mode 100644 index 0000000000000000000000000000000000000000..8d043b95848cc3fbc0c13fb3165a025e2a60aa5f --- /dev/null +++ b/samples_new/texts_merged/6426180.md @@ -0,0 +1,121 @@ + +---PAGE_BREAK--- + +A TECHNIQUE FOR PROVING +INEQUALITIES IN CARDINAL +FUNCTIONS + +by + +R. E. HODEL + +Topology Proceedings + +**Web:** http://topology.auburn.edu/tp/ + +**Mail:** Topology Proceedings +Department of Mathematics & Statistics +Auburn University, Alabama 36849, USA + +**E-mail:** topolog@auburn.edu + +**ISSN:** 0146-4124 + +COPYRIGHT © by Topology Proceedings. All rights reserved. +---PAGE_BREAK--- + +A TECHNIQUE FOR PROVING INEQUALITIES +IN CARDINAL FUNCTIONS + +R. E. Hodel + +**Introduction** + +Let $d, L, c, s, \chi$ and $\psi$ denote the following standard cardinal functions: density, Lindelöf degree, cellularity, spread (= hereditary cellularity), character, and pseudo-character. (For definitions, see [7] or [14].) 
The following inequalities are basic in the theory of cardinal invariants: (1) if $X$ is Hausdorff, then $|X| \le 2^{c(X)\chi(X)}$; (2) if $X$ is $T_1$, then $|X| \le 2^{s(X)\psi(X)}$; (3) if $X$ is Hausdorff, then $d(X) \le 2^{s(X)}$; (4) if $X$ is Hausdorff, then $|X| \le 2^{2^{s(X)}}$; (5) if $X$ is Hausdorff, then $|X| \le 2^{L(X)\chi(X)}$. (See [11] and [1].) Partition calculus and ramification arguments are used in the original proofs of these five inequalities. + +(See [8] and [9].) Specifically, the Erdös-Rado theorem $(2^{\kappa})^+ \to (\kappa^+)_\kappa^2$ is used in the proof of (1) and (2), the Erdös theorem $\kappa \to (\kappa, \omega)^2$ is used in the proof of (3), the Erdös-Rado theorem $(2^{2^{\kappa}})^+ \to (\kappa^+)_\kappa^3$ is used in the proof of (4), and in proving (5) Arhangel'skiǐ uses a difficult ramification argument to construct a free sequence of length $\kappa^+$. + +In [16] Šapirovskii proved a fundamental theorem about the cardinal function s, and from this theorem one easily obtains the two inequalities $d(X) \le 2^{s(X)}$ and $|X| \le 2^{2^{s(X)}}$. Pol [15] has modified Šapirovskii's technique to give proofs of the two inequalities $|X| \le 2^{c(X)\chi(X)}$ and $|X| \le 2^{L(X)\chi(X)}$, and I have used this technique to prove the inequality +---PAGE_BREAK--- + +$|X| \le 2^{s(X)\psi(X)}$. In summary, the work of Pol and Šapirovskiǐ gives an alternate, unified approach to the five inequalities stated above. + +The point I would like to emphasize in this paper is that the Pol-Šapirovskiǐ technique plays a fundamental, unifying role in the theory of cardinal invariants and can be used to prove a wide variety of cardinal function inequalities. Specifically, I will illustrate their technique by proving that every $\aleph_1$-compact space with a $G_δ$-diagonal has cardinality at most $2^ω$. The generalized version of this inequality is due to Ginsburg and Woods [10]; their proof uses the Erdös-Rado theorem $(2^{\kappa})^+ \to (\kappa^+)^2_{\kappa}$.
In addition, I will survey several other inequalities in cardinal functions, each of which can be proved using the Pol-Šapirovskiǐ technique. + +## The Technique Illustrated + +In order to take advantage of well known terminology, I will just prove the countable version of the Ginsburg-Woods inequality. (The proof I give can easily be extended to higher cardinality.) The following notation is used: if X is a set, $\mathcal{G}$ is a cover of X, and D is a subset of X, then $\text{st}(D, \mathcal{G}) = \bigcup\{\text{st}(x, \mathcal{G}) : x \in D\}$. Recall that a space is $\aleph_1$-compact if every uncountable subset has a limit point. + +**Lemma.** Let X be a T₁-space which is $\aleph_1$-compact, let $\mathcal{G}$ be an open cover of X, let $C \subseteq X$. Then there is a countable subset D of C such that $C \subseteq \text{st}(D, \mathcal{G})$. + +**Proof.** Suppose false. Construct a subset $E = \{x_\alpha : 0 \le \alpha < \omega_1\}$ of C such that for all $\alpha < \omega_1$, $x_\alpha \notin \bigcup_{\beta<\alpha} \text{st}(x_\beta, \mathcal{G})$. +---PAGE_BREAK--- + +Let $p$ be a limit point of $E$, and let $G$ be a member of $\mathcal{G}$ such that $p$ belongs to $G$. Since $p$ is a limit point of $E$ and $X$ is $T_1$, there exists $\alpha$ and $\beta$, $\alpha > \beta$, such that $x_\alpha$ and $x_\beta$ belong to $G$. This contradicts $x_\alpha \notin \bigcup_{\beta<\alpha}\text{st}(x_\beta, \mathcal{G})$. + +**Theorem (Ginsburg and Woods).** Let $X$ be an $\aleph_1$-compact space with a $G_\delta$-diagonal. Then $|X| \le 2^\omega$. + +*Proof.* Since $X$ has a $G_\delta$-diagonal, there is a countable sequence $\mathcal{G}_1, \mathcal{G}_2, \dots$ of open covers of $X$ such that if $p$ and $q$ are any two distinct points in $X$, then for some $n < \omega$, $q \notin \text{st}(p, \mathcal{G}_n)$. (See [4].)
Construct a sequence $\{E_\alpha : 0 \le \alpha < \omega_1\}$ of subsets of $X$ such that $(1) \ |E_\alpha| \le 2^\omega$, $(2)$ for $1 \le \alpha < \omega_1$, if $\{D_n : n < \omega\}$ is a countable collection of countable subsets of $\bigcup_{\beta<\alpha} E_\beta$, and $\bigcup_{n=1}^\infty \text{st}(D_n, \mathcal{G}_n) \ne X$, then $E_\alpha - \bigcup_{n=1}^\infty \text{st}(D_n, \mathcal{G}_n) \ne \emptyset$. + +Let $E = \bigcup_{\alpha<\omega_1} E_\alpha$; since $|E| \le 2^\omega$, the proof is complete if we can show that $E = X$. Suppose not, and let $p \in X - E$. For each $n < \omega$ let $C_n = \{x : x \in E, p \notin \text{st}(x, \mathcal{G}_n)\}$; clearly $E = \bigcup_{n=1}^\infty C_n$. For each $n < \omega$, apply the Lemma to $\mathcal{G}_n$ and $C_n$: there is a countable subset $D_n$ of $C_n$ such that $C_n \subseteq \text{st}(D_n, \mathcal{G}_n)$. Note that $E \subseteq \bigcup_{n=1}^\infty \text{st}(D_n, \mathcal{G}_n)$ and $p \notin \bigcup_{n=1}^\infty \text{st}(D_n, \mathcal{G}_n)$. Now choose $\alpha < \omega_1$ such that $\bigcup_{n=1}^\infty D_n \subseteq \bigcup_{\beta<\alpha} E_\beta$. By (2), there is some $q$ in $E_\alpha$ such that $q \notin \bigcup_{n=1}^\infty \text{st}(D_n, \mathcal{G}_n)$. This contradicts $E \subseteq \bigcup_{n=1}^\infty \text{st}(D_n, \mathcal{G}_n)$. + +## Survey of Other Inequalities + +First we need some definitions. For a $T_1$ space $X$, the point separating weight of $X$, denoted $\text{psw}(X)$, is the smallest infinite cardinal $\kappa$ such that $X$ has a separating open cover $\mathcal{S}$ with the property that every point of $X$ is in +---PAGE_BREAK--- + +at most $\kappa$ members of $\mathcal{S}$. (The cover $\mathcal{S}$ is separating if given any two distinct points $p$ and $q$ in $X$, there is some $S \in \mathcal{S}$ such that $p \in S$, $q \notin S$.) If $\text{psw}(X) = \omega$, we say that $X$ has a *point-countable separating open cover*. 
The extent of $X$, denoted $e(X)$, is the smallest infinite cardinal $\kappa$ such that every closed, discrete subset of $X$ has cardinality at most $\kappa$. (See [7], [13]). Note that for a $T_1$ space $X$, $e(X) = \omega$ if and only if $X$ is $\aleph_1$-compact. The weak Lindelöf number of $X$, denoted $wL(X)$, is the smallest infinite cardinal $\kappa$ such that every open cover of $X$ has a subcollection of cardinality $\le \kappa$ whose union is dense in $X$. Note that $wL(X) \le L(X)$ and $wL(X) \le c(X)$. If $wL(X) = \omega$, we say that $X$ is weakly Lindelöf. + +Each of the following inequalities can be proved using the Pol-Šapirovskii technique. (1) If $X$ is $T_1$, then $|X| \le 2^{e(X)\text{psw}(X)}$. (2) If $X$ is $T_1$, then $|X| \le \text{psw}(X)^{L(X)\psi(X)}$. (3) If $X$ is normal and $T_1$, then $|X| \le 2^{wL(X)\chi(X)}$. (See [3], [5], and [2] respectively.) + +The countable version of (1) states that an $\aleph_1$-compact space with a point-countable separating open cover has cardinality at most $2^\omega$. (In fact, the family of compact subsets has cardinality at most $2^\omega$.) This result should be compared with the Ginsburg-Woods inequality. Two proofs of (1) are given in [3]; the first uses an intersection theorem of Erdős and Rado while the second proof uses the Pol-Šapirovskii technique. (This second proof is also closely related to a construction due to M. E. Rudin [6].) + +Arhangel'skii has asked if every Lindelöf Hausdorff +---PAGE_BREAK--- + +space with countable pseudo-character has cardinality at most $2^{\omega}$, and (2) gives a partial answer to this question. Specifically, the countable version of (2) states that a Lindelöf space having countable pseudo-character and point separating weight at most $2^{\omega}$ has cardinality at most $2^{\omega}$. + +The countable version of (3) states that a weakly Lindelöf first countable Hausdorff space which is also normal has cardinality at most $2^{\omega}$. 
Except for the normality assumption, inequality (3) unifies the two inequalities $|X| \le 2^{c(X)\chi(X)}$ and $|X| \le 2^{L(X)\chi(X)}$. + +The reader is referred to [2], [5], [15], and [17] for additional inequalities in cardinal functions which can be proved using the Pol-Šapirovskii technique. + +## References + +1. A. V. Arhangel'skii, *The cardinality of first countable bicompacta*, DAN SSSR 187 (1969), 967-970. + +2. M. Bell, J. Ginsburg and G. Woods, *Cardinal inequalities for topological spaces involving the weak Lindelöf number*, Pacific J. Math. 79 (1978), 37-45. + +3. D. K. Burke and R. E. Hodel, *On the number of compact subsets of a topological space*, Proc. Amer. Math. Soc. 58 (1976), 363-368. + +4. J. G. Ceder, *Some generalizations of metric spaces*, Pacific J. Math. 11 (1961), 105-126. + +5. A. Charlesworth, *On the cardinality of topological spaces*, Proc. Amer. Math. Soc. 66 (1977), 138-142. + +6. H. H. Corson and E. Michael, *Metrizability of certain countable unions*, Illinois J. Math. 8 (1964), 351-360. + +7. R. Engelking, *General Topology*, Warsaw, 1977. + +8. P. Erdös and R. Rado, *A partition calculus in set theory*, Bull. Amer. Math. Soc. 62 (1956), 427-489. + +9. ________, *Intersection theorems for systems of sets*, J. London Math. Soc. 35 (1960), 85-90. +---PAGE_BREAK--- + +10. J. Ginsburg and G. Woods, *A cardinal inequality for topological spaces involving closed discrete sets*, Proc. Amer. Math. Soc. **64** (1977), 357-360. + +11. A. Hajnal and I. Juhász, *Discrete subspaces of topological spaces*, Indag. Math. **29** (1967), 343-356. + +12. R. E. Hodel, *New proof of a theorem of Hajnal and Juhász on the cardinality of topological spaces*, Bull. Acad. Polon. Sci. Sér. Sci. Math. Astronom. Phys. **24** (1976), 999-1000. + +13. ________, *On a theorem of Arhangel'skii concerning Lindelöf p-spaces*, Can. J. Math. **27** (1975), 459-468. + +14. I. Juhász, *Cardinal functions in topology*, Math. Centr. Amsterdam, 1971. + +15. R. 
Pol, *Short proofs of two theorems on cardinality of topological spaces*, Bull. Acad. Polon. Sci. Sér. Sci. Math. Astronom. Phys. **22** (1974), 1245-1249. + +16. B. Šapirovskii, *Discrete subspaces of topological spaces*. Weight, tightness and Souslin number, DAN SSSR **202** (1972), 779-782. + +17. ________, *Canonical sets and character*. Density and weight in compact spaces*, Soviet Math. Dokl. **15** (1974), 1282-1287. + +Duke University + +Durham, North Carolina 27706 \ No newline at end of file diff --git a/samples_new/texts_merged/6470527.md b/samples_new/texts_merged/6470527.md new file mode 100644 index 0000000000000000000000000000000000000000..c55b8cae90f58b9c9e572f5ba5b9f7cb80af8170 --- /dev/null +++ b/samples_new/texts_merged/6470527.md @@ -0,0 +1,315 @@ + +---PAGE_BREAK--- + +# MECHANISM DESIGN AND MOTION PLANNING OF PARALLEL-CHAIN NONHOLONOMIC MANIPULATOR + +Li, L. + +School of Mechanical Engineering, Baoji University of Arts and Sciences, Baoji 721016, China +E-Mail: leeliang@126.com + +## Abstract + +Inspired by the nonholonomic theory, this paper proposes a parallel-chain nonholonomic manipulator with a chainable kinetics model. To build the manipulator, the friction disc motion synthesis and decomposition mechanism was taken as the joint transmission component. Based on Chow's theorem, the kinetics model of the manipulator was proved as nonholonomic and controllable. Then, the system's configuration coordinates were mapped from the joint space to the chain space via coordinate transformation, and the manipulator motion was planned in the chain space. Through two simulation experiments, it is proved that all joints of the proposed manipulator can move to the target configuration within the specified time. To sum up, the author successfully built an underactuated manipulator that can drive the motion of four joints with two motors. The research findings lay the basis for the development of small lightweight manipulators. 
+ +(Received, processed and accepted by the Chinese Representative Office.) + +**Key Words:** Nonholonomic, Parallel-Chain, Chain Transformation, Motion Planning + +## 1. INTRODUCTION + +In analytical mechanics, a nonholonomic system refers to a system whose constraint equations contain the derivative of the coordinates with respect to time. In other words, the system speed or acceleration is under constraint. The nonholonomic mechanical system is underactuated, as it has fewer degrees of freedom (DoFs) than the number of dimensions in its configuration space. Hence, a multi-dimensional motion in the configuration space can be determined by a few control inputs, making it possible to design compact, lightweight multi-joint manipulators. The research into nonholonomic manipulator carries practical implications for the development of assistive robots like small robots, medical robots and multi-fingered dexterous hands. + +In the field of robotics, the research into nonholonomic system mainly concentrates on the control of existing nonholonomic robots, such as wheeled mobile robots, spherical robots and underwater robots [1-3]. Owing to the motion nonlinearity of nonholonomic robots, it is necessary to develop a unique path planning method for each nonholonomic system, adding to the difficulty in the motion control of new nonholonomic robots. + +In reality, many kinematics models of existing nonholonomic robots (e.g. wheeled mobile robots and trailer systems) can be converted into the chained model, a drift-free controllable nonholonomic system model. A system whose kinematics equations can be described with a chained model is called a chained system. Such a system boasts excellent properties (nilpotent and smooth), and simple structured mathematical model. In view of these advantages, many scholars have created nonholonomic robots with chainable kinematics model. 
For example, Nakamura proposed an underactuated manipulator based on a friction ball vector synthesis and decomposition mechanism [4]. The manipulator supports path planning via the control method of a chained system, as its kinematics model can be converted into a chained model. Under the diffeomorphism of chained transformation, paper [5] designs the gear steering connection mechanism for nonpowered trailer, and constructs a chainable wheeled mobile trailer system that can accurately track the target trajectory. Yamaguchi developed a 4 DoFs +---PAGE_BREAK--- + +wheeled mobile robot capable of chained transformation [6-8]; the wheeled mobile mechanism is controlled precisely with the drive angle and azimuth of the traction robot and the angle of the active steering system mounted on the connecting rod. + +Based on the previous research into a parallel-chain type chainable nonholonomic manipulator [9-11], this paper puts forward a two-motor parallel-chain four-joint nonholonomic manipulator. In the parallel-chain manipulator, the friction disc motion synthesis and decomposition mechanism serve as the joint transmission component, and the motion is transferred by dual universal joint in parallel-chain mode. Compared to the parallel-chain manipulator, the proposed manipulator, with a concise structure and a small power loss, offers an effective solution to the conflict between the number of drive units and manipulator mass in multi-joint manipulator. 
+ +The remainder of this paper is organized as follows: Section 2 introduces the design of the parallel-chain nonholonomic manipulator; Section 3 establishes the kinematics model of the manipulator, demonstrates the manipulator controllability, and analyses the chain transformation features; Section 4 plans a path that maps back to the joint space in the chain space based on the control law of time polynomial motion planning; Section 5 concludes that the proposed manipulator can move from the initial configuration to the target configuration within the specific time under the control law of the chained system, and outperforms the parallel-chain manipulator in trajectory simplicity and motion efficiency. + +# 2. PARALLEL-CHAIN NONHOLONOMIC MANIPULATOR +## MECHANISM + +### 2.1 Motion principle of friction disc + +As shown in Fig. 1, when the friction wheel with the radius $r$ rotates around axis $I$ at the angular velocity $W_i$, there is only pure rolling between the friction wheel and the friction disc; then, the friction disc will rotate around axis $O$ at the angular velocity $W_o$. The friction wheel and the friction disc are perpendicular to each other. Let $M$ be the contact point between the friction wheel and the friction disc. The friction wheel can also rotate relative to the friction disc around the connecting line between its own axis and point $M$. When the rotation angle reaches $\alpha$, the linear velocities of the friction wheel and the friction disc were plotted into a vector diagram (Fig. 1 b). + +Figure 1: Friction disc motion synthesis and decomposition mechanism. + +Then, the following equation holds: $V_o = W_o R = V_i \cos \alpha = W_i r \cos \alpha$. +---PAGE_BREAK--- + +Thus, we have: + +$$W_o = \frac{r}{R} w_i \cos \alpha \quad (1)$$ + +where R is the distance between point M and the centre of friction disc; $V_i$ and $V_o$ are the linear velocities of the friction wheel and the friction disc at point M, respectively. 
+ +It can be seen that the transmission ratio between the friction wheel and the friction disc can be controlled by adjusting the angle $\alpha$. Hence, $\alpha$ was defined as the transmission angle. + +The rolling-induced relative motion of the friction wheel on the friction disc depends on the relative change of configuration. Based on the relative configuration-variable structure, the designed friction disc motion synthesis and decomposition mechanism is subjected to the nonholonomic constraint [12-15]. + +## 2.2 Design of parallel-chain nonholonomic manipulator + +A friction disc mechanism was arranged at each joint of the manipulator. In the mechanism, the friction wheel and the friction disc are permanently connected to the front and rear joints, respectively. The transmission ratio between the two components changes with the included angle between them (i.e. the joint angle). Fig. 2 illustrates the structure of parallel-chain four-joint manipulator. + +Figure 2: Mechanism of parallel-chain four-joint manipulator. + +The rotation of motor 2 directly drives joint 1 to rotate about the axis by the angle $\theta_1$. Since friction wheel 1 is fixed to the frame through the side plate and friction plate 1 is fixed to the first joint, motor 2 controls the rotation angle $\theta_1$ of joint 1 as if a transmission angle $\theta_1$ is added to the friction transmission of the friction wheel and the friction disc. + +Motor 1 transmits its energy in two directions. In one direction, the motor drives the friction wheel through gears, the friction wheel drives the friction disc via rolling friction, and the friction disc drives joint 2 to rotate by the angle $\theta_2$ through the synchronous belt; meanwhile, the motor adds a transmission angle $\theta_2$ between the friction wheel and the friction disc at joint 2. 
In the other direction, motor 1 transmits its energy to the nearest rear joint via the dual universal joint, so that each rear joint can transmit energy to its next rear joint in turns. + +In this way, the four joints can be driven by two motors. The prototype of the parallel-chain four-joint manipulator is presented in Fig. 3. +---PAGE_BREAK--- + +Figure 3: Prototype of parallel-chain four-joint manipulator. + +The following issues call for special attention in the production and assembly of the prototype: + +(1) To ensure effective, reliable and accurate transmission of motion and force, there should be sufficient friction between the friction wheel and the friction disc. Hence, the material should have a large friction coefficient. Besides, a certain amount of positive pressure should be applied to point M, such that there is no relative sliding but pure rolling between the friction wheel and the friction. + +(2) As shown in Fig. 4 a, point M should be placed on the axis of the joint. Otherwise, the friction wheel will slide on the friction disc when the joint rotates to a certain angle. The resulting change in the distance R between point M and the centre of the friction disc will reduce the transmission accuracy. + +(3) The input shaft and the output shaft of the dual universal joint should have the same rotational angular velocity. In other words, the centreline OO of the dual universal joint must be consistent with the joint axis. Moreover, the intermediate shaft should be retractable, so as to compensate for the change in the axial distance between the input and output shafts caused by the rotation of manipulator joints (Fig. 4 b). + +(4) For the compactness and lightweight of the whole structure, the periphery of the connecting rod should be made into large rounded corner and the central part of the rod should be grooved, without sacrificing the strength and rigidity. 
In the horizontal direction, the main energy transmission chain (dual universal joint) and the motion transmission chain (friction wheel and friction disc) should be arranged at the same distance from the edge of the manipulator. The distance should approximate the spacing between the two transmission chains. In the vertical direction, the two transmission chains should be placed symmetrically about the connecting rod. All these arrangements ensure that the centre of mass of the manipulator is close to its geometric centre, thereby improving the kinetic performance of the manipulator. + +Figure 4: a) location of point M, b) structure of dual universal joint. +---PAGE_BREAK--- + +# 3. KINEMATICS ANALYSIS AND CHAIN TRANSFORMATION + +## 3.1 Kinematics modelling + +The configuration space of the four-joint nonholonomic manipulator hinges on the joint rotation angle $\theta_i$ ($i=1, 2, 3, 4$) and the angular displacement $\varphi$ of the friction wheel. Hence, the generalized coordinate vector of the manipulator system was defined as $q = [q_1, q_2, q_3, q_4, q_5] = [\varphi, \theta_1, \theta_2, \theta_3, \theta_4]$, and the control inputs as the angular velocities of the two motors $u_1$ and $u_2$. According to the kinematics relationship, the kinematics model of the parallel-chain four-joint manipulator can be derived as: + +$$ +\begin{bmatrix} \dot{q}_1 \\ \dot{q}_2 \\ \dot{q}_3 \\ \dot{q}_4 \\ \dot{q}_5 \end{bmatrix} = +\begin{bmatrix} \varphi \\ \dot{\theta}_1 \\ \dot{\theta}_2 \\ \dot{\theta}_3 \\ \dot{\theta}_4 \end{bmatrix} = +\begin{bmatrix} 1 & 0 \\ 0 & 1 \\ \frac{r}{R}\cos\theta_1 & 0 \\ \frac{r}{R}\cos\theta_2 & 0 \\ \frac{r}{R}\cos\theta_3 & 0 \end{bmatrix} +\begin{bmatrix} u_1 \\ u_2 \end{bmatrix} = [p_1(q) \enspace p_2(q)] +\begin{bmatrix} u_1 \\ u_2 \end{bmatrix} +\quad (2) +$$ + +where *r* is the radius of the friction wheel. + +## 3.2 Controllability analysis + +Eq. (2) describes a drift-free control system. 
For such a drift-free symmetric affine system, the reachable space is expanded from the distribution $\Delta(q) = \text{span}\{p_1, p_2\}$. + +According to the controllability conditions of nonholonomic systems (Chow's theorem) [16], a drift-free affine system is controllable if its reachable distribution $\Delta_p(q) = \text{span}\{p_1, p_2, [p_1, p_2], [p_1, [p_1, p_2]], ...\}$ is in full rank. Note that $[p_1, p_2]$ and $[p_1, [p_1, p_2]]$ are the Lie bracket operations on vectors $p_1, p_2$ and $p_1, [p_1, p_2]$, respectively. Then, we have $[p_1, p_2] = \frac{\partial p_2 q}{\partial q} p_1(q) - \frac{\partial p_1(q)}{\partial q} p_2(q)$. + +Thus, the reachable space of the parallel-chain nonholonomic four-joint manipulator can be expressed as: + +$$ +\Delta_p (q) = \operatorname{span} \{ p_1, p_2, [p_1, p_2], [p_1, [p_1, p_2]], [p_1, [p_1, [p_1, p_2]]] \} = +\begin{bmatrix} +1 & 0 & 0 & 0 & 0 \\ +0 & 1 & 0 & 0 & 0 \\ +k c_1 & 0 & k s_1 & 0 & 0 \\ +k c_2 & 0 & 0 & k^2 s_1 s_2 & k^3 s_1 c_1 c_2 \\ +k c_3 & 0 & 0 & 0 & k^3 s_1 s_2 s_3 +\end{bmatrix} +\quad (3) +$$ + +where $k = \frac{r}{R}$, $c_i = \cos \theta_i$, $s_i = \sin \theta_i \neq 0$ ($i = 1, 2, 3$). + +It can be derived from Eq. (3) that $\dim \Delta_p(q) = 5$ if $\sin\theta_1 \neq 0$, $\sin\theta_2 \neq 0$ and $\sin\theta_3 \neq 0$, that is, $\sin\theta_i \neq 0$ ($i = 1, 2, 3$). In this case, the rank of the matrix equals the number of dimensions in the configuration space. In other words, the reachable space expanded from the system is involutive, which satisfies the controllability rank condition. Therefore, the parallel-chain four-joint nonholonomic manipulator is nonholonomic and controllable in the five-dimensional reachable space, as long as its work space satisfies $\theta_i \neq 0$ ($i = 1, 2, 3$). In this case, the motion of the five configuration variables can be controlled with two motors. 
+ 

## 3.3 Analysis of chain transformation features + +After investigating a wheeled mobile robot system with *n* trailers, Sørdalen proposed the conditions and methods for the chain transformation of a drift-free affine system with a triangular configuration [17], similar to Eq. (2): +---PAGE_BREAK--- + +$$
\left\{
\begin{array}{ll}
\dot{q}_1 = u_1 \\
\dot{q}_2 = u_2 \\
\dot{q}_i = f_i(q_{i-1})u_1, & i \in \{3, \dots, n\}
\end{array}
\right.
$$ + +If the smooth function $f_i(q_{i-1})$ satisfies $\left. \frac{\partial f_i(q_{i-1})}{\partial q_{i-1}} \right|_{q=q_0} \neq 0$ ($\forall i \in \{3, 4, \dots, n\}$) in the neighbourhood of $q_0$, there exist diffeomorphic coordinate transformation and input transformation such that the system can be converted to a chained system. + +If $\theta_i \neq 0$ ($i=1, 2, 3$), then the chain transformation and input feedback transformation of the four-joint nonholonomic manipulator can be expressed as: + +$$
\left\{
\begin{aligned}
Z_5 &= \theta_4 \\
Z_4 &= k \cos \theta_3 \\
Z_3 &= -k^2 \cos \theta_2 \sin \theta_3 \\
Z_2 &= k^3 (\cos \theta_1 \sin \theta_2 \sin \theta_3 - \cos^2 \theta_2 \cos \theta_3) \\
Z_1 &= \varphi
\end{aligned}
\right.
\tag{4}
$$ + +$$
\begin{equation}
\begin{cases}
v_1 = \dot{z}_1 = \dot{\varphi} = u_1 \\
v_2 = \dot{z}_2 = k^4 c_2 (3c_1 s_2 c_3 + s_3 c_1^2 + s_3 c_2^2) u_1 - k^3 s_1 s_2 s_3 u_2
\end{cases}
\tag{5}
\end{equation}
$$ + +# 4. MOTION PLANNING FOR PARALLEL-CHAIN FOUR-JOINT NONHOLONOMIC MANIPULATOR + +The basic idea of the motion planning for chainable nonholonomic manipulator is to map the initial configuration $q^i$ and target configuration $q^f$ of the system into the initial configuration $z^i$ and target configuration $z^f$ of the chain space, forming a path from the initial configuration $z^i$ to the target configuration $z^f$, and then map the path to the joint space through inverse chain transformation. 
+ 

The relatively mature motion planning methods for chained systems include the piecewise constant input method, trigonometric function input method, polynomial input method, and switching control method. Among them, the polynomial input method stands out for its simple integration operation and the ability to control all variables to move to the target configuration along a smooth trajectory. The polynomial expression of the time-variation of the two control inputs is: + +$$
\begin{equation}
\begin{cases}
v_1(t) = b_1 \\
v_2(t) = b_2 + b_3t + b_4t^2
\end{cases}
\tag{6}
\end{equation}
$$ + +The motion planning aims to find a bounded control input $u(t)$ such that the system reaches the target configuration $z^f$ from the initial configuration $z^i$ over the specified time $T$. In other words, the system satisfies the following constraints: + +$$
\left\{
\begin{array}{l}
f_1 = z_2(T) - z_2^f = 0 \\
f_2 = z_3(T) - z_3^f = 0 \\
f_3 = z_4(T) - z_4^f = 0 \\
f_4 = z_5(T) - z_5^f = 0
\end{array}
\right.
\qquad (7)
$$ + +Through integration, the chained system can be expressed as: +---PAGE_BREAK--- + +$$
\left\{
\begin{aligned}
z_2(T) &= b_2 T + \frac{T^2}{2} b_3 + \frac{T^3}{3} b_4 + z_2^i \\
z_3(T) &= \frac{T^2}{2} b_1 b_2 + \frac{T^3}{6} b_1 b_3 + \frac{T^4}{12} b_1 b_4 + T z_2^i b_1 + z_3^i \\
z_4(T) &= \frac{T^3}{6} b_1^2 b_2 + \frac{T^4}{24} b_1^2 b_3 + \frac{T^5}{60} b_1^2 b_4 + \frac{T^2}{2} b_1^2 z_2^i + z_3^i T b_1 + z_4^i \\
z_5(T) &= \frac{T^4}{24} b_1^3 b_2 + \frac{T^5}{120} b_1^3 b_3 + \frac{T^6}{360} b_1^3 b_4 + \frac{T^3}{6} b_1^3 z_2^i + \frac{T^2}{2} z_3^i b_1^2 + T z_4^i b_1 + z_5^i
\end{aligned}
\right.
\quad (8)
$$ + +Substituting Eq. (8) into Eq. (7), we have a set of nonlinear equations about $b_1, b_2, b_3$ and $b_4$. The Newton iteration form of the equation set is: + +$$
b^{(k+1)} = b^{(k)} - [F'(b^{(k)})]^+ F(b^{(k)}) \qquad (9)
$$ + +where $F'(b)$ is the Jacobian matrix of $F(b)$; $[F'(b)]^+$ is the pseudo-inverse of $F'(b)$. 
Let $b = [b_1, b_2, b_3, b_4]^T$ and $F = [f_1, f_2, f_3, f_4]^T$. + +Given the initial value $b^{(0)}$, $b$ can be calculated by the iteration Eq. (9). Then, the trajectory of $z_i(t)$ can be acquired by substituting $b$ into Eq. (8). Through the inverse chain transformation of Eq. (4), we can obtain the expression of the angular displacement of each joint with respect to the $z$-variable. Thus, the motion curves of the angular displacement of the four joints can be expressed as: + +$$
\left\{
\begin{array}{l}
\theta_4 = z_5 \\
\theta_3 = \arccos(z_4/k) \\
\theta_2 = \arccos\left(-\displaystyle\frac{z_3}{k^2 \sin\theta_3}\right) \\
\theta_1 = \arccos\left(\displaystyle\frac{\displaystyle\frac{z_2}{k^3} + \cos\theta_3(\cos\theta_2)^2}{\sin\theta_2 \sin\theta_3}\right)
\end{array}
\right.
\qquad (10)
$$ + +# 5. SIMULATION EXPERIMENTS + +Experiment 1: + +Let the initial configuration $\theta^i = [\theta_1^i \ \theta_2^i \ \theta_3^i \ \theta_4^i]^T$ of a parallel-chain four-joint nonholonomic manipulator be $[5^{\circ} \ 5^{\circ} \ 5^{\circ} \ 5^{\circ}]^T$ and the target configuration of that manipulator be $\theta^f = [\theta_1^f \ \theta_2^f \ \theta_3^f \ \theta_4^f]^T = [15^{\circ} \ 15^{\circ} \ 15^{\circ} \ 15^{\circ}]^T$. + +Substituting the configurations into Eq. (4), the boundary conditions in the chain space
can be derived as $z^i = [z_2^i \ z_3^i \ z_4^i \ z_5^i]^T = [-0.1958 \ -0.0297 \ 0.5822 \ 0.0873]^T$ and $z^f = [z_2^f \ z_3^f \ z_4^f \ z_5^f]^T = [-0.1670 \ -0.0854 \ 0.5645 \ 0.2618]^T$. + +Figure 5: Trajectory, a) of variable *z* in the chain space, b) of each joint in the joint space. +---PAGE_BREAK--- + +Let the motion time $T = 20$ s and $b^{(0)} = [0.1 \ 0.1 \ 0.1 \ 0.1]^T$. The termination condition of the system iteration was set with the error at the termination time: + +$$e = \sqrt{(z_2(T) - z_2^f)^2 + (z_3(T) - z_3^f)^2 + (z_4(T) - z_4^f)^2 + (z_5(T) - z_5^f)^2} < 10^{-6}.$$ + +Then, Eq. (9) was solved by Newton iteration method. 
Through 9 iterations, we have $b = [b_1 \ b_2 \ b_3 \ b_4]^T = [0.0151831 \ 0.0007583 \ 0.0000772 \ -0.0000007]^T$. Substituting $b$ into Eq. (8), we have the time-variation curve of variable $z$ (Fig. 5 a). According to Eq. (10), the path in the chain space can be mapped back to the joint space via inverse transformation. Under the time polynomial input control, the output of the four joints of the nonholonomic manipulator is as shown in Fig. 5 b. + +At $T=20$ s, $\theta_1=14.9999999^{\circ}$, $\theta_2=14.9999999^{\circ}$, $\theta_3=14.9999999^{\circ}$ and $\theta_4=14.9999999^{\circ}$. + +Let the error of target configuration be: $e = \frac{\theta^r - \theta^i}{\theta^g - \theta^i}$ + +where $\theta^r$ is the actual displacement of joint rotation. At this time, target configuration error of each joint is $e_{\theta_1}=0.0000001\%$, $e_{\theta_2}=0.0000001\%$, $e_{\theta_3}=0.0000001\%$ and $e_{\theta_4}=0.0000001\%$. The simulation results show that, under the time polynomial input control, all joints have smooth trajectories except for a slight fluctuation of joint 1 in the initial phase, and arrive at the target configuration. + +### Experiment 2: + +Let the initial configuration of the proposed manipulator $\theta^i = [\theta_1^i \ \theta_2^i \ \theta_3^i \ \theta_4^i]^T$ be $[20^{\circ} \ 20^{\circ} \ 20^{\circ} \ 20^{\circ}]^T$ and its target configuration be $\theta^f = [\theta_1^f \ \theta_2^f \ \theta_3^f \ \theta_4^f]^T = [10^{\circ} \ 10^{\circ} \ 10^{\circ} \ 10^{\circ}]^T$. Suppose the motion time $T = 20$ s. Through simulation, the time-variation trajectories of the chain variable and joint variable are as shown in Figs. 6 a and 6 b, respectively. + +Figure 6: Trajectory, a) of variable $z$ in the chain space, b) of each joint in the joint space. + +At $T = 20$ s, $\theta_1 = 10.000000000016^{\circ}$, $\theta_2 = 10.000000000016^{\circ}$, $\theta_3 = 10^{\circ}$ and $\theta_4 = 10^{\circ}$. 
The simulation results show that each joint of the manipulator has a smooth trajectory and arrives at the target configuration within the specified time. + +Comparing the results of the two simulation experiments, it is clear that, all joints of the parallel-chain four-joint manipulator can move accurately from the initial configuration to the target configuration within the specified time, when the input is controlled by the time polynomial obtained through Newton iteration. The motion of each joint is stable, with virtually no large fluctuation. Therefore, the Newton iteration-based polynomial input control is a feasible motion planning method for the parallel-chain four-joint nonholonomic manipulator. +---PAGE_BREAK--- + +# 6. CONCLUSIONS + +Considering the friction disc motion synthesis and decomposition mechanism, this paper proposes a chainable-type parallel-chain four-joint nonholonomic manipulator based on the parallel-chain nonholonomic manipulator. According to the nonlinear control theory, the author proved that the reachable space expanded from the manipulator system satisfies the involution distribution, i.e. the system is controllable. Then, the nonholonomic motion planning was transformed into the solution to nonlinear equation set, using the time polynomial input method of the chained system. The unknown coefficients of the time polynomial were solved by Newton iteration method. After that, two simulation experiments were performed on the motion between initial and target configurations. The results show that all joints of the proposed manipulator can move stably and accurately from the initial configuration to the target configuration within the specified time. 
+ +Nevertheless, there is no guarantee that the planned path between the initial configuration and the target configuration in the chain space can be transformed back into the joint space without singularity, especially when the joint variables are coupled tightly due to the increase in the number of joints on the manipulator. Thus, the key to the path planning of nonholonomic manipulator lies in the existence of the solution to inverse transformation of the planed path from the chain space to joint space. In the future research, the author will construct the mathematical expression of the geometric and topological features of the nonholonomic path, identify the conditions for the path between adjacent configurations to converge into the chain space, and establish the existence criterion of the inverse transformation solution for the nonholonomic path. + +# ACKNOWLEDGEMENT + +This work is supported by the Special Scientific Research Plan of Shaanxi Provincial Department of Education (17JK0048), and the Specialized Research Fund for the Doctor Program of Baoji University of Arts and Sciences (ZK16044). + +# REFERENCES + +[1] Zhai, J.-Y.; Song, Z.-B. (2018). Adaptive sliding mode trajectory tracking control for wheeled mobile robots, *International Journal of Control*, 8 pages, doi:10.1080/00207179.2018.1436194 + +[2] Van Loock, W.; Pipeleers, G.; Diehl, M.; De Schutter, J.; Swevers, J. (2014). Optimal path following for differentially flat robotic systems through a geometric problem formulation, *IEEE Transactions on Robotics*, Vol. 30, No. 4, 980-985, doi:10.1109/TRO.2014.2305493 + +[3] Li, L. (2017). Nonholonomic motion planning using trigonometric switch inputs, *International Journal of Simulation Modelling*, Vol. 16, No. 1, 176-186, doi:10.2507/IJSIMM16(1)CO5 + +[4] Chung, W.-J.; Nakamura, Y. (2002). Design and control of a chained form manipulator, *International Journal of Robotics Research*, Vol. 21, No. 
5-6, 389-408, doi:10.1177/027836402761393351 + +[5] Nakamura, Y.; Ezaki, H.; Tan, Y.-G.; Chung, W. (2001). Design of steering mechanism and control of nonholonomic trailer systems, *IEEE Transactions on Robotics and Automation*, Vol. 17, No. 3, 367-374, doi:10.1109/70.938393 + +[6] Yamaguchi, H.; Mori, M.; Kawakami, A. (2011). Control of a five-axle, three-steering coupled-vehicle system and its experimental verification, *IFAC Proceedings Volumes*, Vol. 44, No. 1, 12976-12984, doi:10.3182/20110828-6-IT-1002.01455 + +[7] Yamaguchi, H. (2012). Dynamical analysis of an undulatory wheeled locomotor: a trident steering walker, *IFAC Proceedings Volumes*, Vol. 45, No. 22, 157-164, doi:10.3182/20120905-3-HR-2030.00064 +---PAGE_BREAK--- + +[8] Yamaguchi, H. (2007). A path following feedback control law for a trident steering walker, *Transactions of the Society of Instrument and Control Engineers*, Vol. 43, No. 7, 562-571, doi:10.9746/ve.sicetr1965.43.562 + +[9] Dobrin, C.; Bondrea, I.; Pîrvu, B.-C. (2015). Modelling and simulation of collaborative processes in manufacturing, *Academic Journal of Manufacturing Engineering*, Vol. 13, No. 3, 18-25 + +[10] Tan, Y.-G.; Li, L.; Liu, M.-Y.; Chen, G.-L. (2012). Design and path planning for controllable underactuated manipulator, *International Journal of Advancements in Computing Technology*, Vol. 4, No. 2, 212-221, doi:10.4156/ijact.vol4.issue2.26 + +[11] Li, L.; Tan, Y.-G.; Li, Z. (2014). Nonholonomic motion planning strategy for underactuated manipulator, *Journal of Robotics*, Vol. 2014, Paper 743857, 10 pages, doi:10.1155/2014/743857 + +[12] Djedai, H.; Mdouki, R.; Mansouri, Z.; Aouissi, M. (2017). Numerical investigation of three-dimensional separation control in an axial compressor cascade, *International Journal of Heat and Technology*, Vol. 35, No. 3, 657-662, doi:10.18280/ijht.350325 + +[13] Tan, Y.-G.; Jiang, Z.-Q.; Zhou, Z.-D. (2006). 
A nonholonomic motion planning and control based on chained form transformation, *Proceedings of the 2006 IEEE/RSJ International Conference on Intelligent Robots and Systems*, 3149-3153, doi:10.1109/IROS.2006.282337 + +[14] Pamuk, M. T.; Savaş, A.; Seçgin, Ö.; Arda, E. (2018). Numerical simulation of transient heat transfer in friction-stir welding, *International Journal of Heat and Technology*, Vol. 36, No. 1, 26-30, doi:10.18280/ijht.360104 + +[15] Medina, Y. C.; Fonticiella, O. M. C., Morales, O. F. G. (2017). Design and modelation of piping systems by means of use friction factor in the transition turbulent zone, *Mathematical Modelling of Engineering Problems*, Vol. 4, No. 4, 162-167, doi:10.18280/mmep.040404 + +[16] Li, Z. X. (1997). *A Mathematical Introduction to Robot Manipulation*, China Machine Press, Beijing + +[17] Sørdalen, O. J. (1993). Conversion of the kinematics of a car with n trailers into a chained form, *Proceedings of the 1993 IEEE International Conference on Robotics and Automation*, Vol. 1, 382-387, doi:10.1109/ROBOT.1993.292011 \ No newline at end of file diff --git a/samples_new/texts_merged/6743834.md b/samples_new/texts_merged/6743834.md new file mode 100644 index 0000000000000000000000000000000000000000..fdec9cfecea87a0ead2237590343f3d38b8622fa --- /dev/null +++ b/samples_new/texts_merged/6743834.md @@ -0,0 +1,93 @@ + +---PAGE_BREAK--- + +# Solutions Complex Analysis Stein Shakarchi + +When people should go to the ebook stores, search creation by shop, shelf by shelf, it is truly problematic. This is why we provide the ebook compilations in this website. It will agreed ease you to look guide **solutions complex analysis stein shakarchi** as you such as. + +By searching the title, publisher, or authors of guide you truly want, you can discover them rapidly. In the house, workplace, or perhaps in your method can be all best place within net connections. 
If you take aim to download and install the solutions complex analysis stein shakarchi, it is totally easy then, back currently we extend the link to purchase and make bargains to download and install solutions complex analysis stein shakarchi hence simple! + +is one of the publishing industry's leading distributors, providing a comprehensive and impressively high-quality range of fulfilment and print services, online book reading and download. + +## Solutions Complex Analysis Stein Shakarchi + +SOLUTIONS/HINTS TO THE EXERCISES FROM COMPLEX ANALYSIS BY STEIN AND SHAKARCHI 3 Solution 3. $z^n = s e^{i\varphi}$ implies that $z = s^{1/n} e^{i(\varphi + 2\pi k)/n}$, where $k = 0, 1, \dots, n-1$ and $s^{1/n}$ is the real $n$th root of the positive number $s$. There are $n$ solutions as there should be since we are finding the roots of a degree n polynomial in the algebraically closed field C. + +## SOLUTIONS/HINTS TO THE EXERCISES FROM COMPLEX ANALYSIS BY ... + +Chapter 1. Preliminaries to Complex Analysis 1.1 Complex numbers and the complex plane 1.1.1 Basic properties 1.1.2 Convergence 5.1.3 Sets in the complex plane 5.2 Functions on the complex plane 8.2.1 Continuous functions 8.2.2 Holomorphic functions 8.2.3 Power series 14.3 Integration along curves 18.4 Exercises 24 Chapter 2. +---PAGE_BREAK--- + +**Complex Analysis (Princeton Lectures in Analysis, Volume II)** + +Complex Analysis (Elias M. Stein, Rami Shakarchi) + +**(PDF) Complex Analysis (Elias M. Stein, Rami Shakarchi ...** + +solutions-complex-analysis-stein-shakarchi 1/1 Downloaded from datacenterdynamics.com.br on October 27, 2020 by guest [MOBI] Solutions Complex Analysis Stein Shakarchi Yeah, reviewing a book solutions complex analysis stein shakarchi could increase your close links listings. This is just one of the solutions for you to be successful. 
+ +**Solutions Complex Analysis Stein Shakarchi ...** + +Stein and Shakarchi move from an introduction addressing Fourier series and integrals to in-depth considerations of complex analysis; measure and integration theory, and Hilbert spaces; and, finally, further topics such as functional analysis, distributions and elements of probability theory. + +**Stein And Shakarchi Complex Analysis Manual Solution ...** + +SOLUTIONS/HINTS TO THE EXERCISES FROM COMPLEX ANALYSIS BY STEIN AND SHAKARCHI 3 Solution 3.zn = $\text{sei}\u3c6$ implies that $z = s \ 1 \ n \ \text{ei}(\u3c6 \ n + 2\pi i k)$, where $k = 0, 1, \dots, n-1$ and $s \ 1 \ n$ is the real nth root of the positive number s. + +**solution to complex analysis stein shakarchi - Análise Complex** + +Solutions Complex Analysis Stein Shakarchi Solutions Complex Analysis Stein Shakarchi 3 Solution 3zn= $\text{sei}\varphi$ implies that $z=s1n\text{ei}(\varphi+2\pi ik)$, where $k=0,1,\dots,n-1$ and $s1n$ is the real nth root of the positive number s There are nsolutions as there should be since we are finding the roots of a degree n polynomial in the algebraically Fourier Analysis Solutions Stein Shakarchi Stein Shakarchi Real Analysis Solutions FROM COMPLEX ANALYSIS BY STEIN AND + +**Read Online Real Analysis Stein Shakarchi Solutions** + +Stein And Shakarchi Complex Analysis Manual Solution. ... The starting point is the simple idea of extending a function initially given for real values of the argument to one that is defined when +---PAGE_BREAK--- + +the argument is complex. ... + +**Stein Real Analysis Solution - costamagarakis.com** + +Fourier Analysis Solutions Stein Shakarchi The Princeton Lectures in Analysis is a series of four mathematics textbooks, each covering a different area of mathematical analysis. They were written by Elias M. Stein and Rami Shakarchi and published by Princeton University Press between 2003 and 2011. 
+ +**Download Stein Shakarchi Real Analysis** + +and the textbook is Complex Analysis by Stein and Shakarchi (ISBN13: 978-0-691-11385-2). Note to students: it's nice to include the statement of the problems, but I leave that up to you. I am only skimming the solutions. I will occasionally add some comments or mention alternate solutions. If + +**Math 302: Solutions to Homework - Williams College** + +Princeton Lectures in Analysis. The Princeton Lectures in Analysis is a series of four mathematics textbooks, each covering a different area of mathematical analysis. They were written by Elias M. Stein and Rami Shakarchi and published by Princeton University Press between 2003 and 2011. They are, in order, Fourier Analysis: An Introduction; Complex Analysis; Real Analysis: Measure Theory, Integration, and Hilbert Spaces; and Functional Analysis: Introduction to Further Topics in Analysis. + +**Princeton Lectures in Analysis - Wikipedia** + +June 22nd, 2018 - Download and Read Stein Shakarchi Fourier Analysis Solutions Stein Shakarchi Fourier Analysis Solutions Give us 5 minutes and we will show you the best book to read today " COMPLEX ANALYSIS BY ELIAS M STEIN ANSWERS + +**Fourier Analysis Solutions Stein Shakarchi** + +Problem 4 (3.2 in Stein-Shakarchi) Integrate over the upper semicircular contour; the integral over the semicircular part is 0 since the degree of the denominator is greater than 2. Therefore the desired integral is just the sum of all residues that lie in the upper semicircular contour. The poles are the 4-th + +**Solution to Stein Complex Analysis | Holomorphic** +---PAGE_BREAK--- + +**Function ...** + +Numerous examples and applications throughout its four planned volumes, of which Complex Analysis is the second, highlight the far-reaching consequences of certain ideas in analysis to other fields... + +**Complex Analysis by Elias M. Stein, Rami Shakarchi - Books ...** + +Real Analysis: Measure Theory, Integration, and Hilbert Spaces +Elias M. 
Stein and Rami Shakarchi. Real Analysis is the third volume in the Princeton Lectures in Analysis, a series of four textbooks that aim to present, in an integrated manner, the core areas of analysis. Here the focus is on the development of measure and... + +**Rami Shakarchi | Princeton University Press** + +and Shakarchi Real Analysis Solution(Stein………………) - कौशल The Princeton Lectures in Analysis is a series of four mathematics textbooks, each covering a different area of mathematical analysis.They were written by Elias M. Stein and Rami Shakarchi Stein Real Analysis Solution - food.whistleblower.org + +**Real Analysis Stein Shakarchi Solutions** + +Harvard Mathematics Department : Home page + +**Harvard Mathematics Department : Home page** + +Veja gratis o arquivo Stein & Shakarchi - Complex Analysis - Solutions enviado para a disciplina de Análise Complexa +Categoria: Exercício - 5 - 30060137 + +Copyright code: d41d8cd98f00b204e9800998ecf8427e. \ No newline at end of file diff --git a/samples_new/texts_merged/6813453.md b/samples_new/texts_merged/6813453.md new file mode 100644 index 0000000000000000000000000000000000000000..e9df90b3b81ee31a374bfa0d468938dce31a63f5 --- /dev/null +++ b/samples_new/texts_merged/6813453.md @@ -0,0 +1,826 @@ + +---PAGE_BREAK--- + +# Multipartite entanglement and high-precision metrology + +Géza Tóth* + +Department of Theoretical Physics, The University of the Basque Country, P.O. Box 644, E-48080 Bilbao, Spain; + +IKERBASQUE, Basque Foundation for Science, E-48011 Bilbao, Spain; and + +Research Institute for Solid State Physics and Optics, Hungarian Academy of Sciences, P.O. Box 49, H-1525 Budapest, Hungary + +(Received 14 October 2011; published 16 February 2012) + +We present several entanglement criteria in terms of the quantum Fisher information that help to relate various forms of multipartite entanglement to the sensitivity of phase estimation. 
We show that genuine multipartite entanglement is necessary to reach the maximum sensitivity in some very general metrological tasks using a two-arm linear interferometer. We also show that it is needed to reach the maximum average sensitivity in a certain combination of such metrological tasks. + +DOI: 10.1103/PhysRevA.85.022322 + +PACS number(s): 03.67.Bg, 03.65.Ud, 42.50.St + +## I. INTRODUCTION + +There has been a rapid development in the technology of quantum experiments with photons [1–6], trapped ions [7,8], and cold atoms [9]. In many of the experiments the goal is to create a state with genuine multipartite entanglement [1–8]. From the operational point of view, the meaning of such an entanglement is clear [7,10]. An *N*-qubit quantum state is a quantum state with genuine *N*-partite entanglement cannot be prepared by mixing *N*-qubit pure states, in which some groups of particles have not interacted. Thus, the experiment presents something qualitatively new compared to an (*N* − 1)-qubit experiment. There is an extensive literature on the detection of such entanglement (e.g., see Ref. [11] for a review). + +One of the important applications of entangled multipartite quantum states is sub-shot-noise metrology [12]. In metrology, as can be seen in Fig. 1, one of the basic tasks is phase estimation connected to the unitary dynamics of a linear interferometer + +$$ \varrho_{\text{output}} = e^{-i\theta J_{\vec{n}}} \varrho e^{+i\theta J_{\vec{n}}}, \quad (1) $$ + +where $\varrho$ is the input state of the interferometer, while $\varrho_{\text{output}}$ is the output state, and $J_{\vec{n}}$ is a component of the collective angular momentum in the direction $\vec{n}$. The important question is how well we can estimate the small angle $\theta$ measuring $\varrho_{\text{output}}$. 
For such an interferometer the phase estimation sensitivity, assuming *any* type of measurement, is limited by the quantum Cramér-Rao bound as [13,14] + +$$ \Delta\theta \ge \frac{1}{\sqrt{F_Q[\varrho, J_{\vec{n}}]}}, \quad (2) $$ + +where $F_Q$ is the quantum Fisher information. The relationship between phase estimation sensitivity and entanglement in linear interferometers has already been examined [15], and an entanglement condition has been formulated with the sensitivity of the phase estimation, that is, with the quantum Fisher information. It has been found that some entangled states provide a better sensitivity in phase estimation than separable states. It has also been proven that not all entangled states are useful for phase estimation, at least in a linear interferometer [16]. Moreover, in another context, it has been noted that multipartite entanglement, not only simple + +nonseparability, is needed for extreme spin squeezing [17]. While this finding is not directly related to the theory of the quantum Fisher information, it does show that multipartite entanglement is needed for a large sensitivity in certain concrete metrological tasks. + +A question arises: Would it be possible to relate genuine multipartite entanglement or any other type of multipartite entanglement to the quantum Fisher information? Apart from the point of view of metrology, this is also interesting from the point of view of entanglement criteria. Bipartite entanglement criteria generalized for the multipartite case typically detect any, that is, not necessarily genuine, multipartite entanglement. In fact, so far conditions for genuine multipartite entanglement were mostly linear in operator expectation values (e.g., entanglement witnesses [18–21] or Bell inequalities [22–26]). There are also criteria quadratic in operator expectation values [27–29], a strong criterion based on the elements of the density matrix [30,31] and some spin squeezing inequalities [32–34]. 
For us, a starting point can be that existing entanglement conditions based on the Wigner-Yanase skew information [35] can also detect genuine multipartite entanglement and many properties of the skew information and the quantum Fisher information are similar. + +In this paper, we examine what advantage states with various forms of multipartite entanglement offer over separable states in metrology. We show that in order to have the maximal sensitivity in certain metrological tasks, $\varrho$ must be genuinely multipartite entangled. It can also happen that an entangled state does not provide a sensitivity for phase estimation larger than the sensitivity achievable by separable states for any $J_{\vec{n}}$; however, the average sensitivity of phase estimation is still larger than for separable states. Thus, when asking about the advantage of entangled states over separable ones in phase estimation, it is not sufficient to consider the sensitivity in a single metrological task. + +Now we are in a position to formulate our first main results; the proofs are given later. + +*Observation 1.* For *N*-qubit separable states, the values of $F_Q[\varrho, J_l]$ for $l = x,y,z$ are bounded as + +$$ \sum_{l=x,y,z} F_Q[\varrho, J_l] \le 2N. \quad (3) $$ + +Here $J_l = \frac{1}{2} \sum_{k=1}^N \sigma_l^{(k)}$, where $\sigma_l^{(k)}$ are the Pauli spin matrices for qubit ($k$). Later we also show that Eq. (3) is a condition + +*toth@alumni.nd.edu +---PAGE_BREAK--- + +FIG. 1. (Color online) The basic problem of linear interferometry. The parameter $\theta$ must be estimated by measuring $\rho_{\text{output}}$. + +for the average sensitivity of the interferometer. All states violating Eq. (3) are entangled. Note that, according to Ref. [15], for separable states we have + +$$F_Q[\varrho, J_l] \le N. \quad (4)$$ + +**Observation 2.** For quantum states, the quantum Fisher information is bounded by above as + +$$\sum_{l=x,y,z} F_Q[\varrho, J_l] \le N(N+2). 
\quad (5)$$ + +Greenberger-Horne-Zeilinger states (GHZ states, [36]) and N-qubit symmetric Dicke states with $\frac{N}{2}$ excitations saturate Eq. (5). Note that the above symmetric Dicke state has been investigated recently due to its interesting entanglement properties [4,6,32]. It has also been noted that the above Dicke state gives an almost maximal phase measurement sensitivity in two orthogonal directions [16]. In general, pure symmetric states for which $\langle J_l \rangle = 0$ for $l = x, y, z$ saturate Eq. (5). + +Next we consider *k*-producible states [35,37]. A pure state is *k* producible if it is a tensor product of at most *k*-qubit states [37]. A mixed state is *k* producible if it is a mixture of pure *k*-producible states. + +**Observation 3.** For N-qubit *k*-producible states, the quantum Fisher information is bounded from above by + +$$F_Q[\varrho, J_l] \le nk^2 + (N-nk)^2, \quad (6)$$ + +where *n* is the integer part of $\frac{N}{k}$. A condition similar to Eq. (6) has appeared in Ref. [35] for the Wigner-Yanase skew information. + +**Observation 4.** For N-qubit *k*-producible states, the sum of three Fisher information terms is bounded from above by [38] + +$$\sum_{l=x,y,z} F_Q[\varrho, J_l] \le \begin{cases} nk(k+2) + (N-nk)(N-nk+2) & \text{if } N-nk \neq 1, \\ nk(k+2)+2 & \text{if } N-nk = 1, \end{cases} \quad (7)$$ + +where *n* is the integer part of $\frac{N}{k}$. Any state that violates this bound is not *k* producible and contains (*k* + 1)-particle entanglement. + +Next we consider criteria that show that the quantum state is not biseparable. A pure state is biseparable if it can be written as a tensor product of two multipartite states [10]. A mixed state is biseparable if it can be written as a mixture of biseparable pure states. The bounds for biseparable states for the left-hand- side of Eqs. 
(6) and (7) can be obtained from Observations 3 and 4 after taking $n=1$ and maximizing the bounds in those + +Observations over $k = [\frac{N}{2}], [\frac{N}{2}] + 1, ..., N - 1$, where $[\frac{N}{2}]$ is the smallest integer not smaller than $\frac{N}{2}$. Hence, we obtain + +$$F_Q[\varrho, J_l] \le (N-1)^2 + 1, \quad (8a)$$ + +$$\sum_{l=x,y,z} F_Q[\varrho, J_l] \le N^2 + 1. \quad (8b)$$ + +Any state that violates Eqs. (8a) or (8b) is genuine multipartite entangled. + +The inequalities presented in Observations 1–3 correspond to planes in the ($F_Q[\varrho, J_x], F_Q[\varrho, J_y], F_Q[\varrho, J_z]$) space as can be seen in Fig. 1 for $N=6$ particles. These observations show that for fully separable states only a very small fraction of the ($F_Q[\varrho, J_x], F_Q[\varrho, J_y], F_Q[\varrho, J_z]$) space is allowed. This is also true for states with several forms of multipartite entanglement, for example, *k*-producible states with $k \ll N$. To reach the maximal phase sensitivity, genuine multipartite entanglement is needed. + +The paper is organized as follows. In Sec. II, we prove Observations 1 and 2. In Sec. III, we prove Observations 3 and 4. In Sec. IV, we examine the characteristics of the states corresponding to interesting points in the ($F_Q[\varrho, J_x], F_Q[\varrho, J_y], F_Q[\varrho, J_z]$) space and determine which regions correspond to quantum states of different degrees of entanglement. In Sec. V, we discuss some similarities to entanglement detection with uncertainty relations. In Appendix A, we present a unified framework to derive entanglement conditions independent from the coordinate system chosen. In Appendix B, we give some details of our calculations. + +## II. PROOF OF OBSERVATIONS 1 AND 2 + +First, let us review some of the central notions concerning metrology and the quantum Fisher information. For calculating many quantities, it is sufficient to know the following two relations [13–15,39] for the quantum Fisher information. 
+ +(1) For a pure state $\varrho$, we have $F[\varrho, J_l] = 4(\Delta J_l)_\varrho^2$. + +(2) $F[\varrho, J_l]$ is convex in the state; that is, $F[p_1\varrho_1 + p_2\varrho_2, J_l] \le p_1F[\varrho_1, J_l] + p_2F[\varrho_2, J_l]$. + +From these two statements, it also follows that $F[\varrho, J_l] \le 4(\Delta J_l)_\varrho^2$. + +There is also an explicit formula for computing the quantum Fisher information for a given state $\varrho$ and a collective observable $J_\vec{n}$ for any $\vec{n}$ as [16] + +$$F_Q[\varrho, J_\vec{n}] = \vec{n}^T \Gamma_C \vec{n}. \quad (9)$$ + +Thus, the $\Gamma_C$ matrix carries all the information needed to compute $F_Q[\varrho, J_\vec{n}]$ for any direction $\vec{n}$. It is defined as [16] + +$$[\Gamma_C]_{ij} = 2 \sum_{l,m} (\lambda_l + \lambda_m) \left( \frac{\lambda_l - \lambda_m}{\lambda_l + \lambda_m} \right)^2 \langle l | J_i | m \rangle \langle m | J_j | l \rangle, \quad (10)$$ + +where the sum is over the terms for which $\lambda_l + \lambda_m \neq 0$, and the density matrix has the decomposition + +$$\varrho = \sum_k \lambda_k |k\rangle\langle k|. \quad (11)$$ +---PAGE_BREAK--- + +Note that for pure states $[\Gamma_C]_{ij} = \langle J_i J_j + J_j J_i \rangle / 2 - \langle J_i \rangle \langle J_j \rangle$ [16]. Later, we present entanglement conditions with $\Gamma_C$, besides entanglement conditions with $F[\varrho, J_l]$. + +*Proof of Observation 1.* First we show that Observation 1 is true for pure states. We use here the theory of entanglement detection based on uncertainty relations [40]. According to this theory, for every *N*-qubit pure product state of the form + +$$|\Psi_P\rangle = \bigotimes_{n=1}^{N} |\Psi_n\rangle, \quad (12)$$ + +the variance of the collective observable $J_l$ is the sum of the variances of the single-qubit observables $j_l^{(n)} = \frac{1}{2}\sigma_l^{(n)}$ for the single-qubit states $|\Psi_n\rangle$. 
Thus, we have for the sum of the variances of the three angular momentum components [41] + +$$ +\begin{align*} +\sum_{l=x,y,z} (\Delta J_l)^2_{|\Psi_P\rangle} &= \frac{1}{4} \sum_{l=x,y,z} \sum_{n=1}^{N} (\Delta \sigma_l)_{|\Psi_n\rangle}^2 \\ +&= \frac{1}{4} \sum_{n=1}^{N} \left(3 - \langle \sigma_x^{(n)} \rangle^2 - \langle \sigma_y^{(n)} \rangle^2 - \langle \sigma_z^{(n)} \rangle^2\right) = \frac{N}{2}. +\end{align*} +$$ + +For the mixture of product states, that is, for mixed separable states, Eq. (3) follows from the convexity of the Fisher information. ■ + +Next we show that Eq. (3) can be interpreted as a condition on the average sensitivity of the interferometer. First, note that Eq. (3) can be reformulated with the eigenvalues of $\Gamma_C$ as + +$$\mathrm{Tr}(\Gamma_C) \le 2N. \quad (13)$$ + +Then, using Eq. (9), we obtain + +$$\mathrm{avg}_{\vec{n}}(F_Q[\varrho, J_{\vec{n}}]) = \mathrm{avg}_{\vec{n}}\{\mathrm{Tr}[\Gamma_C(\vec{n}\vec{n}^T)]\} = \mathrm{Tr}(\Gamma_C \frac{1}{3}), \quad (14)$$ + +where averaging is over all three-dimensional unit vectors. Thus, Eq. (3) can be rewritten as a condition for the average sensitivity of the interferometer as + +$$\mathrm{avg}_{\vec{n}}(F_Q[\varrho, J_{\vec{n}}]) \le \frac{2}{3} N. \quad (15)$$ + +Let us calculate now the maximum of the left-hand side of Eq. (3). + +*Proof of Observation 2.* We have to use that the quantum Fisher is never larger than the corresponding variance, + +$$\sum_{l=x,y,z} F(\varrho, J_l) \le 4 \sum_{l=x,y,z} (\Delta J_l)^2, \quad (16)$$ + +and that the sum of the variances are bounded from above + +$$4 \sum_{l=x,y,z} (\Delta J_l)^2 \le 4 \sum_{l=x,y,z} |J_l|^2 \le N(N+2). \quad (17)$$ + +For pure states, Eq. (16) is saturated. The second inequality of Eq. (17) appears as a fundamental equation in the theory of angular momentum. For symmetric states with $\langle J_l \rangle = 0$ for $l = x, y, z$, both inequalities of Eq. (17) are saturated. 
Hence, GHZ states and Dicke states with $\frac{N}{2}$ excitations saturate Eq. (5). ■ + +### III. BOUNDS FOR MULTIPARTITE ENTANGLEMENT + +In this section we present the proof of Observations 3 and 4 and also compute some bounds for other types of multipartite entanglement. For that, we use ideas similar to the ones in the proof of Observation 1. + +*Proof of Observation 3.* Let us consider pure states that are the tensor product of at most *k*-qubit entangled states, + +$$|\Psi_{k-\text{producible}}\rangle = |\Psi_1^{(N_1)}\rangle \otimes |\Psi_2^{(N_2)}\rangle \otimes |\Psi_3^{(N_3)}\rangle \otimes |\Psi_4^{(N_4)}\rangle \otimes \dots, \quad (18)$$ + +where $N_m \le k$ is the number of qubits for the $m$th term in the product. Hence, based on using $(\Delta J_l)^2|_{\Psi_m^{(N_m)}}^2 \le \frac{N_m^2}{4}$ for the $N_m$-qubit units, we obtain + +$$(\Delta J_l)^2|_{\Psi_{k-\text{producible}}} = \sum_m (\Delta J_l)^2|_{\Psi_m^{(N_m)}} \le \sum_m \frac{N_m^2}{4}.$$ + +For the case when $k$ is a divisor of $N$, the largest variance can be obtained for a state for which all $N_m = k$. Hence, for the state Eq. (18) we obtain + +$$(\Delta J_l)^2 \le \frac{N}{k} \times \frac{k^2}{4}. \quad (19)$$ + +If $k$ is not a divisor of $N$ then at least one of the states in the tensor product of Eq. (18) will have fewer than $k$ qubits. The maximum for the sum of the variances is obtained if all but a single state has $k$ qubits. Considering this, we obtain Eq. (6). The strong dependence of the bounds on $k$ in Eq. (6) indicates that for high-precision metrology states containing many-partite entanglement are needed. ■ + +*Proof of Observation 4.* Let us consider pure states that are the tensor product of at most *k*-qubit entangled states of the form Eq. (18) Hence, based on using Eq. 
(5) for the *k*-qubit units, we obtain + +$$ +\begin{align} +& \sum_{l=x,y,z} (\Delta J_l)^2_{|\Psi_{k-\text{producible}}\rangle} \\ +&= \sum_m \sum_{l=x,y,z} (\Delta J_l)^2_{|\Psi_m^{(N_m)}\rangle} &\le \sum_m \frac{N_m(N_m+2)}{4}. \tag{20} +\end{align} +$$ + +For the case when $k$ is a divisor of $N$, the largest variance can be obtained for a state for which all $N_m = k$. Hence, for the state Eq. (18) we obtain + +$$\sum_{l=x,y,z} (\Delta J_l)^2 \leq \frac{N k(k+2)}{4}. \quad (21)$$ + +If $k$ is not a divisor of $N$, then at least one of the states in the tensor product of Eq. (18) will have fewer than $k$ qubits. The maximum for the sum of the variances is obtained if all but a single state has $k$ qubits. Considering this, we obtain Eq. (7). We have to use that for pure states of $N \ge 2$ qubits, we have $\sum_k (\Delta J_k)^2 \le \frac{N(N+2)}{4}$, while for $N=1$ we have a better bound $\sum_k (\Delta J_k)^2 \le \frac{1}{2}$. ■ + +*Bound for states with a given number of unentangled particles.* Next, we obtain bound for systems that contain a given number of unentangled particles. A pure state is told to contain *M* unentangled particles if it can be written as [37,42] + +$$\bigotimes_{k=1}^{M} |\Psi_k\rangle \otimes |\Psi_{M+1,\dots,N}\rangle. \quad (22)$$ + +We say that a mixed state contains at least *M* unentangled particles if it can be prepared by mixing pure states with *M* or more unentangled particles. +---PAGE_BREAK--- + +Many interesting quantum states are highly entangled, but +still contain only two-particle entanglement. Nevertheless, it +is still important to know how large fraction of the particles +remain unentangled since the number of unentangled particles +is directly connected to metrological usefulness of the state. + +*Observation 5.* For states with at least *M* unentangled particles, the quantum Fisher information is bounded from above by + +$$ +\sum_{l=x,y,z} F_Q[\varrho, J_l] \le M + (N-M)(N-M+2). \quad (23) +$$ + +Proof. 
For a pure state of the form Eq. (22), we have + +$$ +\sum_{l=x,y,z} (\Delta J_l)^2 \le \frac{M}{4} + \frac{(N-M)(N-M+2)}{4}. \quad (24) +$$ + +Any state that violates Eq. (23) has fewer than *M* unentangled +particles. The validity of Eq. (23) for mixed states is due to the +convexity of the quantum Fisher information. +■ + +So far, we presented entanglement conditions in terms of $F_Q[\varrho, J_l]$ for $l = x, y, z$. A more general approach is constructing entanglement conditions with the $\Gamma_C$ matrix defined in Eq. (10). In Appendix A, we present a unified framework for determining entanglement conditions for $\Gamma_C$. + +IV. INTERESTING POINTS IN THE +$(F_Q[\varrho, J_x], F_Q[\varrho, J_y], F_Q[\varrho, J_z])$ SPACE + +In this section, we discuss which part of the +($F_Q[\varrho, J_x]$, $F_Q[\varrho, J_y]$, $F_Q[\varrho, J_z]$) space contains points corre- +sponding to states with different degrees of entanglement. This +is important since, apart from finding inequalities for states of +various types of entanglement, we have to show that there are +states that fulfill these inequalities. + +For that, let us see first the interesting points of the +($F_Q[\varrho, J_x]$, $F_Q[\varrho, J_y]$, $F_Q[\varrho, J_z]$) space and the corresponding +quantum states, shown in Fig. 2. + +(i) A completely mixed state, + +$$ +\rho_C = \frac{1}{2^N}, \qquad (25) +$$ + +corresponds to the point C(0,0,0) in the ($F_Q[\rho, J_x]$, $F_Q[\rho, J_y]$, $F_Q[\rho, J_z]$) space. + +(ii) Product states of the form + +$$ +|\Psi\rangle_{S_l} = \left|+\frac{1}{2}\right\rangle_l^{\otimes N/2} \otimes \left|-\frac{1}{2}\right\rangle_l^{\otimes N/2} \quad (26) +$$ + +for *l* = x, y, z correspond to the points Sx(0, N, N), Sy(N, 0, N), +and Sz(N, N, 0), respectively. 
+ +(iii) An *N*-qubit symmetric Dicke state with $\frac{N}{2}$ excitations in the *z* basis is defined as + +$$ +|\mathcal{D}_N^{(N/2)}\rangle = \left(\begin{array}{c} N \\ N/2 \end{array}\right)^{-1/2} \sum_k P_k \{|0\rangle^{\otimes N/2} \otimes |1\rangle^{\otimes N/2}\}, \quad (27) +$$ + +where $\sum_k P_k$ denotes summation over all possible +permutations. Such a state corresponds to the point +$D_z(\frac{N(N+2)}{2}, \frac{N(N+2)}{2}, 0)$. Dicke states in the $x$ and $y$ +bases correspond to the points $D_x(0, \frac{N(N+2)}{2}, \frac{N(N+2)}{2})$ and +$D_y(\frac{N(N+2)}{2}, 0, \frac{N(N+2)}{2})$, respectively. + +FIG. 2. (Color online) Interesting points in the ($F_Q[\varrho, J_x]$, $F_Q[\varrho, J_y]$, $F_Q[\varrho, J_z]$) space for $N=6$ particles. Points corresponding to separable states satisfy Eq. (3) and are not above the $S_x-S_y-S_z$ plane. Points corresponding to biseparable states satisfy Eq. (8b) and are not above the $G_x-G_y-G_z$ plane. All states corresponding to points above the $G_x-G_y-G_z$ plane are genuine multipartite entangled. For the coordinates of the $S_l$, $G_l$, $D_l$, and $C$ points, see Sec. IV. + +(iv) An *N*-qubit GHZ state in the *z* basis is defined as + +$$ +|\Psi\rangle_{GHZ_z} = \frac{1}{\sqrt{2}}(|0\rangle^{\otimes N} + |1\rangle^{\otimes N}). \quad (28) +$$ + +It corresponds to the point (N,N,N²). GHZ states in the *x* and *y* bases correspond to points (N²,N,N) and (N,N²,N), respectively. + +(v) Finally, the tensor product of a single-qubit state and a Dicke state of the form + +$$ +|\Psi\rangle_{GZ} = |1\rangle \otimes |\mathcal{D}_{N-1}^{N/2-1}\rangle \quad (29) +$$ + +corresponds to the point $G_z(\frac{N^2}{2} + \frac{1}{2}, \frac{N^2}{2} + \frac{1}{2}, 0)$ [43]. States corresponding to the points $G_x$ and $G_y$ can be obtained from $|\Psi\rangle_{GZ}$ by basis transformations. 
After considering individual points, we now show that there are two-dimensional objects in the ($F_Q[\varrho, J_x]$, $F_Q[\varrho, J_y]$, $F_Q[\varrho, J_z]$) space such that for all of their points there is a corresponding separable or entangled quantum state. + +(vi) For all points in the $S_x, S_y, S_z$ polytope, there is a corresponding pure product state for even *N*. Given $F_Q[\varrho, J_l]$ for $l = x, y, z$, such a state is defined as + +$$ +\rho = \left[ \frac{1}{2} + \frac{1}{2} \sum_{l=x,y,z} c_l \sigma_l \right]^{\otimes N/2} \otimes \left[ \frac{1}{2} - \frac{1}{2} \sum_{l=x,y,z} c_l \sigma_l \right]^{\otimes N/2}, \quad (30) +$$ + +where $c_l^2 = 1 - \frac{F_Q[\varrho, J_l]}{N}$, with $\sum_l c_l^2 = 1$. + +(vii) For all points in the $D_x, D_y, D_z$ polytope, there is a corresponding quantum state if *N* is divisible by 4. To see this, let us consider the following quantum states for even *N*: + +$$ +|\Psi_{\text{even}}\rangle = \sum_{n=0,2,4,\dots,N/2-2} c_n \frac{1}{\sqrt{2}} (|\mathcal{D}_N^{(n)}\rangle + |\mathcal{D}_N^{(N-n)}\rangle) \\ +\qquad + c_{N/2} |\mathcal{D}_N^{(N/2)}\rangle, \tag{31} +$$ +---PAGE_BREAK--- + +FIG. 3. (Color online) Randomly chosen points in the $(F_Q[\varrho, J_x], F_Q[\varrho, J_y], F_Q[\varrho, J_z])$ space corresponding to states of the form Eq. (32) for $N=8$. All the points are in the plane of $D_x$, $D_y$, and $D_z$. + +where $c_n$ are complex coefficients. States Eq. (31) are special cases of symmetric states with an even parity [44]. For $|\Psi_{\text{even}}\rangle$, we have $\langle J_l\rangle = 0$ for $l=x,y,z$. Finally, $\langle J_l J_m + J_m J_l\rangle = 0$ if $l \neq m$; thus, for $|\Psi_{\text{even}}\rangle$ the matrix $\Gamma_C$ is diagonal. 
Let us now assume that $N$ is a multiple of 4 and consider the states of the form + +$$ |\Psi(\alpha_x, \alpha_y, \alpha_z)\rangle = \alpha_x |D_N^{(N/2)}\rangle_x + \alpha_y |D_N^{(N/2)}\rangle_y + \alpha_z |D_N^{(N/2)}\rangle_z, \quad (32) $$ + +where $\alpha_l$ are complex coefficients. (Note that $|D_N^{(N/2)}\rangle_l$ are not pairwise orthogonal.) Simple analytical calculations show that such states are a subset of the states Eq. (31) [45]. The states (32) fill the polytope $D_x$, $D_y$, and $D_z$, which is demonstrated for $N=8$ in Fig. 3 [46] (see also Appendix B). Thus, there is a quantum state corresponding to all points of this polytope. + +Next we examine, how to obtain states corresponding to three-dimensional polytopes. For that we use that when mixing two states, the points corresponding to the mixed state are on a curve in the $(F_Q[\varrho, J_x], F_Q[\varrho, J_y], F_Q[\varrho, J_z])$ space. In the general case, this curve is not a straight line. For the case of mixing a pure state with the completely mixed state, the curve is a straight line. Such a state is defined as + +$$ \varrho^{(\text{mixed})}(p) = p\varrho + (1-p)\frac{1}{2^N}. \quad (33) $$ + +Using Eq. (10), after simple calculations we have + +$$ \Gamma_C^{(\text{mixed})}(p) = \frac{p^2}{p + (1-p)2^{-(N-1)}} \Gamma_C^{(\varrho)}. \quad (34) $$ + +Hence, we can state the following. + +*Observation 6.* If *N* is even, then there is a separable state for each point in the $S_x, S_y, S_z, C$ polytope. + +*Proof.* This is because there is a pure product state corresponding to any point in the $S_x, S_y, S_z$ polytope. When mixing any of these states with the completely mixed state, we obtain states that correspond to points on the line connecting the pure state to point C. ■ + +*Observation 7.* If *N* is divisible by 4, then for all the points of the $D_x, D_y, D_z, G_x, G_y, G_z$ polytope, there is a quantum state with genuine multipartite entanglement. 
+ +*Proof.* There is a quantum state for all points in the $D_x, D_y, D_z$ polytope. Mixing them with the completely mixed state, states corresponding to all points of the $C, D_x, D_y, D_z$ polytope can be obtained. Based on Observation 2, states corresponding to the points in the $D_x, D_y, D_z, G_x, G_y, G_z$ polytope are genuine multipartite entangled. ■ + +Finally, note that all the quantum states we presented in this section have a diagonal $\Gamma_C$ matrix. Thus, our statements remain true even if the three coordinate axes in Fig. 2 correspond to the three eigenvalues of $\Gamma_C$. + +V. DISCUSSION + +The criterion in Eq. (3) contains several quantum Fisher information terms. It can happen that a state does not violate the criterion Eq. (4), but it violates the criterion Eq. (3). In this case, for a single metrological task of the type we considered in this paper its entanglement does not make it possible to outperform the metrology with separable states. However, if the state is used for several metrological tasks, then it makes it possible to achieve such an *average* sensitivity that would not be possible for separable states. + +A related example is the proposal of using multipartite singlets for differential magnetometry [47]. Singlets are useful for differential magnetometry because they are insensitive to homogeneous fields, that is, $F[\varrho, J_l] = 0$ for $l = x, y, z$, which is the same as for the completely mixed state. However, when considering operators other than $J_l$, singlets turn out to be very sensitive, which is not the case for the completely mixed state. Thus, singlets can provide an advantage over separable states if the combination of two metrological tasks is considered. + +It is instructive to compare the necessary condition for separability Eq. (3) to the condition presented in Refs. [41,48], + +$$ \sum_{l=x,y,z} (\Delta J_l)^2 \ge \frac{N}{2}. \quad (35) $$ + +Clearly, if a pure state is detected by Eq. 
(35), it is not detected by Eq. (3), and vice versa. In fact, Eqs. (35) and (3) together detect all entangled pure multiqubit states except for the ones for which + +$$ \sum_{l=x,y,z} (\Delta J_l)^2 = \frac{N}{2}. \quad (36) $$ + +Of course, the two conditions also detect some mixed entangled states in the vicinity of the pure entangled states. + +It is an interesting question whether multipartite states having a positive partial transpose for all bipartitions can violate any of the above entanglement criteria with the quantum Fisher information. Violating Eq. (3) would certainly mean that such bound entangled states are useful for certain metrological applications. To find such states, if they exist, might be difficult as typically bound entangled states are strongly mixed and the quantum Fisher information is convex. + +Concerning multipartite entanglement, Observation 3 shows that for a single metrological task, genuine multipartite entanglement is needed to reach the maximum sensitivity. +---PAGE_BREAK--- + +Observation 4 demonstrates that even the maximum +average sensitivity for the metrological tasks considered can +be reached only by states possessing genuine multipartite +entanglement. + +Finally, the definition of quantum Fisher information used in Ref. [15], while widely considered “the” quantum Fisher information, is not the only possible definition [49]. The Wigner-Yanase skew information is another possibility [50–52]. This quantity equals the variance for pure states, and it is also convex in the state. This has already been used to define entanglement criteria with the skew information [35,53]. Thus, all previous statements can easily be transformed into criteria with the skew information. + +VI. 
CONCLUSIONS + +In summary, we showed that genuine multipartite en- +tanglement, or in general, multipartite entanglement more +demanding than simple inseparability, is needed to achieve +a maximal accuracy using multipartite quantum states for +metrology. We also considered several relations with the +quantum Fisher information and determined the corresponding +bounds for various forms of entanglement. + +*Note added in proof.* Independently from our work, another paper on the relationship between multipartite entanglement and Fisher information has been prepared [54]. + +ACKNOWLEDGMENTS + +We thank O. Gühne and D. Petz for discussions. We thank the European Union (ERC Starting Grant GEDENTQOPT and CHIST-ERA QUASAR), the Spanish MICINN (Project No. FIS2009-12773-C02-02), the Basque Government (Project No. IT4720-10), and the support of the National Research Fund of Hungary OTKA (Contract No. K83858). + +APPENDIX A: ENTANGLEMENT CONDITIONS +FOR THE $\Gamma_C$ MATRIX + +In this Appendix, we present a unified framework to derive entanglement conditions for the $\Gamma_C$ matrix. For that aim, we use ideas from the derivation of the covariance matrix criterion [55,56] and the entanglement criteria for Gaussian multimode states [57,58]. We recall that a separable state is a mixture of pure product states [59], + +$$ \varrho_{\text{sep}} = \sum_k p_k \rho_{\text{pure product},k}. \quad (\text{A1}) $$ + +Due to the convexity of the quantum Fisher information [15], +we have + +$$ F[\varrho_{\text{sep}}, J_n] \le \sum_k p_k F[\rho_{\text{pure product},k}, J_n]. \quad (\text{A2}) $$ + +Thus, for every separable state there must be a set of $p_k$ +and $\rho_{\text{pure product},k}$ fulfilling Eq. (A2). Hence, we can say the + +following. For every separable state, there is a set of $p_k$ and +$\rho_{\text{pure product},k}$ such that + +$$ \Gamma_C^{(\text{sep})} \le \sum_k p_k \Gamma_C^{(\text{pure product},k)}. 
\quad (\text{A3}) $$ + +Any state for which there is no such set of probabilities +and pure product density matrices is entangled [60]. + +It is known that for N-qubit pure product states we have the following two constraints for the variances of the three angular momentum components, + +$$ \sum_{l=x,y,z} (\Delta J_l)^2 = \frac{N}{2}, \quad (\text{A4a}) $$ + +$$ (\Delta J_m)^2 \leq \frac{N}{4}, \quad (\text{A4b}) $$ + +which have been used to derive entanglement criteria with the three variances [41,42,48,61]. Equation (A4a) also appeared in the proof of Observation 1. Based on Eq. (A4), the conditions for the eigenvalues of $\Gamma_C^{(\text{pure product})}$ are clearly + +$$ \begin{gathered} \sum_{l=x,y,z} \Lambda_l^{(\text{pure product})} = 2N, \\ 0 \le \Lambda_m^{(\text{pure product})} \le N \end{gathered} \quad (\text{A5}) $$ + +for $m = x, y, z$. Using now our knowledge about $\Gamma_C^{(\text{pure product},k)}$, the condition Eq. (A3) leads to the following equations for the eigenvalues of $\Gamma_C^{(\text{sep})}$: + +$$ \sum_{l=x,y,z} \Lambda_l^{(\text{sep})} \le 2N, \quad (\text{A6a}) $$ + +$$ 0 \le \Lambda_m^{(\text{sep})} \le N, \quad (\text{A6b}) $$ + +for $m = x, y, z$. Equation (A6) can be reformulated with $\Gamma_C$ +as + +$$ \operatorname{Tr}(\Gamma_C^{(\text{sep})}) \le 2N, \quad (\text{A7a}) $$ + +$$ \Lambda_{\max}(\Gamma_C^{(\text{sep})}) \le N, \quad (\text{A7b}) $$ + +where $\Lambda_{\max}(A)$ is the largest eigenvalue of A. Equation (A7b) has appeared in Ref. [16]. + +Hence, quantum states fulfilling Eq. (A3) must fulfill Eq. (A7). In Observation 1 and also for the criterion Eq. (4), the most entangled states are detected if $F[\varrho_{\text{sep}}, J_l]$ correspond to the three eigenvalues of $\Gamma_C$. For this case, Eq. (A7a) is equivalent to Observation 1 and Eq. (A7b) is equivalent to Eq. (4). + +In a similar manner, conditions for multipartite entangle- +ment can also be obtained. 
Thus, analogously to Observation 3 +and Observation 4, for *N*-qubit *k*-producible states, we +obtain + +$$ +\begin{aligned} +& \mathrm{Tr}(\Gamma_C^{(\mathrm{sep})}) \\ +& \le \begin{cases} nk(k+2)+(N-nk)(N-nk+2) & \text{if } N-nk \ne 1, \\ nk(k+2)+2 & \text{if } N-nk = 1, \end{cases} +\end{aligned} +\quad (\text{A8a}) $$ + +$$ \Lambda_{\max}(\Gamma_C^{(\text{sep})}) \le nk^2 + (N - nk)^2, \quad (\text{A8b}) $$ + +where *n* is the largest integer such that *nk* ≤ *N*. We +can obtain the bounds for biseparability by setting *n* = 1 and +---PAGE_BREAK--- + +$k = N - 1$. Any state that violates one of the criteria for $n = 1$ and $k = N - 1$ is genuine multipartite entangled. The inequalities (A8a) and (A8b) are essentially the criteria of Observations 3 and 4 rewritten in a coordinate system independent way. + +## APPENDIX B: $\Gamma_C$ MATRIX FOR THE STATE EQ. (32) + +In this Appendix, we compute the $\Gamma_C$ matrix for the superposition of three Dicke states given in Eq. (32). We show that for any point in the $D_x$, $D_y$, $D_z$ triangle in Fig. 3 there is a corresponding state of this type. + +First we need to know that + +$$ {}_k\langle D_N^{(N/2)} | J_l^2 | D_N^{(N/2)} \rangle_m = \begin{cases} \frac{N(N+2)}{8} & \text{if } k=m \neq l, \\ Q & \text{if } k \neq m \text{ and } m \neq l \text{ and } k \neq l, \\ 0 & \text{otherwise} \end{cases} \quad (\text{B1}) $$ + +for $k,l,m \in \{x,y,z\}$. In the second line on the right-hand side of Eq. (B1), $Q = {}_x\langle D_N^{(N/2)} | J_y^2 | D_N^{(N/2)} \rangle_z$. Since the state vectors $|D_N^{(N/2)}\rangle_x$ and $|D_N^{(N/2)}\rangle_z$ both have real elements, and $J_y^2$ also has only real elements for even $N$, $Q$ is also real. Its precise value is not important for proving the main statement of this section. The last line on the right-hand side of Eq. (B1) is due to the fact that $J_l|D_N^{(N/2)}\rangle_l = 0$. + +Hence, the $\Gamma_C$ matrix for state Eq. 
(32) is a diagonal matrix, with + +$$ \Gamma_{C,xx} = (|\alpha_y|^2 + |\alpha_z|^2) \frac{N(N+2)}{2} + 2 \operatorname{Re}(\alpha_y^* \alpha_z Q). \quad (\text{B2}) $$ + +The elements $\Gamma_{C,yy}$ and $\Gamma_{C,zz}$ can be obtained in a similar way, after relabeling the coordinates. Clearly, for $(\alpha_x, \alpha_y, \alpha_z) = (1,0,0)$, the state Eq. (32) corresponds to the $D_x$ point in Fig. 3. Similarly, $(\alpha_x, \alpha_y, \alpha_z) = (0,1,0)$ and $(0,0,1)$ correspond to the $D_y$ and $D_z$ points, respectively. With an appropriate choice of phases for $\alpha_i$, a state with $|\alpha_x| = |\alpha_y| = |\alpha_z|$ corresponds to the center of the $D_x$, $D_y$, $D_z$ triangle. Moreover, a state with $\alpha_x = i\alpha_y$ and $\alpha_z = 0$ corresponds to a point halfway between $D_x$ and $D_y$. In a similar manner, states of the form Eq. (32) can be obtained for the points halfway between $D_x$ and $D_z$, and $D_y$ and $D_z$. + +Similar arguments show that with the appropriate choice of the absolute values and phases of $\alpha_k$, it is possible to get all the matrices, + +$$ \begin{align} \Gamma_c ={}& \alpha'_x \operatorname{diag} \left( 0, \frac{N(N+2)}{2}, \frac{N(N+2)}{2} \right) \nonumber \\ & + \alpha'_y \operatorname{diag} \left( \frac{N(N+2)}{2}, 0, \frac{N(N+2)}{2} \right) \nonumber \\ & + \alpha'_z \operatorname{diag} \left( \frac{N(N+2)}{2}, \frac{N(N+2)}{2}, 0 \right), \tag{B3} \end{align} $$ + +with $0 \le \alpha'_l \le 1$ and $\alpha'_x + \alpha'_y + \alpha'_z = 1$. That is, we can get any point of the $D_x$, $D_y$, $D_z$ triangle in Fig. 3. + +[1] J.-W. Pan, D. Bouwmeester, M. Daniell, H. Weinfurter, and A. Zeilinger, Nature (London) **403**, 515 (2000). + +[2] M. Bourennane, M. Eibl, C. Kurtsiefer, S. Gaertner, H. Weinfurter, O. Gühne, P. Hyllus, D. Bruß, M. Lewenstein, and A. Sanpera, Phys. Rev. Lett. **92**, 087902 (2004). + +[3] N. Kiesel, C. Schmid, U. Weber, G. Tóth, O. Gühne, R. Ursin, and H. 
Weinfurter, Phys. Rev. Lett. **95**, 210502 (2005). + +[4] N. Kiesel, C. Schmid, G. Tóth, E. Solano, and H. Weinfurter, Phys. Rev. Lett. **98**, 063604 (2007). + +[5] W. Wieczorek, R. Krischek, N. Kiesel, P. Michelberger, G. Tóth, and H. Weinfurter, Phys. Rev. Lett. **103**, 020504 (2009); G. Tóth, W. Wieczorek, R. Krischek, N. Kiesel, P. Michelberger, and H. Weinfurter, New J. Phys. **11**, 083002 (2009). + +[6] R. Prevedel, G. Cronenberg, M. S. Tame, M. Paternostro, P. Walther, M. S. Kim, and A. Zeilinger, Phys. Rev. Lett. **103**, 020503 (2009); S. Campbell, M. S. Tame, and M. Paternostro, New J. Phys. **11**, 073039 (2009). + +[7] C. A. Sackett et al., Nature (London) **404**, 256 (2000). + +[8] H. Häffner et al., Nature (London) **438**, 643 (2005). + +[9] O. Mandel, M. Greiner, A. Widera, T. Rom, T. W. Hänsch, and I. Bloch, Nature (London) **425**, 937 (2003). + +[10] A. Acín, D. Bruß, M. Lewenstein, and A. Sanpera, Phys. Rev. Lett. **87**, 040401 (2001). + +[11] O. Gühne and G. Tóth, Phys. Rep. **474**, 1 (2009). + +[12] V. Giovannetti, S. Lloyd, and L. Maccone, Science **306**, 1330 (2004). + +[13] A. S. Holevo, Probabilistic and Statistical Aspect of Quantum Theory (North-Holland, Amsterdam, 1982). + +[14] C. W. Helstrom, *Quantum Detection and Estimation Theory* (Academic Press, New York, 1976). + +[15] L. Pezzé and A. Smerzi, Phys. Rev. Lett. **102**, 100401 (2009). + +[16] P. Hyllus, O. Gühne, and A. Smerzi, Phys. Rev. A **82**, 012337 (2010). + +[17] A. Sørensen and K. Mølmer, Phys. Rev. Lett. **86**, 4431 (2001). + +[18] For entanglement witnesses, see M. Horodecki, P. Horodecki, and R. Horodecki, Phys. Lett. A **223**, 1 (1996); B. M. Terhal, ibid. **271**, 319 (2000); M. Lewenstein, B. Kraus, J. I. Cirac, and P. Horodecki, Phys. Rev. A **62**, 052310 (2000); D. Bruß, J. I. Cirac, P. Horodecki, F. Hulpke, B. Kraus, M. Lewenstein, and A. Sanpera, J. Mod. Opt. **49**, 1399 (2002); for the detection of genuine multipartite entanglement, see M. 
Bourennane, M. Eibl, C. Kurtsiefer, S. Gaertner, H. Weinfurter, O. Gühne, P. Hyllus, D. Bruß, M. Lewenstein, and A. Sanpera, Phys. Rev. Lett. **92**, 087902 (2004); G. Tóth and O. Gühne, ibid. **94**, 060501 (2005); G. A. Durkin and C. Simon, ibid. **95**, 180402 (2005). + +[19] It has also been worked out how to detect the genuine multipartite entanglement that can be obtained in a selected part of a very large quantum system through local operations around the boundary of that selected part. This makes it possible to study multipartite entanglement in the three-, four-, and five-particle blocks of a large quantum system and produce an entanglement map. See E. Alba, G. Tóth, and J. J. García-Ripoll, Phys. Rev. A **82**, 062321 (2010). +---PAGE_BREAK--- + +[20] Recently, via semidefinite programming, it has become possible to find an entanglement witness detecting genuine multipartite entanglement for a given quantum state. See B. Jungnitsch, T. Moroder, and O. Gühne, Phys. Rev. Lett. 106, 190502 (2011). + +[21] For device independent entanglement witnesses for multipartite entanglement, see J.-D. Bancal, N. Gisin, Y.-C. Liang, and S. Pironio, Phys. Rev. Lett. 106, 250404 (2011). + +[22] J. S. Bell, Physics (Long Island City, NY) 1, 195 (1964). + +[23] N. D. Mermin, Phys. Rev. Lett. 65, 1838 (1990). + +[24] N. Gisin and H. Bechmann-Pasquinucci, Phys. Lett. A 246, 1 (1998). + +[25] M. Seevinck and J. Uffink, Phys. Rev. A 65, 012107 (2001). + +[26] D. Collins, N. Gisin, S. Popescu, D. Roberts, and V. Scarani, Phys. Rev. Lett. 88, 170405 (2002). + +[27] K. Nagata, M. Koashi, and N. Imoto, Phys. Rev. Lett. 89, 260401 (2002). + +[28] J. Uffink, Phys. Rev. Lett. 88, 230406 (2002). + +[29] J. I. de Vicente and M. Huber, Phys. Rev. A 84, 062306 (2011). + +[30] M. Seevinck and O. Gühne, New J. Phys. 12, 053002 (2010). + +[31] M. Huber, F. Mintert, A. Gabriel, and B. C. Hiesmayr, Phys. Rev. Lett. 104, 210501 (2010). + +[32] G. Tóth, J. Opt. Soc. Am. B 24, 275 (2007). 
+ +[33] G. Vitagliano, P. Hyllus, I. L. Egusquiza, and G. Tóth, Phys. Rev. Lett. 107, 240502 (2011). + +[34] L.-M. Duan, Phys. Rev. Lett. 107, 180502 (2011). + +[35] Z. Chen, Phys. Rev. A 71, 052302 (2005). + +[36] D. M. Greenberger, M. A. Horne, A. Shimony, and A. Zeilinger, Am. J. Phys. 58, 1131 (1990). + +[37] O. Gühne, G. Tóth, and H. J. Briegel, New J. Phys. 7, 229 (2005). + +[38] We thank P. Hyllus for pointing out that the $N-nk = 1$ case is special. + +[39] S. L. Braunstein and C. M. Caves, Phys. Rev. Lett. 72, 3439 (1994). + +[40] For the general theory of entanglement detection with uncertainty relations, see H. F. Hofmann and S. Takeuchi, Phys. Rev. A 68, 032103 (2003); O. Gühne, Phys. Rev. Lett. 92, 117903 (2004). + +[41] G. Tóth, Phys. Rev. A 69, 052327 (2004). + +[42] G. Tóth, C. Knapp, O. Gühne, and H. J. Briegel, Phys. Rev. A 79, 042334 (2009). + +[43] For the values of $(\Delta J_l)^2$ for $l=x,y,z$ for Dicke states, see Eq. (25) of Ref. [42]. + +[44] X. Yin, X. Wang, J. Ma, and X. Wang, J. Phys. B: At. Mol. Opt. Phys. 44, 015501 (2011). + +[45] In Ref. [44], it has been shown that for states with an even parity $\langle J_z J_l + J_l J_z \rangle = 0$ for $l=x,y$. For states of the form Eq. (31), $\langle J_x J_y + J_y J_x \rangle = 0$ due to $|\Psi_{even}\rangle = \sigma_x^{\otimes N} |\Psi_{even}\rangle$. Equation (32) is of the form Eq. (31) because for this state $|\Psi(\alpha_x,\alpha_y,\alpha_z)\rangle = \sigma_x^{\otimes N} |\Psi(\alpha_x,\alpha_y,\alpha_z)\rangle$, and the overlap of this state with symmetric Dicke states with an odd number of l's is zero, which can be seen as follows. When writing $|D_N^{(N/2)}\rangle_x$ in the x basis, we find that it is an equal superposition of several computational basis states in the x basis. If $|b_1,b_2,...,b_N\rangle_x$ appears in this superposition, so does $|\bar{b}_1,\bar{b}_2,...,\bar{b}_N\rangle_x$, where $b \in \{0,1\}$ and $\bar{b}$ denotes the logical inversion. 
All the terms of the superposition have $N/2$ 0's and $N/2$ 1's. + +[46] The calculations have been made with QUBIT4MATLAB v3.0; see G. Tóth, Comput. Phys. Commun. 179, 430 (2008). + +[47] G. Tóth and M. W. Mitchell, New J. Phys. 12, 053007 (2010). + +[48] M. + +[49] D. + +[50] D. + +[51] S.-L. + +[52] E. + +[53] Zh. + +[54] P. Hyllus, W. Laskowski, R. Krischek, C. Schwemmer, W. Wieczorek, H. Weinfurter, L. Pezzé, and A. Smerzi, Phys. Rev. A 85, 022321 (2012). + +[55] O. Gühne, P. Hyllus, O. Gittsovich, and J. Eisert, Phys. Rev. Lett. 99, 130504 (2007). + +[56] O. Gittsovich, O. Gühne, P. Hyllus, and J. Eisert, Phys. Rev. A 78, 052319 (2008). + +[57] L.-M. Duan, G. Giedke, J. I. Cirac, and P. Zoller, Phys. Rev. Lett. 84, 2722 (2000). + +[58] R. Simon, Phys. Rev. Lett. 84, 2726 (2000). + +[59] R. F. Werner, Phys. Rev. A 40, 4277 (1989). + +[60] Note that this idea can also be applied for the covariance matrix defined as $[\Gamma]_{ij} = \langle J_i J_j + J_j J_i \rangle / 2 - \langle J_i \rangle \langle J_j \rangle$. Due to the concavity of the variance, for any separable state there must be a set of $p_k$ and $\rho_{\text{pureproduct},k}$ such that $\Gamma^{(\text{sep})} \ge \sum_k p_k \Gamma^{(\text{pureproduct},k)}$. 
+ +[61] G.Tóth, C. + +O. + +Gühne + +H. + +J. + +Briegel + +Phys. + +99 + +99 + +99 + +99 + +99 + +99 + +99 + +99 + +99 + +99 + +99 + +99 \ No newline at end of file diff --git a/samples_new/texts_merged/6859646.md b/samples_new/texts_merged/6859646.md new file mode 100644 index 0000000000000000000000000000000000000000..7f787b531bd4bb8ab3e5a0d0a794f8d9ab429252 --- /dev/null +++ b/samples_new/texts_merged/6859646.md @@ -0,0 +1,2485 @@ + +---PAGE_BREAK--- + +# Secondary School Examination-2020 +## Marking Scheme - MATHEMATICS STANDARD + +**Subject Code: 041 Paper Code: 30/2/1, 30/2/2, 30/2/3** + +### General instructions + +1. You are aware that evaluation is the most important process in the actual and correct assessment of the candidates. A small mistake in evaluation may lead to serious problems which may affect the future of the candidates, education system and teaching profession. To avoid mistakes, it is requested that before starting evaluation, you must read and understand the spot evaluation guidelines carefully. Evaluation is a 10-12 days mission for all of us. Hence, it is necessary that you put in your best efforts in this process. + +2. Evaluation is to be done as per instructions provided in the Marking Scheme. It should not be done according to one's own interpretation or any other consideration. Marking Scheme should be strictly adhered to and religiously followed. However, while evaluating, answers which are based on latest information or knowledge and/or are innovative, they may be assessed for their correctness otherwise and marks be awarded to them. In class-X, while evaluating two competency based questions, please try to understand given answer and even if reply is not from marking scheme but correct competency is enumerated by the candidate, marks should be awarded. + +3. 
The Head-Examiner must go through the first five answer books evaluated by each evaluator on the first day, to ensure that evaluation has been carried out as per the instructions given in the Marking Scheme. The remaining answer books meant for evaluation shall be given only after ensuring that there is no significant variation in the marking of individual evaluators. + +4. Evaluators will mark (√) wherever answer is correct. For wrong answer 'X' be marked. Evaluators will not put right kind of mark while evaluating which gives an impression that answer is correct and no marks are awarded. This is **most common mistake which evaluators are committing**. + +5. If a question has parts, please award marks on the right-hand side for each part. Marks awarded for different parts of the question should then be totaled up and written in the left-hand margin and encircled. This may be followed strictly. + +6. If a question does not have any parts, marks must be awarded in the left-hand margin and encircled. This may also be followed strictly. + +7. If a student has attempted an extra question, answer of the question deserving more marks should be retained and the other answer scored out. + +8. No marks to be deducted for the cumulative effect of an error. It should be penalized only once. + +9. A full scale of marks 0-80 marks as given in Question Paper) has to be used. Please do not hesitate to award full marks if the answer deserves it. + +10. Every examiner has to necessarily do evaluation work for full working hours i.e. 8 hours every day and evaluate 20 answer books per day in main subjects and 25 answer books per day in other subjects (Details are given in Spot Guidelines). + +11. Ensure that you do not make the following common types of errors committed by the Examiner in the past: +* Leaving answer or part thereof unassessed in an answer book. +* Giving more marks for an answer than assigned to it. +* Wrong totaling of marks awarded on a reply. 
+* Wrong transfer of marks from the inside pages of the answer book to the title page. +* Wrong question wise totaling on the title page. +* Wrong totaling of marks of the two columns on the title page. +* Wrong grand total. +* Marks in words and figures not tallying. +* Wrong transfer of marks from the answer book to online award list. +* Answers marked as correct, but marks not awarded. (Ensure that the right tick mark is correctly and clearly indicated. It should merely be a line. Same is with the X for incorrect answer.) +* Half or a part of answer marked correct and the rest as wrong, but no marks awarded. + +12. While evaluating the answer books if the answer is found to be totally incorrect, it should be marked as cross (X) and awarded zero (0) Marks. + +13. Any unassessed portion, non-carrying over of marks to the title page, or totaling error detected by the candidate shall damage the prestige of all the personnel engaged in the evaluation work as also of the Board. Hence, in order to uphold the prestige of all concerned, it is again reiterated that the instructions be followed meticulously and judiciously. + +14. The Examiners should acquaint themselves with the guidelines given in the Guidelines for spot Evaluation before starting the actual evaluation. + +15. Every Examiner shall also ensure that all the answers are evaluated, marks carried over to the title page, correctly totaled and written in figures and words. + +16. The Board permits candidates to obtain photocopy of the Answer Book on request in an RTI application and also separately as a part of the re-evaluation process on payment of the processing charges. +---PAGE_BREAK--- + +QUESTION PAPER CODE 30/2/1 +EXPECTED ANSWER/VALUE POINTS +SECTION - A + +Question numbers 1 to 10 are multiple choice questions of 1 mark each. + +You have to select the correct choice : + +Marks + +Q.No. + +1. 
The sum of exponents of prime factors in the prime-factorisation of 196 is + (a) 3 + (b) 4 + (c) 5 + (d) 2 + **Ans:** (b) 4 + +1 + +2. Euclid's division Lemma states that for two positive integers a and b, there exists unique integer q and r satisfying a = bq + r, and + (a) $0 < r < b$ + (b) $0 < r \leq b$ + (c) $0 \leq r < b$ + (d) $0 \leq r \leq b$ + **Ans:** (c) $0 \leq r < b$ + +1 + +3. The zeroes of the polynomial $x^2 - 3x - m(m+3)$ are + (a) $m, m+3$ + (b) $-m, m+3$ + (c) $m, -(m+3)$ + (d) $-m, -(m+3)$ + **Ans:** (b) $-m, m+3$ + +1 + +4. The value of k for which the system of linear equations $x + 2y = 3$, $5x + ky + 7 = 0$ is inconsistent is + (a) $-\frac{14}{3}$ + (b) $\frac{2}{5}$ + (c) 5 + (d) 10 + **Ans:** (d) 10 + +1 + +5. The roots of the quadratic equation $x^2 - 0.04 = 0$ are + (a) $\pm 0.2$ + (b) $\pm 0.02$ + (c) 0.4 + (d) 2 + **Ans:** (a) $\pm 0.2$ + +1 + +6. The common difference of the A.P. $\frac{1}{p}$, $\frac{1-p}{p}$, $\frac{1-2p}{p}$, ... is + (a) 1 + (b) $\frac{1}{p}$ + (c) -1 + (d) $\frac{-1}{p}$ + **Ans:** (c) -1 + +1 + +7. The $n^{th}$ term of the A.P. a, 3a, 5a, ... is + (a) na + (b) $(2n-1)a$ + (c) $(2n+1)a$ + (d) 2na + **Ans:** (b) $(2n-1)a$ + +1 + +8. The point P on x-axis equidistant from the points A(-1, 0) and B(5, 0) is + (a) (2, 0) + (b) (0, 2) + (c) (3, 0) + (d) (2, 2) + **Ans:** (a) (2, 0) + +1 + +9. The co-ordinates of the point which is reflection of point (-3, 5) in x-axis are + (a) (3, 5) + (b) (3, -5) + (c) (-3, -5) + (d) (-3, 5) + **Ans:** (c) (-3, -5) + +1 +---PAGE_BREAK--- + +10. + +If the point P (6, 2) divides the line segment joining A(6, 5) and B(4, y) in the ratio 3 : 1, then the value of y is + +(a) 4 + +(b) 3 + +(c) 2 + +(d) 1 + +**Ans:** 1 mark be awarded to everyone + +1 + +In Q. Nos. 11 to 15, fill in the blanks. Each question is of 1 mark. + +11. + +In fig. 1, MN || BC and AM : MB = 1 : 2, then $\frac{ar(\Delta AMN)}{ar(\Delta ABC)} = \underline{\hspace{2cm}}$ + +Fig. 1 + +**Ans:** $\frac{1}{9}$ + +1 + +12. 
+ +In given Fig. 2, the length PB = _______ cm. + +**Ans:** 4 + +13. + +In $\triangle ABC$, AB = $6\sqrt{3}$ cm, AC = 12 cm and BC = 6 cm, then $\angle B = \underline{\hspace{2cm}}$. + +**Ans:** 90° + +OR + +Two triangles are similar if their corresponding sides are ______. + +**Ans:** proportional + +1 + +1 + +14. + +The value of $(\tan 1^\circ \tan 2^\circ \dots \tan 89^\circ)$ is equal to ______. + +**Ans:** 1 + +15. + +In Fig. 3, the angles of depressions from the observing positions O₁ and O₂ respectively of the object A are ______, ______. + +Fig. 3 + +**Ans:** 30°, 45° + +$\frac{1}{2} + \frac{1}{2}$ +---PAGE_BREAK--- + +Q. Nos. 16 to 20 are short answer type questions of 1 mark each. + +16. If $\sin A + \sin^2 A = 1$, then find the value of the expression $(\cos^2 A + \cos^4 A)$. + +$$ +\begin{array}{l} +\text{Ans: } \sin A = 1 - \sin^2 A \\ +\qquad \sin A = \cos^2 A +\end{array} +$$ + +$$ \cos^2 A + \cos^4 A = \sin A + \sin^2 A = 1 $$ + +1/2 + +1/2 + +17. In Fig. 4 is a sector of circle of radius 10.5 cm. Find the perimeter of the sector. (Take $\pi = \frac{22}{7}$) + +Fig. 4 + +$$ +\begin{aligned} +\text{Ans: Perimeter} &= 2r + \frac{\pi r \theta}{180^\circ} \\ +&= 2 \times 10.5 + \frac{22}{7} \times 10.5 \times \frac{60^\circ}{180^\circ} \\ +&= 21 + 11 = 32 \text{ cm} +\end{aligned} +$$ + +1/2 + +1/2 + +18. If a number x is chosen at random from the numbers -3, -2, -1, 0, 1, 2, 3, then find the probability of x² < 4. + +$$ +\begin{align*} +\text{Ans: Number of Favourable outcomes} &= 3 \text{ i.e., } \{-1, 0, 1\} \quad \therefore P(x^2 < 4) = \frac{3}{7} +\end{align*} +$$ + +OR + +What is the probability that a randomly taken leap year has 52 Sundays ? + +$$ +\text{Ans: } P(52 \text{ Sundays}) = \frac{5}{7} +$$ + +1 + +19. Find the class-marks of the classes 10-25 and 35-55. + +$$ +\text{Ans: Class Marks } \frac{10+25}{2} = 17.5; \frac{35+55}{2} = 45 +$$ + +1/2+1/2 + +20. A die is thrown once. What is the probability of getting a prime number. 
+ +$$ +\begin{array}{l} +\text{Ans: Number of prime numbers} = 3 \text{ i.e. ; } \{2, 3, 5\} \\[1em] +P(\text{Prime Number}) = \frac{3}{6} \text{ or } \frac{1}{2} +\end{array} +$$ + +1/2 + +1/2 +---PAGE_BREAK--- + +SECTION - B + +Q. Nos. 21 to 26 carry 2 marks each + +21. A teacher asked 10 of his students to write a polynomial in one variable on a paper and then to handover the paper. The following were the answers given by the students: + +$$2x + 3, 3x^2 + 7x + 2, 4x^3 + 3x^2 + 2, x^3 + \sqrt{3x} + 7, 7x + \sqrt{7}, 5x^3 - 7x + 2,$$ + +$$2x^2 + 3 - \frac{5}{x}, 5x - \frac{1}{2}, ax^3 + bx^2 + cx + d, x + \frac{1}{x}.$$ + +Answer the following questions : + +(i) How many of the above ten, are not polynomials ? + +(ii) How many of the above ten, are quadratic polynomials ? + +Ans: (i) 3 + +(ii) 1 + +1 + +1 + +22. In Fig. 5, ABC and DBC are two triangles on the same base BC. If AD intersects BC at O, show that + +$$\frac{ar(\Delta ABC)}{ar(\Delta DBC)} = \frac{AO}{DO}$$ + +Fig. 5 + +Ans: + +Draw $AX \perp BC$, $DY \perp BC$ +$\triangle AOX \sim \triangle DOY$ + +$$\frac{AX}{DY} = \frac{AO}{DO} \quad \dots (i)$$ + +$$\frac{ar(\triangle ABC)}{ar(\triangle DBC)} = \frac{\frac{1}{2} \times BC \times AX}{\frac{1}{2} \times BC \times DY}$$ + +$$\frac{AX}{DY} = \frac{AO}{DO} \text{ (From (i))}$$ + +OR + +In Fig. 6, if $AD \perp BC$, then prove that $AB^2 + CD^2 = BD^2 + AC^2$. + +Fig. 6 + +Ans: In rt $\triangle ABD$ + +$AB^2 = BD^2 + AD^2$ ... (i) + +In rt $\triangle ADC$ + +$CD^2 = AC^2 - AD^2$ ... (ii) + +Adding (i) & (ii) + +$$AB^2 + CD^2 = BD^2 + AC^2$$ + +1/2 + +1/2 + +1/2 + +1/2 + +1/2 + +1 +---PAGE_BREAK--- + +23. 
Prove that $1 + \frac{\cot^2 \alpha}{1 + \cos \alpha} = \cos \alpha \sec \alpha$ + +$$ +\begin{align*} +\text{Ans: L.H.S} &= 1 + \frac{\cos \sec^2 \alpha - 1}{1 + \cos \sec \alpha} \\ +&= 1 + \frac{(\cos \sec \alpha - 1)(\cos \sec \alpha + 1)}{\cos \sec \alpha + 1} \\ +&= \cos \sec \alpha = R.H.S +\end{align*} +$$ + +OR + +$$ +\sin^2 \theta + \tan^2 \theta = \sec^2 \theta - \tan^2 \theta +$$ + +$$ +\begin{align*} +\text{Ans: L.H.S} &= \tan^4 \theta + \tan^2 \theta \\ +&= \tan^2 \theta (\tan^2 \theta + 1) \\ +&= (\sec^2 \theta - 1) (\sec^2 \theta) = \sec^4 \theta - \sec^2 \theta = R.H.S +\end{align*} +$$ + +24. The volume of a right circular cylinder with its height equal to the radius is $25\frac{1}{7}$ cm³. Find the height of the cylinder. (Use $\pi = \frac{22}{7}$) + +$$ +\text{Ans: Let height and radius of cylinder } x \text{ cm} +$$ + +$$ +V = \frac{176}{7} \text{cm}^3 +$$ + +$$ +\frac{22}{7} \times x^2 \times x = \frac{176}{7} +$$ + +$$ +x^{3}=8 \Rightarrow x=2 +$$ + +∴ height of cylinder = 2 cm + +25. A child has a die whose six faces show the letters as shown below : + +The die is thrown once. What is the probability of getting (i) A, (ii) D ? + +$$ +\text{Ans: (i) } P(A) = \frac{2}{6} \text{ or } \frac{1}{3} \qquad (\text{ii) } P(D) = \frac{1}{6} +$$ + +1+1 + +26. Compute the mode for the following frequency distribution : + + + + + + + + + + + + + + + + + + + + + + +
Size of items
(in cm)
0-44-88-1212-1616-2020-2424-28
Frequency5791712106
+ +$$ +\text{Ans: } l = 12 \quad f_0 = 9 \quad f_1 = 17 \quad f_2 = 12 \quad h = 4 +$$ + +$$ +\text{Mode} = 12 + \frac{17-9}{34-9-12} \times 4 = 14.46 \text{ cm (Approx)} +$$ + +$$ +\frac{1}{1+\frac{1}{2}} +$$ +---PAGE_BREAK--- + +SECTION - C + +Question numbers 27 to 34 carry 3 marks each. + +27. If $2x + y = 23$ and $4x - y = 19$, find the value of $(5y - 2x)$ and $\left(\frac{y}{x} - 2\right)$ + +**Ans:** $2x + y = 23, 4x - y = 19$ +Solving, we get $x = 7, y = 9$ + +$5y - 2x = 31, \frac{y}{x} - 2 = \frac{-5}{7}$ + +OR + +Solve for x: $\frac{1}{x+4} - \frac{1}{x+7} = \frac{11}{30}, x \neq -4, 7$ + +**Ans:** + +$$ \begin{aligned} \frac{1}{x+4} - \frac{1}{x-7} &= \frac{11}{30} \\ &\Rightarrow \frac{-11}{(x+4)(x-7)} = \frac{11}{30} \end{aligned} $$ + +$$ \Rightarrow x^2 - 3x + 2 = 0 $$ + +$$ \Rightarrow (x-2)(x-1) = 0 $$ + +$$ \Rightarrow x = 2, 1 $$ + +The Following solution should also be accepted + +$$ \begin{aligned} \frac{1}{x+4} - \frac{1}{x+7} &= \frac{11}{30} \\ &\Rightarrow \frac{x+7-x-4}{(x+4)(x-7)} = \frac{11}{30} \\ &\Rightarrow 11x^2 + 121x + 218 = 0 \end{aligned} $$ + +Here, D = 5049 + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ x = \frac{-121 \pm 
\sqrt{5049}}{22} $$ + +28. The first term of an A.P. is a, the second term is b and the last term is c. Show that the sum of all its terms is $\frac{(a+c)(b+c-2a)}{2(b-a)}$. + +**Ans:** + +Here $d = b - a$ + +Let c be the n-th term +$\therefore c = a + (n-1)(b-a)$ +$$ n = \frac{b+c-2a}{b-a} $$ + +$$ S_n = \frac{n}{2}(a+c) = \frac{(b+c-2a)(a+c)}{2(b-a)} $$ +---PAGE_BREAK--- + +OR + +Solve the equation : 1 + 4 + 7 + 10 + ... + x = 287. + +**Ans:** Let sum of n terms = 287 + +$$ \frac{n}{2} [2 \times 1 + (n-1)3] = 287 $$ + +$$ \frac{1}{2} $$ + +$$ 3n^2 - n - 574 = 0 $$ + +$$ \frac{1}{2} $$ + +$$ (3n + 41)(n - 14) = 0 $$ + +$$ \frac{1}{2} $$ + +$$ n = 14 \left( \text{Reject } n = \frac{-41}{3} \right) $$ + +$$ \frac{1}{2} $$ + +$$ x = a_{14} = 1 + 13 \times 3 = 40 $$ + +$$ 1 $$ + +29. 
In a flight of 600 km, an aircraft was slowed down due to bad weather. The average speed of the trip was reduced by 200 km/hr and the time of flight increased by 30 minutes. Find the duration of flight. + +**Ans:** Let actual speed = x km/hr +A.T.Q + +$$ \frac{600}{x - 200} - \frac{600}{x} = \frac{1}{2} $$ + +$$ 1 $$ + +$$ x^2 - 200x - 240000 = 0 $$ + +$$ (x - 600)(x + 400) = 0 $$ + +$$ x = 600 \text{ (x = -400 Rejected)} $$ + +$$ \frac{1}{2} $$ + +$$ \text{Duration of flight} = \frac{600}{600} = 1 \text{ hr} $$ + +$$ \frac{1}{2} $$ + +30. If the mid-point of the line segment joining the points A(3, 4) and B(k, 6) is P(x, y) and $x + y - 10 = 0$, find the value of k. + +**Ans:** P(x, y) is the mid-point of A(3, 4) and B(k, 6) + +$$ x = \frac{3+k}{2}, \quad y=5 $$ + +$$ \frac{1}{2} + \frac{1}{2} $$ + +$$ x + y - 10 = 0 \Rightarrow \frac{3+k}{2} + 5 - 10 = 0 $$ + +$$ \Rightarrow k = 7 $$ + +$$ 1 $$ + +OR + +Find the area of triangle ABC with A(1, -4) and the mid-points of sides through A being (2, -1) and (0, -1). + +**Ans:** B(3, 2), C(-1, 2) + +Area = $\frac{1}{2}|1(2-2)+3(2+4)-1(-4-2)| = 12$ sq units + +$$ \frac{1}{2} + \frac{1}{2} $$ + +$$ 1+1 $$ +---PAGE_BREAK--- + +31. In Fig. 7, if $\triangle ABC \sim \triangle DEF$ and their sides of lengths (in cm) are marked along them, then find the lengths of sides of each triangle. + +Fig. 7 + +**Ans:** As $\triangle ABC \sim \triangle DEF$ + +$$ \frac{2x-1}{18} = \frac{3x}{6x} $$ + +$x = 5$ + +AB = 9 cm DE = 18 cm + +BC = 12 cm EF = 24 cm + +CA = 15 cm FD = 30 cm + +1/2+1/2 + +32. 
If a circle touches the side BC of a triangle ABC at P and extended sides AB and AC at Q and R, respectively, prove that + +$$AQ = \frac{1}{2}(BC + CA + AB)$$ + +**Ans:** + +Correct Fig + +$$ \begin{aligned} AQ &= \frac{1}{2} (2AQ) \\ &= \frac{1}{2} (AQ + AQ) \\ &= \frac{1}{2} (AQ + AR) \\ &= \frac{1}{2} (AB + BQ + AC + CR) \\ &= \frac{1}{2} (AB + BC + CA) \end{aligned} $$ + +$\therefore$ [BQ = BP, CR = CP] + +1/2 + +33. If $\sin \theta + \cos \theta = \sqrt{2}$, prove that $\tan \theta + \cot \theta = 2$. + +$$ \text{Ans: } \sin \theta + \cos \theta = \sqrt{2} $$ + +$$ \begin{array}{l} \tan \theta + 1 = \sqrt{2} \sec \theta \\ \\ \text{Sq. both sides} \\ \tan^2 \theta + 1 + 2 \tan \theta = 2\sec^2 \theta \\ \\ \tan^2 \theta + 1 + 2 \tan \theta = 2(1 + \tan^2 \theta) \\ \\ 2 \tan \theta = \tan^2 \theta + 1 \\ \\ 2 = \tan \theta + \cot \theta \end{array} $$ + +1 + +1 + +1 + +1 +---PAGE_BREAK--- + +**34.** The area of a circular play ground is 22176 cm². Find the cost of fencing this ground at the rate of 50 per metre. + +**Ans:** Let the radius of playground be r cm + +$$ \pi r^2 = 22176 \text{ cm}^2 $$ + +$$ r = 84 \text{ cm} $$ + +1 + +$$ \text{Circumference} = 2\pi r = 2 \times \frac{22}{7} \times 84 = 528 \text{ cm} $$ + +1 + +$$ \text{Cost of fencing} = \frac{50}{100} \times 528 = 264 $$ + +1 + +### SECTION - D + +Question numbers 35 to 40 carry 4 marks each. + +**35.** Prove that $\sqrt{5}$ is an irrational number. + +**Ans:** Let $\sqrt{5}$ be a rational number. + +$$ \sqrt{5} = \frac{p}{q}, p \& q \text{ are coprimes } & \& q \neq 0 \\ 5q^2 = p^2 \Rightarrow 5 \text{ divides } p^2 \Rightarrow 5 \text{ divides } p \text{ also Let } p = 5a, \text{ for some integer } a \\ 5q^2 = 25a^2 \Rightarrow q^2 = 5a^2 \Rightarrow 5 \text{ divides } q^2 \Rightarrow 5 \text{ divides } q \text{ also} $$ + +∴ 5 is a common factor of p, q, which is not possible as +p, q are coprimes. + +Hence assumption is wrong $\sqrt{5}$ is irrational no. 
+ +1 + +1 + +1 + +1 + +**36.** It can take 12 hours to fill a swimming pool using two pipes. If the pipe of larger diameter is used for four hours and the pipe of smaller diameter for 9 hours, only half of the pool can be filled. How long would it take for each pipe to fill the pool separately? + +**Ans:** Let time taken by pipe of larger diameter to fill the tank be x hr +Let time taken by pipe of smaller diameter to fill the tank be y hr +A.T.Q + +$$ \frac{1}{x} + \frac{1}{y} = \frac{1}{12}, \quad \frac{4}{x} + \frac{9}{y} = \frac{1}{2} $$ + +1+1 + +Solving we get x = 20 hr y = 30 hr + +1+1 + +**37.** Draw a circle of radius 2 cm with centre O and take a point P outside the circle such that OP = 6.5 cm. From P, draw two tangents to the circle. + +**Ans:** Correct construction of circle of radius 2 cm +Correct construction of tangents. + +1 + +3 + +OR + +Construct a triangle with sides 5 cm, 6 cm and 7 cm and then construct another triangle whose sides are $\frac{3}{4}$ times the corresponding sides of the first triangle. + +**Ans:** Correct construction of given triangle +Construction of Similar triangle + +1 + +3 +---PAGE_BREAK--- + +**38.** From a point on the ground, the angles of elevation of the bottom and the top of a tower fixed at the top of a 20 m high building are 45° and 60° respectively. Find the height of the tower. + +**Ans:** Let height of tower = h m + +In rt. $\Delta BCD \tan 45^\circ = \frac{BC}{CD}$ + +$$ +\left. +\begin{array}{l} +1 = \frac{20}{CD} \\ +CD = 20 \text{ m} +\end{array} +\right\} +$$ + +In rt. $\Delta ACD \tan 60^\circ = \frac{AC}{CD}$ + +$$ \sqrt{3} = \frac{20+h}{20} $$ + +$$ h = 20(\sqrt{3}-1)m $$ + +corr fig. 1 + +1 + +1 + +1 + +**39.** Find the area of the shaded region in Fig. 8, if PQ = 24 cm, PR = 7 cm and O is the centre of the circle. + +Fig. 8 + +**Ans:** + +$\angle P = 90^\circ \ RQ = \sqrt{(24)^2 + 7^2} = 25 \text{ cm}, r = \frac{25}{2} \text{ cm}$ + +$$ \left. 
+\begin{array}{l} +\text{Area of shaded portion} = \text{Area of semi circle} - \ar(\Delta PQR) \\ += \frac{1}{2} \times \frac{22}{7} \times \left(\frac{25}{2}\right)^2 - 84 \\ += 161.54 \text{ cm}^2 +\end{array} +\right\} $$ + +$$ +\begin{array}{l} +\frac{1}{2} \\ +2 \\ +\frac{1}{2} +\end{array} +$$ + +OR + +Find the curved surface area of the frustum of a cone, the diameters of whose circular ends are 20 m and 6 m and its height is 24 m. + +**Ans:** + +$R = 10 \text{ m}$ $r = 3 \text{ m}$ $h = 24 \text{ m}$ + +$$ l = \sqrt{(24)^2 + (10-3)^2} = 25 \text{ m} $$ + +$$ CSA = \pi(10 + 3)25 = 325 \pi \text{ m}^2 $$ + +$$ +\begin{array}{l} +\frac{1}{2}+1\frac{1}{2} \\ +1 \\ +1+1 +\end{array} +$$ + +**40.** The mean of the following frequency distribution is 18. The frequency f in the class interval 19 – 21 is missing. Determine f. + +
Class interval11 – 1313 – 1515 – 1717 – 1919 – 2121 – 2323 – 25
Frequency36913f54
+---PAGE_BREAK--- + +**Ans:** + +
C.Ifxxf
11-1331236
13-1561484
15-17916144
17-191318234
19-21f2020f
21-23522110
23-2542496
40+f704 + 20f
+ +$$ \text{Mean} = \frac{\sum xf}{\sum f} \Rightarrow 18 = \frac{704+20f}{40+f} \Rightarrow f=8 $$ + +OR + +The following table gives production yield per hectare of wheat of 100 farms of a village : + +
Production yield40-4545-5050-5555-6060-6565-70
No. of farms4616203024
+ +Change the distribution to a 'more than' type distribution and draw its ogive. + +**Ans:** + +
Production yieldNumber of farms
More than or equal to 40100
More than or equal to 4596
More than or equal to 5090
More than or equal to 5574
More than or equal to 6054
More than or equal to 6524
+ +Plotting of points (40, 100) (45, 96) (50, 90) (55, 74) (60, 54) (65, 24) join to get ogive. + +2 + +2 + +2 + +2 +---PAGE_BREAK--- + +QUESTION PAPER CODE 30/2/2 +EXPECTED ANSWER/VALUE POINTS +SECTION - A + +Question numbers 1 to 10 are multiple choice questions of 1 mark each. + +You have to select the correct choice : + +Marks + +Q.No. + +1. The value of k for which the system of linear equations x + 2y = 3, 5x + ky + 7 = 0 is inconsistent is + +(a) $-\frac{14}{3}$ + +(b) $\frac{2}{5}$ + +(c) 5 + +(d) 10 + +Ans: (d) 10 + +1 + +2. The zeroes of the polynomial $x^2 - 3x - m(m+3)$ are + +(a) m, m + 3 + +(b) -m, m + 3 + +(c) m, -(m + 3) + +(d) -m, -(m + 3) + +Ans: (b) -m, m + 3 + +1 + +3. Euclid's division Lemma states that for two positive integers a and b, there exists unique integer q and r satisfying $a = bq + r$, and + +(a) $0 < r < b$ + +(b) $0 < r \leq b$ + +(c) $0 \leq r < b$ + +(d) $0 \leq r \leq b$ + +Ans: (c) $0 \leq r < b$ + +1 + +4. The sum of exponents of prime factors in the prime-factorisation of 196 is + +(a) 3 + +(b) 4 + +(c) 5 + +(d) 2 + +Ans: (b) 4 + +1 + +5. If the point P(6, 2) divides the line segment joining A(6, 5) and B(4, y) in the ratio 3 : 1, then the value of y is + +(a) 4 + +(b) 3 + +(c) 2 + +(d) 1 + +Ans: 1 mark be awarded to everyone + +1 + +6. The co-ordinates of the point which is reflection of point (-3, 5) in x-axis are + +(a) (3, 5) + +(b) (3, -5) + +(c) (-3, -5) + +(d) (-3, 5) + +Ans: (c) (-3, -5) + +1 + +7. The point P on x-axis equidistant from the points A(-1, 0) and B(5, 0) is + +(a) (2, 0) + +(b) (0, 2) + +(c) (3, 0) + +(d) (2, 2) + +Ans: (a) (2, 0) + +1 + +8. The $n^{th}$ term of the A.P. a, 3a, 5a, ... is + +(a) na + +(b) $(2n-1)a$ + +(c) $(2n+1)a$ + +(d) 2na + +Ans: (b) $(2n-1)a$ + +1 + +9. The common difference of the A.P. $\frac{1}{p}, \frac{1-p}{p}, \frac{1-2p}{p}, ...$ is + +(a) 1 + +(b) $\frac{1}{p}$ + +(c) -1 + +(d) $-\frac{1}{p}$ + +Ans: (c) -1 + +1 +---PAGE_BREAK--- + +10. 
The roots of the quadratic equation $x^2 - 0.04 = 0$ are + +(a) ± 0.2 + +(b) ± 0.02 + +(c) 0.4 + +(d) 2 + +Ans: (a) ± 0.2 + +In Q. Nos. 11 to 15, fill in the blanks. Each question is of 1 mark. + +11. In Fig. 1, the angles of depressions from the observing positions O₁ and O₂ respectively of the object A are ______, ______. + +Fig. 1 + +Ans: 30°, 45° + +$\frac{1}{2} + \frac{1}{2}$ + +12. In Fig. 2, MN || BC and AM : MB = 1 : 2, then $\frac{\text{ar}(ΔAMN)}{\text{ar}(ΔABC)} = $ ______. + +Fig. 2 + +Ans: $\frac{1}{9}$ + +13. In given Fig. 3, the length PB = ______ cm. + +Fig. 3 + +Ans: 4 + +14. In ΔABC, AB = $6\sqrt{3}$ cm, AC = 12 cm and BC = 6 cm, then ∠B = ______. + +Ans: 90° + +OR +Two triangles are similar if their corresponding sides are ______. + +Ans: proportional + +1 + +1 + +15. The value of sin 23° cos 67° + cos 23° sin 67° is ______. + +Ans: 1 + +1 +---PAGE_BREAK--- + +Q. Nos. 16 to 20 are short answer type questions of 1 mark each. + +16. In Fig. 4 is a sector of circle of radius 10.5 cm. Find the perimeter of the sector. (Take $\pi = \frac{22}{7}$) + +Fig. 4 + +**Ans:** Perimeter $= 2r + \frac{\pi r \theta}{180^{\circ}}$ +$= 2 \times 10.5 + \frac{22}{7} \times 10.5 \times \frac{60^{\circ}}{180^{\circ}}$ +$= 21 + 11 = 32 \text{ cm}$ + +1/2 + +1/2 + +17. If a number x is chosen at random from the numbers -3, -2, -1, 0, 1, 2, 3, then find the probability of x² < 4. + +**Ans:** Number of Favourable outcomes = 3 i.e., {-1, 0, 1} $\therefore$ P(x² < 4) = $\frac{3}{7}$ + +1/2+1/2 + +OR + +What is the probability that a randomly taken leap year has 52 Sundays ? + +**Ans:** P(52 Sundays) = $\frac{5}{7}$ + +1 + +18. A die is thrown once. What is the probability of getting a prime number. + +**Ans:** Number of prime numbers = 3 i.e. {2, 3, 5} + +P(Prime Number) = $\frac{3}{6}$ or $\frac{1}{2}$ + +1/2 + +1/2 + +19. If tan A = cot B, then find the value of (A + B). + +**Ans:** $\tan A = \tan (90^\circ - B)$ +$\therefore A + B = 90^\circ$ + +1/2 + +1/2 + +20. 
Find the class marks of the classes 15 – 35 and 45 – 60. + +**Ans:** +$$\frac{15+35}{2} = 25$$ + +$$\frac{45+60}{2} = 52.5$$ + +1/2 + +1/2 + +SECTION - B + +Q. Nos. 21 to 26 carry 2 marks each + +21. A teacher asked 10 of his students to write a polynomial in one variable on a paper and then to handover the paper. The following were the answers given by the students: +---PAGE_BREAK--- + +$$2x+3, 3x^2+7x+2, 4x^3+3x^2+2, x^3+\sqrt{3x}+7, 7x+\sqrt{7}, 5x^3-7x+2,$$ + +$$2x^2 + 3 - \frac{5}{x}, 5x - \frac{1}{2}, ax^3 + bx^2 + cx + d, x + \frac{1}{x}.$$ + +Answer the following questions : + +(i) How many of the above ten, are not polynomials ? + +(ii) How many of the above ten, are quadratic polynomials ? + +**Ans:** (i) 3 + +(ii) 1 + +1 + +1 + +**22. Compute the mode for the following frequency distribution :** + + + + + + + + + + + + + + + + + + + + + + +
+ Size of items (in cm) + + 0 - 4 + + 4 - 8 + + 8 - 12 + + 12 - 16 + + 16 - 20 + + 20 - 24 + + 24 - 28 +
+ Frequency + + 5 + + 7 + + 9 + + 17 + + 12 + + 10 + + 6 +
+ +1/2 + +$$ +\text{Mode} = 12 + \frac{17-9}{34-9-12} \times 4 = 14.46 \text{ cm (Approx)} +$$ + +$$ +1 + \frac{1}{2} +$$ + +**23.** In Fig. 5, ABC and DBC are two triangles on the same base BC. If AD intersects BC at O, show that + +$$ +\frac{\text{ar}(\Delta \text{ABC})}{\text{ar}(\Delta \text{DBC})} = \frac{\text{AO}}{\text{DO}} +$$ + +Fig. 5 + +$$ +\frac{\text{AX}}{\text{DY}} = \frac{\text{AO}}{\text{DO}} \quad \dots (i) +$$ + +$$ +\frac{\text{ar}(\Delta \text{ABC})}{\text{ar}(\Delta \text{DBC})} = \frac{\frac{1}{2} \times \text{BC} \times \text{AX}}{\frac{1}{2} \times \text{BC} \times \text{DY}} +$$ + +$$ +\frac{\mathrm{AX}}{\mathrm{DY}}=\frac{\mathrm{AO}}{\mathrm{DO}} \quad (\text { From } (1)) +$$ + +OR + +In Fig. 6, if AD ⊥ BC, then prove that AB² + CD² = BD² + AC². + +Fig. 6 +---PAGE_BREAK--- + +**Ans:** In rt $\triangle$ ABD + +$AB^2 = BD^2 + AD^2$ ... (i) + +1/2 + +In rt $\triangle$ ADC + +$CD^2 = AC^2 - AD^2$ ... (ii) + +1/2 + +Adding (i) & (ii) + +$AB^2 + CD^2 = BD^2 + AC^2$ + +1 + +**24.** Prove that $1 + \frac{\cot^2 \alpha}{1 + \cos \alpha} = \cos \alpha$ + +**Ans:** L.H.S = $1 + \frac{\cos ec^2\alpha - 1}{1 + \cos ec \alpha}$ + +1/2 + +$$ +\begin{aligned} +&= 1 + \frac{(\cos ec \alpha - 1)(\cos ec \alpha + 1)}{\cos ec \alpha + 1} \\ +&= \cosec \alpha = R.H.S +\end{aligned} + $$ + +1 + +1/2 + +OR + +Show that $\tan^4\theta + \tan^2\theta = \sec^4\theta - \sec^2\theta$ + +**Ans:** L.H.S = $\tan^4\theta + \tan^2\theta$ + +$$ +\begin{aligned} +&= \tan^2\theta (\tan^2\theta + 1) \\ +&= (\sec^2\theta - 1)(\sec^2\theta) = \sec^4\theta - \sec^2\theta = R.H.S +\end{aligned} + $$ + +1/2 + +1+1/2 + +**25.** A child has a die whose six faces show the letters as shown below : + +A B C D E + +The die is thrown once. What is the probability of getting (i) A, (ii) D ? + +**Ans:** (i) P(A) = $\frac{2}{6}$ or $\frac{1}{3}$ + +(ii) P(D) = $\frac{3}{6}$ or $\frac{1}{2}$ + +1+1 + +**26.** A solid is in the shape of a cone mounted on a hemisphere of same base radius. 
If the curved surface areas of the hemispherical part and the conical part are equal, then find the ratio of the radius and the height of the conical part. + +**Ans:** CSA of conical part = CSA of hemispherical part + +$$ +\begin{aligned} +& \pi rl = 2\pi r^2 \\ +& \sqrt{r^2 + h^2} = 2r \\ +& h^2 = 3r^2 \\ +& \frac{r}{h} = \frac{1}{\sqrt{3}} \Rightarrow \text{ratio is } 1 : \sqrt{3} +\end{aligned} + $$ + +1/2 + +1/2 + +1/2 + +1/2 +---PAGE_BREAK--- + +**SECTION - C** + +**Question numbers 27 to 34 carry 3 marks each.** + +27. In Fig. 7, if $\triangle ABC \sim \triangle DEF$ and their sides of lengths (in cm) are marked along them, then find the lengths of sides of each triangle. + +Fig. 7 + +**Ans:** As $\triangle ABC \sim \triangle DEF$ + +$$ \frac{2x-1}{18} = \frac{3x}{6x} $$ + +$1$ + +$x = 5$ + +1 + +AB = 9 cm DE = 18 cm + +BC = 12 cm EF = 24 cm + +CA = 15 cm FD = 30 cm + +$$ \frac{1}{2} + \frac{1}{2} = \frac{1}{2} $$ + +28. If a circle touches the side BC of a triangle ABC at P and extended sides AB and AC at Q and R, respectively, prove that + +$$ AQ = \frac{1}{2} (BC + CA + AB) $$ + +**Ans:** + +Correct Fig + +$$ AQ = \frac{1}{2} (2AQ) $$ + +$$ \frac{1}{2} $$ + +$$ = \frac{1}{2} (AQ + AQ) $$ + +$$ = \frac{1}{2} (AQ + AR) $$ + +$$ = \frac{1}{2} (AB + BQ + AC + CR) $$ + +$$ 1 $$ + +$$ = \frac{1}{2} (AB + BC + CA) $$ + +$$ 1 $$ + +$$ \therefore [BQ = BP, CR = CP] $$ + +29. The area of a circular play ground is $22176 \text{ cm}^2$. Find the cost of fencing this ground at the rate of 50 per metre. + +**Ans:** Let the radius of playground be r cm + +$$ \pi r^2 = 22176 \text{ cm}^2 $$ + +$$ r = 84 \text{ cm} $$ + +$$ \frac{22}{7} $$ + +Circumference = $2\pi r = 2 \times \frac{22}{7} \times 84 = 528 \text{ cm}$ + +$$ 1 $$ +---PAGE_BREAK--- + +Cost of fencing = $\frac{50}{100} \times 528 = 264$ + +30. 
+ +If $2x + y = 23$ and $4x - y = 19$, find the value of $(5y - 2x)$ and $(\frac{y}{x} - 2)$ + +**Ans:** $2x + y = 23, 4x - y = 19$ +Solving, we get $x = 7, y = 9$ + +$5y - 2x = 31, \frac{y}{x} - 2 = \frac{-5}{7}$ + +1 + +1+1 + +$\frac{1}{2}+1\frac{1}{2}$ + +OR + +Solve for x: $\frac{1}{x+4} - \frac{1}{x+7} = \frac{11}{30}, x\# = -4, 7$ + +**Ans:** + +$$ +\begin{align*} +\frac{1}{x+4} - \frac{1}{x-7} &= \frac{11}{30} \\ +&\Rightarrow \frac{-11}{(x+4)(x-7)} = \frac{11}{30} +\end{align*} +$$ + +$$ +\Rightarrow x^2 - 3x + 2 = 0 +$$ + +$$ +\Rightarrow (x-2) (x-1) = 0 +$$ + +$$ +\Rightarrow x = 2, 1 +$$ + +The Following solution should also be accepted + +$$ +\begin{align*} +\frac{1}{x+4} - \frac{1}{x+7} &= \frac{11}{30} \\ +&\Rightarrow \frac{x+7-x-4}{(x+4)(x-7)} = \frac{11}{30} +\end{align*} +$$ + +$$ +\Rightarrow 11x^2 + 121x + 218 = 0 +$$ + +Here, D = 5049 + +$$ +x = \frac{-121 \pm \sqrt{5049}}{22} +$$ + +$\frac{1}{2}$ + +31. + +If the mid-point of the line segment joining the points A(3, 4) and B(k, 6) is P(x, y) and $x + y - 10 = 0$, find the value of k. + +**Ans:** + +$$ +A \left( \frac{\text{P}}{(3, 4)}, \left( \frac{\text{P}}{(x, y)}, \frac{\text{P}}{(K, 6)} \right) \right) +$$ + +$$ +x = \frac{3+k}{2} \quad y = 5 +$$ + +$$ +x + y - 10 = 0 \Rightarrow \frac{3+k}{2} + 5 - 10 = 0 +$$ + +$$ +\Rightarrow k = 7 +$$ + +OR + +Find the area of triangle ABC with A(1, -4) and the mid-points of sides through A being (2, -1) and (0, -1). + +**Ans:** B(3, 2), C(-1, 2) + +$$ +\text{Area} = \frac{1}{2} |(1(2-2) + 3(2+4) - 1(-4-2))| = 12 \text{ sq units} +$$ + +$\frac{1}{2}+1\frac{1}{2}$ + +$1+1$ +---PAGE_BREAK--- + +32. If in an A.P., the sum of first m terms is n and the sum of its first n terms is m, then prove that the sum of its first (m + n) terms is $-(m + n)$. 
+ +**Ans:** +$S_m = n$ and $S_n = m$ + +$$2a + (m-1)d = \frac{2n}{m} \quad \dots(i) \qquad 2a + (n-1)d = \frac{2m}{n} \quad \dots(ii)$$ + +1 + +Solving (i) & (ii), $a = \frac{m^2+n^2+mn-n-m}{mn}$ & $d = \frac{-2(n-m)}{mn}$ + +1 + +$$S_{m+n} = \frac{m+n}{2} \left[ \frac{2 \times m^2 + n^2 + mn - n - m}{mn} \right] + (m+n-1) \left\{ \frac{-2(n+m)}{mn} \right\}$$ + +$$= (-1)(m+n)$$ + +1/2 +1/2 + +OR + +Find the sum of all 11 terms of an A.P. whose middle term is 30. + +**Ans:** +Middle term = $\left(\frac{11+1}{2}\right)^{\text{th}}$ term = $a_6 = 30$ + +1 + +$$S_{11} = \frac{11}{2}[2a + 10d]$$ + +$$= 11(a + 5d)$$ + +$$= 11 a_6 = 11 \times 30 = 330$$ + +1/2 +1/2 +1 + +33. A fast train takes 3 hours less than a slow train for a journey of 600 km. If the speed of the slow train is 10 km/h less than that of the fast train, find the speed of each train. + +**Ans:** +Let the speeds of fast train & slow train be x km/hr +& (x - 10) km/hr respectively. +A.T.Q. + +$$\frac{600}{x-10} - \frac{600}{x} = 3$$ + +$$x^2 - 10x - 2000 = 0$$ + +$$(x - 50)(x + 40) = 0$$ + +$x = 50$ or $-40$ + +Speed is always positive, So, $x = 50$ + +1/2 + +∴ Speed of fast train & slow train are 50 km/hr & 40 km/hr respectively. + +1/2 + +34. If $1 + \sin^2\theta = 3 \sin\theta \cos\theta$, prove that $\tan\theta = 1$ or $\frac{1}{2}$ + +**Ans:** +$$\frac{1+\sin^2\theta}{\cos^2\theta} = \frac{3\sin\theta \cdot \cos\theta}{\cos^2\theta} \text{ (Dividing both sides by } \cos^2\theta\text{)}$$ + +$$\sec^2\theta + \tan^2\theta = 3\tan\theta$$ + +$$(1 + \tan^2\theta) + \tan^2\theta = 3\tan\theta$$ + +$$2\tan^2\theta - 3\tan\theta + 1 = 0$$ + +$$(\tan\theta - 1)(2\tan\theta - 1) = 0$$ + +1/2 +1/2 +1/2 +1/2 +1/2 +---PAGE_BREAK--- + +$$ \tan \theta = 1 \text{ or } \frac{1}{2} $$ + +## SECTION - D + +**Question numbers 35 to 40 carry 4 marks each.** + +**35.** The mean of the following frequency distribution is 18. The frequency f in the class interval 19 – 21 is missing. Determine f. + +
Class interval11 - 1313 - 1515 - 1717 - 1919 - 2121 - 2323 - 25
Frequency36913f54
+ +**Ans:** +C.I +11-13 +13-15 +15-17 +17-19 +19-21 +21-23 +23-25 +f +3 +6 +9 +13 +f +5 +4 +x +12 +14 +16 +18 +20 +22 +24 +\underline{40+f} +xf +36 +84 +144 +234 +20f +110 +96 +\underline{704 + 20f} + +$$ \text{Mean} = \frac{\sum xf}{\sum f} \Rightarrow 18 = \frac{704+20f}{40+f} \Rightarrow f=8 $$ + +OR + +The following table gives production yield per hectare of wheat of 100 farms of a village : + +
Production yield40-4545-5050-5555-6060-6565-70
No. of farms4616203024
+ +Change the distribution to a 'more than' type distribution and draw its ogive. + +**Ans:** + +
Production yieldNumber of farms
More than or equal to 40100
More than or equal to 4596
More than or equal to 5090
More than or equal to 5574
More than or equal to 6054
More than or equal to 6524
+ +Plotting of points (40, 100) (45, 96) (50, 90) (55, 74) (60, 54) (65, 24) join to get ogive. + +$$ \tan \theta = 1 \text{ or } \frac{1}{2} $$ + +2 + +2 + +2 + +2 +---PAGE_BREAK--- + +**36.** Find the area of the shaded region in Fig. 8, if PQ = 24 cm, PR = 7 cm and O is the centre of the circle. + +Fig. 8 + +$$ +\begin{aligned} +\text{Ans: } \angle P = 90^\circ \text{ RQ} &= \sqrt{(24)^2 + 7^2} = 25 \text{ cm}, r = \frac{25}{2} \text{ cm} \\ +&= \frac{1}{2} \times \frac{22}{7} \times \left(\frac{25}{2}\right)^2 - 84 \\ +&= 161.54 \text{ cm}^2 +\end{aligned} +$$ + +OR + +Find the curved surface area of the frustum of a cone, the diameters of whose circular ends are 20 m and 6 m and its height is 24 m. + +$$ +\begin{array}{l} +\text{Ans: } R = 10 \text{ m} \quad r = 3 \text{ m} \quad h = 24 \text{ m} \\[1em] +l = \sqrt{(24)^2 + (10-3)^2} = 25 \text{ m} \\ +CSA = \pi(10 + 3)25 = 325 \pi \text{ m}^2 +\end{array} +$$ + +**37.** Prove that $\sqrt{5}$ is an irrational number. + +$$ +\begin{array}{l} +\text{Ans: Let } \sqrt{5} \text{ be a rational number.} \\ +\sqrt{5} = \frac{p}{q}, p \text{ & q are coprimes & } q \neq 0 \\ +5q^2 = p^2 \Rightarrow 5 \text{ divides } p^2 \Rightarrow 5 \text{ divides } p \text{ also Let } p = 5a, \text{ for some integer } a \\ +5q^2 = 25a^2 \Rightarrow q^2 = 5a^2 \Rightarrow 5 \text{ divides } q^2 \Rightarrow 5 \text{ divides } q \text{ also} \\ +\therefore 5 \text{ is a common factor of } p, q, \text{ which is not possible as } \\ +\text{p, q are coprimes.} \\ +\text{Hence assumption is wrong } \sqrt{5} \text{ is irrational no.} +\end{array} +$$ + +**38.** It can take 12 hours to fill a swimming pool using two pipes. If the pipe of larger diameter is used for four hours and the pipe of smaller diameter for 9 hours, only half of the pool can be filled. How long would it take for each pipe to fill the pool separately ? 
+ +$$ +\begin{array}{l} +\text{Ans: Let time taken by pipe of larger diameter to fill the tank be x hr} \\ +\text{Let time taken by pipe of smaller diameter to fill the tank be y hr} \\ +\text{A.T.Q} \\ +\\ +\displaystyle \frac{1}{x} + \frac{1}{y} = \frac{1}{12}, \quad \frac{4}{x} + \frac{9}{y} = \frac{1}{2} \\ +\\ +\text{Solving we get } x = 20 \text{ hr } y = 30 \text{ hr} +\end{array} +$$ +---PAGE_BREAK--- + +**39.** Draw two tangents to a circle of radius 4 cm, which are inclined to each other at an angle of 60°. + +**Ans:** Correct construction of circle of radius 4 cm + +Correct construction of tangents + +OR + +Construct a triangle ABC with sides 3 cm, 4 cm and 5 cm. Now, construct another triangle whose sides are $\frac{4}{5}$ times the corresponding sides of ΔABC. + +**Ans:** Correct construction of triangle with sides 3 cm, 4 cm & 5 cm + +Correct construction of similar triangle + +**40.** The angle of elevation of the top of a building from the foot of a tower is 30° and the angle of elevation of the top of a tower from the foot of the building is 60°. If the tower is 50 m high, then find the height of the building. + +**Ans:** Correct figure +Let the height of building be h m + +$$ \text{In rt. } \triangle \text{BCD, } \tan 60^\circ = \frac{50}{BC} $$ + +$$ \Rightarrow BC = \frac{50}{\sqrt{3}} \quad \dots (i) $$ + +$$ \text{In rt. } \triangle \text{ABC, } \tan 30^\circ = \frac{h}{BC} $$ + +$$ \Rightarrow \quad \frac{1}{\sqrt{3}} = \frac{h}{50/\sqrt{3}} \quad (\text{from (i)}) $$ + +$$ \therefore h = \frac{50}{3} \text{ or } 16\frac{2}{3} \text{ or } 16.67 \text{ m} $$ +---PAGE_BREAK--- + +QUESTION PAPER CODE 30/2/3 +EXPECTED ANSWER/VALUE POINTS +SECTION - A + +Question numbers 1 to 10 are multiple choice questions of 1 mark each. + +You have to select the correct choice : + +Marks + +Q.No. + +1. The point P on x-axis equidistant from the points A(-1, 0) and B(5, 0) is + +(a) (2, 0) + +(b) (0, 2) + +(c) (3, 0) + +(d) (2, 2) + +Ans: (a) (2, 0) + +1 + +2. 
The co-ordinates of the point which is reflection of point (-3, 5) in x-axis are + +(a) (3, 5) + +(b) (3, -5) + +(c) (-3, -5) + +(d) (-3, 5) + +Ans: (c) (-3, -5) + +1 + +3. If the point P (6, 2) divides the line segment joining A(6, 5) and B(4, y) in the ratio 3 : 1, then the value of y is + +(a) 4 + +(b) 3 + +(c) 2 + +(d) 1 + +Ans: 1 mark be awarded to everyone + +1 + +4. The sum of exponents of prime factors in the prime-factorisation of 196 is + +(a) 3 + +(b) 4 + +(c) 5 + +(d) 2 + +Ans: (b) 4 + +1 + +5. Euclid's division Lemma states that for two positive integers a and b, there exists unique integer q and r satisfying $a = bq + r$, and + +(a) $0 < r < b$ + +(b) $0 < r \leq b$ + +(c) $0 \leq r < b$ + +(d) $0 \leq r \leq b$ + +Ans: (c) $0 \leq r < b$ + +1 + +6. The zeroes of the polynomial $x^2 - 3x - m(m+3)$ are + +(a) m, m + 3 + +(b) -m, m + 3 + +(c) m, -(m + 3) + +(d) -m, -(m + 3) + +Ans: (b) -m, m + 3 + +1 + +7. The value of k for which the system of linear equations $x + 2y = 3$, $5x + ky + 7 = 0$ is inconsistent is + +(a) $-\frac{14}{3}$ + +(b) $\frac{2}{5}$ + +(c) 5 + +(d) 10 + +Ans: (d) 10 + +1 + +8. The roots of the quadratic equation $x^2 - 0.04 = 0$ are + +(a) $\pm 0.2$ + +(b) $\pm 0.02$ + +(c) 0.4 + +(d) 2 + +Ans: (a) $\pm 0.2$ + +1 + +9. The common difference of the A.P. $\frac{1}{p}$, $\frac{1-p}{p}$, $\frac{1-2p}{p}$, ... is + +(a) 1 + +(b) $\frac{1}{p}$ + +(c) -1 + +(d) $-\frac{1}{p}$ + +Ans: (c) -1 + +1 +---PAGE_BREAK--- + +10. The $n^{th}$ term of the A.P. a, 3a, 5a, ... is + +(a) na + +(b) (2n - 1)a + +(c) (2n + 1) a + +(d) 2na + +**Ans:** (b) (2n - 1)a + +1 + +In Q. Nos. 11 to 15, fill in the blanks. Each question is of 1 mark. + +11. In Fig. 1, the angles of depressions from the observing positions O₁ and O₂ respectively of the object A are __________, _________. + +Fig. 1 + +**Ans:** 30°, 45° + +$\frac{1}{2} + \frac{1}{2}$ + +12. In $\triangle ABC$, AB = $6\sqrt{3}$ cm, AC = 12 cm and BC = 6 cm, then $\angle B = $ ________. 
+ +**Ans:** 90° + +OR + +Two triangles are similar if their corresponding sides are ________. + +**Ans:** proportional + +1 + +1 + +13. In given Fig. 2, the length PB = _______ cm. + +Fig. 2 + +**Ans:** 4 + +1 + +14. In Fig. 3, MN || BC and AM : MB = 1 : 2, then $\frac{ar(\triangle AMN)}{ar(\triangle ABC)} = $ ________. + +Fig. 3 + +**Ans:** $\frac{1}{9}$ + +1 + +15. The value of sin 32° cos 58° + cos 32° sin 58° is + +**Ans:** 1 + +1 +---PAGE_BREAK--- + +OR + +The value of $\frac{\tan 35^\circ}{\tan 55^\circ} + \frac{\cot 78^\circ}{\tan 12^\circ}$ is ______. + +**Ans:** 2 + +1 + +Q. Nos. 16 to 20 are short answer type questions of 1 mark each. + +16. A die is thrown once. What is the probability of getting a prime number. + +**Ans:** Number of prime numbers = 3 i.e. {2, 3, 5} + +$\text{P(Prime Number)} = \frac{3}{6} \text{ or } \frac{1}{2}$ + +1/2 + +1/2 + +17. If a number x is chosen at random from the numbers -3, -2, -1, 0, 1, 2, 3, then find the probability of $x^2 < 4$. + +**Ans:** Number of Favourable outcomes = 3 i.e., {-1, 0, 1} $\therefore P(x^2 < 4) = \frac{3}{7}$ + +1/2+1/2 + +OR + +What is the probability that a randomly taken leap year has 52 Sundays ? + +**Ans:** $P(52 \text{ Sunday}) = \frac{5}{7}$ + +1 + +18. If $\sin A + \sin^2 A = 1$, then find the value of the expression ($\cos^2 A + \cos^4 A$). + +**Ans:** +$$ +\begin{cases} +\sin A = 1 - \sin^2 A \\ +\sin A = \cos^2 A +\end{cases} +\text{ } +\begin{array}{l} +\cos^2 A + \cos^4 A = \sin^2 A + \sin^2 A = 1 +\end{array} +$$ + +1/2 + +1/2 + +19. Find the area of the sector of a circle of radius 6 cm whose central angle is 30°. +(Take $\pi = 3.14$) + +**Ans:** Area = $3.14 \times (6)^2 \times \frac{30^\circ}{360^\circ}$ += $9.42 \text{ cm}^2$ + +1/2 + +1/2 + +20. Find the class marks of the classes 20 – 50 and 35 – 60. + +**Ans:** +$$ \frac{20+50}{2} = 35 $$ + +$$ \frac{35+60}{2} = 47.5 $$ + +1/2 + +1/2 + +SECTION - B + +Q. Nos. 21 to 26 carry 2 marks each. + +21. 
A teacher asked 10 of his students to write a polynomial in one variable on a paper and then to handover the paper. The following were the answers given by the students: + +$2x + 3$, $3x^2 + 7x + 2$, $4x^3 + 3x^2 + 2$, $x^3 + \sqrt{3x} + 7$, $7x + \sqrt{7}$, $5x^3 - 7x + 2$, +$2x^2 + 3 - \frac{5}{x}$, $5x - \frac{1}{2}$, $ax^3 + bx^2 + cx + d$, $x + \frac{1}{x}$ +---PAGE_BREAK--- + +Answer the following questions : + +(i) How many of the above ten, are not polynomials ? + +(ii) How many of the above ten, are quadratic polynomials ? + +**Ans:** (i) 3 + +(ii) 1 + +1 + +1 + +22. A child has a die whose six faces show the letters as shown below : + +The die is thrown once. What is the probability of getting (i) A, (ii) D ? + +**Ans:** (i) $P(A) = \frac{2}{6}$ or $\frac{1}{3}$ + +(ii) $P(D) = \frac{1}{6}$ + +1+1 + +23. In Fig. 4, ABC and DBC are two triangles on the same base BC. If AD intersects BC at O, show that + +$$\frac{ar(\Delta ABC)}{ar(\Delta DBC)} = \frac{AO}{DO}$$ + +Fig. 4 + +**Ans:** + +Draw $AX \perp BC$, $DY \perp BC$ +$\Delta AOX \sim \Delta DOY$ + +$$\frac{AX}{DY} = \frac{AO}{DO} \quad \dots(i)$$ + +$$\frac{ar(\triangle ABC)}{ar(\triangle DBC)} = \frac{\frac{1}{2} \times BC \times AX}{\frac{1}{2} \times BC \times DY}$$ + +$$\frac{AX}{DY} = \frac{AO}{DO} \text{ (From (i))}$$ + +OR + +In Fig. 5, if $AD \perp BC$, then prove that $AB^2 + CD^2 = BD^2 + AC^2$. + +**Ans:** +In rt $\triangle ABD$ $AB^2 = BD^2 + AD^2$ ... (i) +In rt $\triangle ADC$ $CD^2 = AC^2 - AD^2$ ... (ii) +Adding (i) & (ii) +$$AB^2 + CD^2 = BD^2 + AC^2$$ + +1/2 + +1/2 + +1/2 + +1/2 + +1/2 + +1 +---PAGE_BREAK--- + +24. 
Prove that $1 + \frac{\cot^2 \alpha}{1 + \operatorname{cosec} \alpha} = \operatorname{cosec} \alpha$
If $2x + y = 23$ and $4x - y = 19$, find the value of $(5y - 2x)$ and $(\frac{y}{x} - 2)$ + +**Ans:** $2x + y = 23$, $4x - y = 19$ + +Solving, we get $x = 7$, $y = 9$ + +$$ 5y - 2x = 31, \quad \frac{y}{x} - 2 = \frac{-5}{7} $$ + +OR + +Solve for $x$: $\frac{1}{x+4} - \frac{1}{x+7} = \frac{11}{30}$, $x\# = -4, 7$ + +**Ans:** + +$$ \begin{aligned} \frac{1}{x+4} - \frac{1}{x-7} &= \frac{11}{30} \\ &\Rightarrow \frac{-11}{(x+4)(x-7)} = \frac{11}{30} \\ &\Rightarrow x^2 - 3x + 2 = 0 \\ &\Rightarrow (x-2)(x-1) = 0 \\ &\Rightarrow x = 2, 1 \end{aligned} $$ + +The Following solution should also be accepted + +$$ \begin{aligned} \frac{1}{x+4} - \frac{1}{x+7} &= \frac{11}{30} \\ &\Rightarrow \frac{x+7-x-4}{(x+4)(x-7)} = \frac{11}{30} \\ &\Rightarrow 11x^2 + 121x + 218 = 0 \end{aligned} $$ + +Here, D = 5049 + +$$ x = \frac{-121 \pm \sqrt{5049}}{22} $$ + +$$ \frac{1}{1+1/2} $$ + +$$ \frac{1}{2} $$ +---PAGE_BREAK--- + +**32.** Which term of the A.P. 20,19$\frac{1}{4}$,18$\frac{1}{2}$,17$\frac{3}{4}$... is the first negative term. + +$$ \text{Ans: } a = 20 \text{ & } d = 19\frac{1}{4} - 20 = -\frac{3}{4} $$ + +$$ a_n < 0 $$ + +$$ 20 + (n-1)\left(-\frac{3}{4}\right) < 0 $$ + +$$ n > 27\frac{2}{3} $$ + +∴ 28th term of the given A. P. is first negative term + +OR + +Find the middle term of the A.P. 7, 13, 19, ..., 247. + +$$ \text{Ans: } a = 7 \text{ & } d = 13 - 7 = 6 $$ + +$$ 247 = 7 + (n - 1)6 $$ + +$$ n = 41 $$ + +$$ \text{Middle term} = \left(\frac{41+1}{2}\right)^{\text{th}} = 21^{\text{st}} \text{ term.} $$ + +$$ a_{21} = 7 + 20 \times 6 = 127 $$ + +**33.** Water in a canal, 6 m wide and 1.5 m deep, is flowing with a speed of 10 km/h. +How much area will it irrigate in 30 minutes, if 8 cm standing water is +required ? 
+ +$$ \text{Ans: Volume of water in canal in 1 hr} = 10000 \times 6 \times 1.5 = 90000 \text{ m}^3 $$ + +$$ \text{Volume of water in canal in 30 mins} = \frac{1}{2} \times 90000 = 45000 \text{ m}^3 $$ + +$$ \begin{aligned} \text{Area} &= \frac{45000}{8/100} \\ &= 562500 \text{ m}^2 \end{aligned} $$ + +**34.** Show that : + +$$ \frac{\cos^2(45^\circ + \theta) + \cos^2(45^\circ - \theta)}{\tan(60^\circ + \theta) \tan(30^\circ - \theta)} = 1 $$ + +$$ \text{Ans: L.H.S} = \frac{\cos^2(45^\circ + \theta) + \sin^2(90^\circ - 45^\circ + \theta)}{\tan(60^\circ + \theta) \cdot \cot(90^\circ - 30^\circ + \theta)} $$ + +$$ = \frac{\cos^2(45^\circ + \theta) + \sin^2(45^\circ + \theta)}{\tan(60^\circ + \theta) \cdot \cot(60^\circ + \theta)} $$ + +$$ = \frac{1}{1} = 1 = R.H.S $$ +---PAGE_BREAK--- + +SECTION - D + +Question numbers 35 to 40 carry 4 marks each. + +35. The mean of the following frequency distribution is 18. The frequency f in the class interval 19 – 21 is missing. Determine f. + +
Class interval11 - 1313 - 1515 - 1717 - 1919 - 2121 - 2323 - 25
Frequency36913f54
+ +**Ans:** + +C.I + +f + +x + +xf + +11-13 + +3 + +12 + +36 + +13-15 + +6 + +14 + +84 + +15-17 + +9 + +16 + +144 + +17-19 + +13 + +18 + +234 + +19-21 + +f + +20 + +20f + +21-23 + +5 + +22 + +110 + +23-25 + +$\frac{4}{40+f}$ + +24 + +96 +--- +$704 + 20f$ + +$$ \text{Mean} = \frac{\sum xf}{\sum f} \Rightarrow 18 = \frac{704+20f}{40+f} \Rightarrow f=8 $$ + +OR + +The following table gives production yield per hectare of wheat of 100 farms of a village : + +
Production yield40-4545-5050-5555-6060-6565-70
No. of farms4616203024
+ +Change the distribution to a 'more than' type distribution and draw its ogive. + +**Ans:** + +
Production yieldNumber of farms
More than or equal to 40100
More than or equal to 4596
More than or equal to 5090
More than or equal to 5574
More than or equal to 6054
More than or equal to 6524
+ +Plotting of points (40, 100) (45, 96) (50, 90) (55, 74) (60, 54) (65, 24) join to get ogive. + +2 + +2 + +36. From a point on the ground, the angles of elevation of the bottom and the top of a tower fixed at the top of a 20 m high building are 45° and 60° respectively. Find the height of the tower. + +**Ans:** Let height of tower = h m +---PAGE_BREAK--- + +In rt. $\triangle BCD \tan 45° = \frac{BC}{CD}$ + +$$ +\left. +\begin{array}{l} +1 = \frac{20}{CD} \\ +CD = 20 \text{ m} +\end{array} +\right\} +$$ + +In rt. $\triangle ACD \tan 60° = \frac{AC}{CD}$ + +$$ +\sqrt{3} = \frac{20 + h}{20} +$$ + +$$ +h = 20(\sqrt{3}-1)m +$$ + +corr fig. 1 + +1 + +1 + +1 + +1 + +37. It can take 12 hours to fill a swimming pool using two pipes. If the pipe of larger diameter is used for four hours and the pipe of smaller diameter for 9 hours, only half of the pool can be filled. How long would it take for each pipe to fill the pool separately ? + +Ans: Let time taken by pipe of larger diameter to fill the tank be x hr +Let time taken by pipe of smaller diameter to fill the tank be y hr + +A.T.Q + +$$ +\frac{1}{x} + \frac{1}{y} = \frac{1}{12}, \quad \frac{4}{x} + \frac{9}{y} = \frac{1}{2} +$$ + +Solving we get x = 20 hr y = 30 hr + +1+1 + +1+1 + +38. Prove that $\sqrt{5}$ is an irrational number. + +Ans: Let $\sqrt{5}$ be a rational number. + +$$ +\sqrt{5} = \frac{p}{q}, p \text{ & q are coprimes & } q \neq 0 +$$ + +1 + +$5q^2 = p^2 \Rightarrow 5$ divides $p^2 \Rightarrow 5$ divides $p$ also Let $p = 5a$, for some integer $a$ + +1 + +$5q^2 = 25a^2 \Rightarrow q^2 = 5a^2 \Rightarrow 5$ divides $q^2 \Rightarrow 5$ divides $q$ also + +1 + +∴ 5 is a common factor of p, q, which is not possible as p, q are coprimes. + +Hence assumption is wrong $\sqrt{5}$ is irrational no. + +1 + +39. Draw a circle of radius 3.5 cm. From a point P, 6 cm from its centre, draw two tangents to the circle. + +Ans: Correct construction of circle of radius 3.5 cm + +Correct construction of tangents. 
+ +OR + +Construct a $\triangle ABC$ with AB = 6 cm, BC = 5 cm and $\angle B = 60°$. + +Now construct another triangle whose sides are $\frac{2}{3}$ times the corresponding sides of $\triangle ABC$. +---PAGE_BREAK--- + +**Ans:** Correct construction of given triangle +Construction of Similar triangle + +1 + +3 + +40. A solid is in the shape of a hemisphere surmounted by a cone. If the radius of hemisphere and base radius of cone is 7 cm and height of cone is 3.5 cm, find the volume of the solid. + +$$ \left(\text{Take } \pi = \frac{22}{7}\right) $$ + +**Ans:** + +$$ +\begin{aligned} +& \text{Volume of solid} = \frac{1}{3} \times \frac{22}{7} \times (7)^2 \times 3.5 + \frac{2}{3} \times \frac{22}{7} \times (7)^3 \\ +&= \frac{22}{7} \times (7)^2 \times \left[ \frac{3.5}{3} + \frac{2}{3} \times 7 \right] \\ +&= 898\frac{1}{3} \text{ or } 898.33 \text{ cm}^3 +\end{aligned} +$$ + +2 + +1 + +1 \ No newline at end of file diff --git a/samples_new/texts_merged/692782.md b/samples_new/texts_merged/692782.md new file mode 100644 index 0000000000000000000000000000000000000000..8e000c825bf9e5ec3ce858af1de2617be81121ad --- /dev/null +++ b/samples_new/texts_merged/692782.md @@ -0,0 +1,220 @@ + +---PAGE_BREAK--- + +# Propagation with time-dependent Hamiltonian + +Gang Huang¹ + +¹Johannes Gutenberg University of Mainz + +July 16, 2020 + +## Abstract + +In this note, we introduce one basic concept in nonlinear optical spectroscopy: time-dependent Hamiltonian. Then we give one example of application of the time evolution operator. + +APS/123-QED + +Institute for Physics, Johannes Gutenberg University, Mainz, Germany gang@uni-mainz.de + +In optical spectroscopy, the choice we face is: (1) working with a time-independent Hamiltonian in a larger phase space that includes the matter and the radiation field (Shaul Mukamel, 1995); (2) using a time-dependent Hamiltonian in a smaller phase space of the matter alone. 
+ +For any vector $|\psi\rangle$ in Hilbert space, its dynamical equation is the time-dependent Schrodinger equation: + +$$i\hbar \frac{\partial |\psi(t)\rangle}{\partial t} = \mathbf{H} |\psi(t)\rangle. \quad (1)$$ + +Since + +$$|\psi(t)\rangle = \sum_l |f_l\rangle \langle f_l|\psi(t)\rangle, \quad (2)$$ + +and + +$$\mathbf{H}|f_l\rangle = E_l|f_l\rangle, \quad (3)$$ + +we have + +$$i\hbar \frac{\partial}{\partial t} \langle f_l |\psi(t)\rangle = E_l \langle f_l |\psi(t)\rangle,$$ + +which is + +$$i\hbar \frac{\partial}{\partial t} c_l = E_l c_l,$$ + +or + +$$\mathbf{H}\mathbf{c} = \mathbf{E}\mathbf{c}. \quad (4)$$ + +We obtain the wave function at time $t$: + +$$\langle f_l | \psi(t) \rangle = e^{-\frac{i E_l (t-t_0)}{\hbar}} \langle f_l | \psi(t_0) \rangle, \quad (5)$$ +---PAGE_BREAK--- + +where $\langle f_l | \psi(t_0) \rangle$ is the initial expansion coefficients of the wavefunction. We then have + +$$ |\psi(t)\rangle = \sum_l e^{-\frac{iE_l(t-t_0)}{\hbar}} |f_l\rangle \langle f_l|\psi(t_0)\rangle, \quad (6) $$ + +Therefore, the evolution operator $U(t, t_0)$ can be defined as: + +$$ |\psi(t)\rangle \equiv U(t, t_0)|\psi(t_0)\rangle, $$ + +or + +$$ U(t, t_0) = \sum_l |f_l\rangle e^{-\frac{iE_l(t-t_0)}{\hbar}} \langle f_l|. \quad (7) $$ + +It is immediately follows that + +$$ U(t_0, t_0) = 1. \quad (8) $$ + +The eq. 7 gives the evolution operator in a specific representation, i.e., the eigenstates of the Hamiltonian **H**. + +Here is one example of application of the time evolution operator. Calculate the time evolution operator of a coupled 2-level system ($|\psi_a\rangle$ and $|\psi_b\rangle$) with energies $\epsilon_a$, $\epsilon_b$, and a coupling $V_{ab}$, represented by the Hamiltonian + +$$ \begin{bmatrix} \epsilon_a & V_{ab} \\ V_{ba} & \epsilon_b \end{bmatrix}. $$ + +Solution: Denote + +$$ V_{ab} = V_{ba}^* = |V_{ab}|e^{-i\chi}(0 < \chi < \pi/2). 
\quad (9) $$ + +Denote $\lambda$ as the eigenvalue of the energy, solve the JiuQi equation + +$$ (\epsilon_a - \lambda)(\epsilon_b - \lambda) - |V_{ab}|^2 = 0, \quad (10) $$ + +we get the eigenvalue of the energy: $\lambda_{\pm} = \frac{(\epsilon_a + \epsilon_b) \pm \sqrt{(\epsilon_a - \epsilon_b)^2 + 4|V_{ab}|^2}}{2}$. Then the eigenstates can be calculated. +For $\lambda = \lambda_-$, + +$$ (\epsilon_b - \lambda_-)b = -V_{ab}e^{i\chi}a, $$ +---PAGE_BREAK--- + +$$ +(11) +$$ + +i.e., + +$$ +\begin{align*} +\frac{b}{a} &= \frac{-|V_{ab}|e^{i\chi}}{\epsilon_b - \lambda_{-}} \\ +&= \frac{-2|V_{ab}|e^{i\chi}}{(\epsilon_b - \epsilon_a) + \sqrt{(\epsilon_a - \epsilon_b)^2 + 4|V_{ab}|^2}} \\ +&= \frac{-2|V_{ab}|e^{i\chi}/(\epsilon_a - \epsilon_b)}{-1 + \sqrt{1 + \frac{4|V_{ab}|^2}{(\epsilon_a - \epsilon_b)^2}}} \\ +&= \frac{-\tan 2\theta}{-1 + \sec 2\theta} e^{i\chi} \\ +&= -\frac{\cos\theta}{\sin\theta} e^{i\chi}, +\end{align*} +$$ + +where we have set + +$$ +\tan 2\theta \equiv \frac{2|V_{ab}|}{\epsilon_a - \epsilon_b}, \quad 0 < \theta < \frac{\pi}{2}. \tag{12} +$$ + +Therefore, + +$$ +|\psi_-\rangle = \left[ \begin{array}{c} -\sin\theta e^{-i\chi/2} \\ \cos\theta e^{i\chi/2} \end{array} \right]. \qquad (13) +$$ + +Similarly, replace $\lambda_-$ by $\lambda_+$, we can obtain + +$$ +|\psi_+\rangle = \left[ \begin{array}{c} \cos\theta e^{-i\chi/2} \\ \sin\theta e^{i\chi/2} \end{array} \right]. +$$ + +$$ +(14) +$$ + +Thus, from eq. 7, the time evolution operator is + +$$ +U(t, t_0) = |\psi_+\rangle\langle\psi_+|e^{-\frac{i}{\hbar}\lambda_+(t-t_0)} + |\psi_-\rangle\langle\psi_-|e^{-\frac{i}{\hbar}\lambda_-(t-t_0)}. \quad (15) +$$ + +Using eq. 
(13) and (14), we obtain the exprssion of $U(t, t_0)$: + +$$ +U(t, t_0) = +\begin{bmatrix} +\cos^2\theta & \cos\theta\sin\theta e^{-i\chi} \\ +\cos\theta\sin\theta e^{i\chi} & \sin^2\theta +\end{bmatrix} +e^{-\frac{i}{\hbar}\lambda_{+}(t-t_0)} + +\\ ++ +\\ +\begin{bmatrix} +\sin^2\theta & -\cos\theta\sin\theta e^{-i\chi} \\ +-\cos\theta\sin\theta e^{i\chi} & \cos^2\theta +\end{bmatrix} +e^{-\frac{i}{\hbar}\lambda_{-}(t-t_0)}. +\tag{16} +$$ + +Discussion: suppose the system is initially (at time $t_0$ = 0) in the $|\phi_a\rangle$ state, i.e., $|\psi(0)\rangle = |\phi_a\rangle$. We can calculate the probability of the system to be found in the $|\phi_b\rangle$ state at time $t$ +---PAGE_BREAK--- + +$$ +\begin{align} +P_{ba}(t) &= |\langle \phi_b | \psi(t) \rangle|^2 \tag{17} \\ +&= |\langle \phi_b | U(t, t_0) \psi(0) \rangle|^2 \nonumber \\ +&= |\langle \phi_b | U(t, t_0) | \phi_a \rangle|^2 \nonumber +\end{align} +$$ + +$$ +(18) +$$ + +Since + +$$ +\langle \phi_b | U(t, t_0) | \phi_a \rangle = +$$ + +$$ +\begin{bmatrix} 0 & 1 \end{bmatrix} +\begin{bmatrix} U_{aa}(t) & U_{ab}(t) \\ U_{ba}(t) & U_{bb}(t) \end{bmatrix} +\begin{bmatrix} 1 \\ 0 \end{bmatrix} +$$ + +$$ +\begin{align*} +&= U_{ba}(t) \\ +&= \sin\theta\cos\theta e^{i\chi} e^{-\frac{i}{\hbar}\lambda_{+}(t-t_0)} - \\ +&\quad - \sin\theta\cos\theta e^{i\chi} e^{-\frac{i}{\hbar}\lambda_{-}(t-t_0)} \\ +&= \sin 2\theta e^{i\chi} \frac{2(\cos\frac{\lambda_{+}(t-t_0)}{\hbar} - i\sin\frac{\lambda_{+}(t-t_0)}{\hbar})}{2} - \\ +&\quad - \cos\lambda_{-}(t-t_0) \frac{\lambda_{-}(t-t_0)}{\hbar + i\sin\frac{\lambda_{-}(t-t_0)}{\hbar}} \\ +&= \sin 2\theta e^{i\chi} \frac{2 \times 2 i \sin\beta(\cos\alpha - i \sin\alpha)}{2} \\ +&= i \sin 2\theta e^{i(\chi - \alpha)} \sin\beta, \quad (13) \text{ where we have defined} +\end{align*} +$$ + +$$ +\alpha = \frac{(\epsilon_a + \epsilon_b)(t - t_0)}{2\hbar}, \beta = \frac{\sqrt{(\epsilon_a - \epsilon_b)^2 + 4|V_{ab}|^2}(t - t_0)}{2\hbar}. 
+$$ + +$$ +(14) +$$ + +So + +$$ +\begin{align} +|\langle \phi_b | U(t, t_0) | \phi_a \rangle|^2 &= \sin^2 2\theta \sin^2 \beta \nonumber \\ +&= \frac{4|V_{ab}|^2}{\sqrt{(\epsilon_a - \epsilon_b)^2 + 4|V_{ab}|^2}} \sin^2 \frac{\sqrt{(\epsilon_a - \epsilon_b)^2 + 4|V_{ab}|^2}(t-t_0)}{2\hbar}. \tag{15} +\end{align} +$$ + +This is known as Rabi formula and + +$$ +\Omega_R \equiv \frac{\sqrt{(\epsilon_a - \epsilon_b)^2 + 4|V_{ab}|^2}}{\hbar} \qquad (16) +$$ +---PAGE_BREAK--- + +is known as Rabi frequency. For example, in the case of alkali atoms, the order of magnitude of the Rabi frequency is MHz. We assume that $(\epsilon_a - \epsilon_b)^2$ and $4|V_{ab}|^2$ have the same order of magnitude, i.e., + +$$ \frac{4|V_{ab}|^2}{\sqrt{(\epsilon_a - \epsilon_b)^2 + 4|V_{ab}|^2}} \sim \sqrt{(\epsilon_a - \epsilon_b)^2 + 4|V_{ab}|^2} \approx 10^6. $$ + +## References + +(1995). \ No newline at end of file diff --git a/samples_new/texts_merged/7081601.md b/samples_new/texts_merged/7081601.md new file mode 100644 index 0000000000000000000000000000000000000000..ac196320060de82ae699faa87889eee1b1784ee0 --- /dev/null +++ b/samples_new/texts_merged/7081601.md @@ -0,0 +1,995 @@ + +---PAGE_BREAK--- + +# A Systolic Design Methodology with Application to Full-Search Block-Matching Architectures + +YEN-KUANG CHEN AND S.Y. KUNG + +Princeton University + +Received May 21, 1997; Revised November 5, 1997 + +**Abstract.** We present a systematic methodology to support the design tradeoffs of array processors in several emerging issues, such as (1) high performance and high flexibility, (2) low cost, low power, (3) efficient memory usage, and (4) system-on-a-chip or the ease of system integration. This methodology is algebraic based, so it can cope with high-dimensional data dependence. The methodology consists of some transformation rules of data dependency graphs for facilitating flexible array designs. 
For example, two common partitioning approaches, LPGS and LSGP, could be unified under the methodology. It supports the design of high-speed and massively parallel processor arrays with efficient memory usage. More specifically, it leads to a novel *systolic cache* architecture comprising of shift registers only (cache without tags). To demonstrate how the methodology works, we have presented several systolic design examples based on the block-matching motion estimation algorithm (BMA). By multiprojecting a 4D DG of the BMA to 2D mesh, we can reconstruct several existing array processors. By multiprojecting a 6D DG of the BMA, a novel 2D systolic array can be derived that features significantly improved rates in data reusability (96%) and processor utilization (99%). + +## 1. Introduction + +The rapid progress in VLSI technology will soon reach more than 100 million transistors in a chip, implying tremendous computation power for many applications, e.g., real-time multimedia processing. Many important design issues emerge for the hardware design for these applications: + +1. High performance and high flexibility + +2. Low cost, low power, and efficient memory usage + +3. System-on-a-chip or the ease of system integration + +4. Fast design turn-around + +The challenge is that many of these design issues dis- +cord with each other. + +In addressing these critical issues, we present a sys- +tematic methodology to support the design of a broad +scope of array processors. This allows us to design and +evaluate diverse designs easily and quickly. This alge- +braic methodology can handle algorithms with high- + +dimensional data dependency. It can exploit a high +degree of data reusability and thus it can design high +performance processor arrays with high efficiency in +memory usage. + +In this paper, we focus on the block-matching motion +estimation algorithm (BMA) [6] as an example. 
The +basic idea of the BMA is to locate a displaced block, +which is most similar to the current block, within the +search area in the previous frame as shown in Fig. 1. +Various criteria have been presented for the BMA. The +most popular one is to find the least sum of the absolute +difference (SAD) as + +$$ \text{Motion Vector} = \arg \min_{[u,v]} \{SAD[u, v]\} $$ + +$$ SAD[u, v] = \sum_{i=1}^{n} \sum_{j=1}^{n} \left| s[i+u, j+v] - r[i, j] \right| $$ + +$$ -p \leq u \leq p, -p \leq v \leq p $$ + +where *n* is the block width and height, *p* is the absolute value of the maximum possible vertical/horizontal motion, *r*[i,j] is the pixel intensity (luminance value) +---PAGE_BREAK--- + +Fig. 1. In the process of the block-matching motion estimation algorithm, the current frame is divided into a number of non-overlapping current blocks, which are *n* pixels × *n* pixels. Each of the current blocks will be compared with (2*p* + 1) × (2*p* + 1) different displaced blocks in the search area of the previous frame. + +in the current block at (i, j), s[i+u, j+v] is the pixel +intensity in the search area in the previous frame, and +(u, v) represents the candidate displacement vector. + +The BMA is extremely computationally intensive in +current video coding [7, 15]. For example, a SAD +for a block of 16 × 16 pixels requires 512 additions. +For search range {−32, ..., +32} × {−32, ..., +32}, +there are 4225 SADs, and hence, 2.16 × 10⁶ additions. +For a video with 720 pixels × 480 pixels × 30 frames +per second, 88 × 10⁹ additions per second would be +required for a real-time MPEG-1 video coding. In or- +der to tackle such a computationally demanding prob- +lem in real-time, putting massively parallel processing +elements (PEs) together as a computing engine, like +systolic array, is often mandatory. + +Such fully utilized processing power can process a +tremendous amount of data. In the example, each pixel +in the previous frame will be revisited thousands of +times. 
If each visit involves a memory fetch, it would +imply an extremely short memory read cycle time (32 +ps) for real-time motion estimation of CCIR 601 pic- +tures. So far, state-of-the-art memories are far beyond +such demand. In order to make the data flow keep up +with the processing power, memory access localities +must be exploited. Particularly, data reusability plays + +a critical role in the systolic design of many important +applications. + +In order to find a good tradeoff point between several +conflicting design goals, a systematic/comprehensive +design methodology must be used. Since most multi- +media signal processing algorithms have the following +features: localized operations, intensive computation, +and matrix operation, high-level mapping methodolo- +gies are proving very efficient. (For the reader's conve- +nience, in the Appendices, we review the basic systolic +design notations and methodology.) + +**1.1. Previous Approaches for Systolic BMA Design** + +Because the BMA for a single current block is a 4- +dimensional algorithm (as shown in Appendix A.1), it +is impossible to get a 2D or 1D system implementa- +tion by one projection. Conventionally, the BMA is +decomposed into subparts, which (1) are individually +defined over index spaces with dimensions less than +or equal to three and (2) are suitable to perform the +canonical projection. The functional decomposition +method simplifies the multi-dimensional time sched- +ule and projection problem [5, 10, 16, 20]. For exam- +ple, one such decomposition is to take *u* out first and +consider it later as follows: + +$$ +\begin{equation} +\begin{aligned} +SAD[v] = & \sum_{i=1}^{n} \sum_{j=1}^{n} |s[i, j + v] - r[i, j]| \\ +& - p \le v \le p +\end{aligned} +\end{equation} +$$ + +As a result, we can get several existing DGs as shown +in Fig. 2. + +There are many arrays in [10, 16] that can be derived by canonical projecting of the 3D DG shown in Fig. 2. 
However, most of the designs require a huge amount of memory bandwidth. For example, the design shown in Fig. 3(a) can be derived by projecting the DG in Fig. 2 along the *v*-direction. This design needs 16 byte data per cycles. Without sufficient memory bandwidth, the PEs are idle most of the time. Hence, most of these designs are not practical. + +Another method (called *index fixing*) fixes one the loop index at a time over and over. When two or fewer loop indices remain, the remaining algorithm can be easily transformed into systolic design [4, 5, 10, 16]. For example, the design in Fig. 3(a) can also be derived by fixing the index of the *u* and *v* of the 4-dimensional DG. +---PAGE_BREAK--- + +Fig. 2. Two 3D DG examples of the BMA [2, 10, 16]. + +Fig. 3. Previous array design examples. (a) Projected without buffers. (b) Projected with buffers [8]. + +A breakthrough design that greatly reduces the I/O bandwidth by exploiting *data reusability* is shown in [8] (cf. Fig. 3(b)). It carries some extra buffers. The advantage of this design is that the data are input serially such that the hunger of the I/O is greatly reduced. The amount of the input data per operation is only 1 byte. Furthermore, shift registers instead of random access memories are used here such that the control is easier, the buffer area is smaller, and the data access rate is higher. Moreover, because the search windows of the current blocks overlap each other, a simple FIFO (based on this design) is proposed to cap- + +ture more data reusability and thus further reduce the I/O bandwidth [14]. + +However, the design shown in Fig. 3(b) is one of the designs that is blamed for inefficiency because of unnecessary computations. The inefficiency comes from the following problem: In order to have only one I/O port for the whole array, the data running through the whole array must be unified. 
Hence, in this design, some processor may receive some useless data and do some unnecessary computation (or without doing real computation) [1, 8]. The utilization rate = $\frac{(2p + 1)^2}{(n + 2p)^2}$. +---PAGE_BREAK--- + +Later, a 2D array design prevents some unnecessary data running through every PE by inputting the data from two memory ports [1]. It not only needs low I/O bandwidth but can also achieve high computational power. + +A transformation of snapshot (called *slice and tile*) is employed to produce different forms of DGs [2]. There will be a reduction of one dimension in the DG. For example, an original 3D BMA would become a 2D DG. After that, canonical single projection approaches can be used. This technique can re-design most of the existing architectures in graphs. However, the memory organization must be designed via a careful bookkeeping system on the information about the interface between subparts. + +## 1.2. Overview of this Work + +In this paper, we present a systematic methodology, multiprojection, to support the design of a broad scope of array processors. Many previous approaches, such as *functional decomposition*, *index fixing*, and *slice and tile*, can be regarded as its special cases. + +We also propose several useful rules essential for the implementation of multiprojection. For instance, by applying LPGS (locally parallel globally sequential) or LSGP (locally sequential globally parallel) during the multiprojection, the design can enjoy expandabilities without compromising the data reusability. Other rules for reducing the number of buffers are also made available. The rules may be adopted to improve computational power and flexibilities and reduce I/O requirement and control overhead. + +We shall demonstrate how the multiprojection can achieve this goal, based on a systolic design example of the BMA. 
Our methodology is applied to design (1) massively parallel systolic architectures and (2) fast *systolic cache* architectures for the MPEG application. + +# 2. Multiprojection Methodology for Optimal Systolic Design + +Conventional single projection can only map an $n$-dimensional DG directly onto an $(n-1)$-dimensional SFG. However, due to current VLSI technology constraint, it is hard to implement a 3D or 4D systolic array. In order to map an $n$-dimensional DG directly onto an $(n-k)$-dimensional SFG without DG de- + +composition, a multi-dimensional projection method is introduced [11, 17, 18, 24]. + +The projection method, which maps an $n$-dimensional DG to an $(n-1)$-dimensional SFG, can be applied $k$ times and thus reduces the dimension of the array to $n-k$. More elaborately, a similar projection method can be used to map an $(n-1)$-dimensional SFG into an $(n-2)$-dimensional SFG, and so on. This scheme is called *multiprojection*. + +The *functional decomposition*, *index fixing*, and *slice and tile* are the special cases of the multiprojection. Multiprojection can not only obtain the DGs and SFGs from functional decomposition but can also obtain other 3D DGs, 2D SFGs, and other designs that are difficult to be obtained from other methods. + +Multiprojection is introduced here to design array processors which satisfy most of the following design criteria: (1) increase the computational power, (2) reduce the I/O requirement, (3) reduce the control overhead, and (4) have some expandabilities. For example, a localized recursive algorithm for block matching is derived so that the original 6D BMA is transferred into 3D algorithm [22]. (We will see why the BMA is 6-dimensional later in Section 2.1 and Section 4.3.) After that, it is derived into two designs—a 1D systolic array and a 2D semi-systolic array. Both of the arrays are reported to achieve an almost 100% utilization rate. 
Nevertheless, since the original 6D is folded into 3D, the designs have more constraints. The former one requires a massive amount of I/O ports. The latter one is only useful when the size of the current block ($n$) is equal to twice of the search range ($2p$) and requires a massive amount of data broadcasting. + +## 2.1. High Dimensional Algorithm + +Before we jump into the discussion of the multi-projection, it is advisable to introduce the concept of high-dimensional algorithms first. An algorithm is said to be $n$-dimensional if it has $n$-depth recursive loops in nature. For example, a block-matching algorithm for the whole frame is 6-dimensional as shown Fig. 4(a). The indices $x, y, u, v, i, j$ contribute the algorithm into 6D. + +It is very important to respect the *read-after-read* data dependency. If a datum could be read time after time by hundreds of operations and those operations are put closely together, then a small cache can get rid of a large amount of external memory accesses. +---PAGE_BREAK--- + +Fig. 4. (a) The 6D BMA, where $N_v$ is the number of current blocks in the vertical direction, $N_h$ is the number of current blocks in the horizontal direction, $n$ is the block size, and $p$ is the search range. The indices $x, y, u, v, i, j$ contribute the algorithm into 6D. The inner four loops are exactly those shown in Fig. 22. (b) A 3D BMA that folds two loops in (a) into one loop. (c) On the other hand, a 7D BMA ($x, y, u, v, i, j_1, j_2$ 7-dimension) can be constructed by modifying the inmost loop index $j$ of the original algorithm into two indices $j$ and $j_2$. + +Since $s[x*n+i+u, y*n+j+v]$ will be read time after time for different $x, y, u, v, i, j$ combinations, this algorithm is 6D. + +One the other hand, if we ignore the read-after-read data dependency, the DG has only two-dimensional + +read-after-write dependency based on variable SAD. 
Although the DG become lower dimensional, it would be harder to track the data reusability and reduce the amount of memory accesses. +---PAGE_BREAK--- + +*Transformation to Lower Dimension.* As shown in Fig. 4(b), two loops are folded into one loop to make the algorithm become less-dimensional [22]. + +The DG becomes 3-dimensional because there are only 3 loop indices. The number of projections in multiprojection become less and it is easier to optimize the scheduling. However, in this modified algorithm, the operation regarding (u,v+1) must be executed directly after the operation regarding (u,v). It makes the algorithm become less flexible. Efficient, expandable, and low I/O designs are harder to achieve. Besides, the folding of 6D DG will make it benefit less from some useful graph transformation as shown in Section 3. + +*Transformation to Higher Dimension.* We can also construct some artificial indices to make a lower-dimensional DG problem become higher-dimensional DG. For example, the inmost loop of the original algorithm could be modified as shown in Fig. 4(c). + +The indices x, y, u, v, i, j₁, j₂ transform this algorithm into a 7-dimensional concept. This approach is not generally recommended because the number of steps for multiprojection increases in order to have the low-dimension design. However, this method provides an option of execution in the order of $j = \{1, N/2 + 1, 2, N/2 + 2, ...\}$ instead of $j = \{1, 2, ..., N/2, N/2 + 1, ...\}$ (simply exchanging the order of the j₁ loop and the j₂ loop). As we will see later in Section 3.7, LSGP and LPGS partitioning can be carried out via multiprojection after a DG is transformed into an artificial higher-dimensional DG. + +## 2.2. Algebraic Formulation of Multiprojection + +The process of multiprojection could be written as a number of single projections using the same algebraic formulation as introduced in Appendix A.1. 
In this section, we explain how to project the (n-1)-dimensional SFG to an (n-2)-dimensional SFG. The potential difficulties of this mapping are (1) the presence of delay edges in the (n-1)-dimensional SFG, and (2) the delay management of the edges in the (n-2)-dimensional SFG. + +*Double-Projection.* For simplicity, we first introduce how to have a 2D SFG for a 4D DG by the multiprojection. + +**Step 1** We project the 4D DG into a 3D SFG by projection vector $\vec{d}_4$ (4 × 1 column vector), projection matrix $\mathbf{P}_4$ (3 × 4 matrix), and scheduling vector $\vec{s}_4$ (4 × 1 column vector) with three constraints: (1) $\vec{s}_4^T \vec{d}_4 > 0$, (2) $\mathbf{P}_4 \vec{d}_4 = 0$, and (3) $\vec{s}_4^T \vec{e}_i \ge 0 \ \forall i$. The computation node $\underline{\mathcal{C}}$ (4 × 1) in 4D DG will be mapped into the 3D SFG by + +$$ \begin{bmatrix} T_3(\underline{\mathcal{C}}) \\ \underline{n}_3(\underline{\mathcal{C}}) \end{bmatrix} = \begin{bmatrix} \vec{s}_4^T \\ \mathbf{P}_4 \end{bmatrix} \underline{\mathcal{C}} $$ + +The data dependence edges will be mapped into the 3D SFG by + +$$ \begin{bmatrix} D_3(\vec{e}_i) \\ \vec{m}_3(\vec{e}_i) \end{bmatrix} = \begin{bmatrix} \vec{s}_4^T \\ \mathbf{P}_4 \end{bmatrix} \vec{e}_i $$ + +**Theorem 1.** $D_3(\vec{e}_i) \neq 0$ for any $\vec{m}_3(\vec{e}_i) = 0$. + +*Proof:* For $\vec{m}_3(\vec{e}_i) = 0$, $\vec{e}_i$ is proportional to $\vec{d}_4$. For example, $\vec{e}_i = \alpha\vec{d}_4$ ($\alpha \neq 0$). The basic constraint $\vec{s}_4^T\vec{d}_4 > 0$ implies $\alpha\vec{s}_4^T\vec{d}_4 \neq 0$; therefore, $D_3(\vec{e}_i) = \vec{s}_4^T\vec{e}_i \neq 0$. 
$\square$ + +**Step 2** We project the 3D SFG into a 2D SFG by projection vector $\vec{d}_3$ (3 × 1 column vector), projection matrix $\mathbf{P}_3$ (2 × 3 matrix), and scheduling vector $\vec{s}_3$ (3 × 1 column vector) with three constraints: (1) $\vec{s}_3^T\vec{d}_3 > 0$, (2) $\mathbf{P}_3\vec{d}_3 = 0$, and (3) $\vec{s}_3^T\vec{m}_3(\vec{e}_i) \ge 0 \ \forall \vec{e}_i$ for broadcasting data. Or, $\vec{s}_3^T\vec{m}_3(\vec{e}_i) > 0 \ \forall \vec{e}_i$ for non-broadcasting data. +The computation node $\underline{n}_3(\underline{\mathcal{C}})$ (3 × 1) in the 3D SFG, which is mapped from $\underline{\mathcal{C}}$ (4 × 1) in the 4D DG, will be mapped into the 2D SFG by + +$$ \begin{bmatrix} T'_2(\underline{\mathcal{C}}) \\ \underline{n}'_2(\underline{\mathcal{C}}) \end{bmatrix} = \begin{bmatrix} \vec{s}_3^T \\ \mathbf{P}_3 \end{bmatrix} \underline{n}_3(\underline{\mathcal{C}}) $$ + +The data dependence edges in the 3D SFG will further be mapped into the 2D SFG by + +$$ \begin{bmatrix} D'_2(\vec{e}_i) \\ \vec{m}'_2(\vec{e}_i) \end{bmatrix} = \begin{bmatrix} \vec{s}_3^T \\ \mathbf{P}_3 \end{bmatrix} \vec{m}_3(\vec{e}_i) $$ + +**Step 3** We can combine the results from the previous 2 steps. Let allocation matrix $\mathbf{A} = \mathbf{P}_3\mathbf{P}_4$ and scheduling vector $\mathbf{S}^T = \vec{s}_3^T\mathbf{P}_4 + M_4\vec{s}_4^T$. ($M_4 \ge 1 + (N_4 - 1)\vec{s}_3^T\vec{d}_3$ where $N_4$ is the maximum number of nodes along the $\vec{d}_3$ direction in the 3D SFG.) + +• Node mapping: + +$$ \begin{bmatrix} T_2(\underline{\mathcal{C}}) \\ \underline{n}_2(\underline{\mathcal{C}}) \end{bmatrix} = \begin{bmatrix} \mathbf{S}^T \\ \mathbf{A} \end{bmatrix} \underline{\mathcal{C}} $$ +---PAGE_BREAK--- + +where $\underline{n}_2(\underline{\mathcal{C}}) = \underline{A}\underline{\mathcal{C}}$ means where the original computational node $\underline{\mathcal{C}}$ is mapped. 
$T_2(\underline{\mathcal{C}}) = \underline{S}\underline{\mathcal{C}}$ means when the computation node is to be executed. + +* Edge mapping: + +$$ \begin{bmatrix} D_2(\vec{e}_i) \\ \vec{m}_2(\vec{e}_i) \end{bmatrix} = \begin{bmatrix} \mathbf{S}^T \\ \mathbf{A} \end{bmatrix} \vec{e}_i $$ + +where $\vec{m}_2(\vec{e}_i) = \mathbf{A}\vec{e}_i$ means where the original data dependency relationship is mapped. $D_2(\vec{e}_i) = \mathbf{S}^T\vec{e}_i$ means how much time delay should be in the edge $\vec{m}_2(\vec{e}_i)$. + +**Constraints for Data and Processor Availability.** Every dependent datum comes from previous computation. To ensure data availability, every edge must have at least one unit of delay if the edge is not broadcasting some data. + +**Theorem 2.** **Data Availability.** $D_2(\vec{e}_i) = \mathbf{S}^T\vec{e}_i \ge 0$ if $\vec{e}_i$ is for broadcasting data. $D_2(\vec{e}_i) = \mathbf{S}^T\vec{e}_i > 0$ if $\vec{e}_i$ is not for broadcasting data. + +**Proof:** + +$$ +\begin{align*} +D_2(\vec{e}_i) &= \mathbf{S}^T \vec{e}_i \\ +&= (\vec{s}_3^T \mathbf{P}_4 + M_4 \vec{s}_4^T) \vec{e}_i \\ +&= \vec{s}_3^T \mathbf{P}_4 \vec{e}_i + M_4 \vec{s}_4^T \vec{e}_i \\ +&\geq \vec{s}_3^T \mathbf{P}_4 \vec{e}_i \\ +&\quad (\text{from the constraint (3) in step 1}) \\ +&> 0 \quad (\text{or, } \geq 0) \\ +&\quad (\text{from the constraint (3) in step 2}) +\end{align*} +$$ + +□ + +Two computational nodes that are mapped into a single processor could not be executed at the same time. To ensure processor availability, $T_2(\underline{\mathcal{C}}_i) \neq T_2(\underline{\mathcal{C}}_j)$ must be satisfied for any $\underline{\mathcal{C}}_i \neq \underline{\mathcal{C}}_j$ and $\underline{n}_2(\underline{\mathcal{C}}_i) = \underline{n}_2(\underline{\mathcal{C}}_j)$. 
+ +**Theorem 3.** **Processor Availability.** $T_2(\underline{\mathcal{C}}_i) \neq T_2(\underline{\mathcal{C}}_j)$ for any $\underline{\mathcal{C}}_i \neq \underline{\mathcal{C}}_j$ and $\underline{n}_2(\underline{\mathcal{C}}_i) = \underline{n}_2(\underline{\mathcal{C}}_j)$. + +**Proof:** For any $\underline{n}_2(\underline{\mathcal{C}}_i) = \underline{n}_2(\underline{\mathcal{C}}_j)$ +$\Rightarrow \mathbf{P}_3\underline{n}_3(\underline{\mathcal{C}}_i) - \mathbf{P}_3\underline{n}_3(\underline{\mathcal{C}}_j) = 0$ +$\Rightarrow \underline{n}_3(\underline{\mathcal{C}}_i) - \underline{n}_3(\underline{\mathcal{C}}_j)$ is proportional to $\vec{d}_3$. +$\Rightarrow \underline{n}_3(\underline{\mathcal{C}}_i) - \underline{n}_3(\underline{\mathcal{C}}_j) = \mathbf{P}_4(\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) = \alpha\vec{d}_3$ + +Since $N_4$ is the maximum number of nodes along the $\vec{d}_3$ direction in the 3D SFG, $\alpha \in \{\underline{0}, \pm\underline{1}, \pm\underline{2}, \dots, \pm\underline{(N_4-1)}\}$. + +$$ +\begin{align*} +T_2(\underline{\mathcal{C}}_i) - T_2(\underline{\mathcal{C}}_j) &= \mathbf{S}^T(\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) \\ +&= (\vec{s}_3^T \mathbf{P}_4 + M_4 \vec{s}_4^T)(\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) \\ +&= \vec{s}_3^T \mathbf{P}_4(\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) + M_4 \vec{s}_4^T (\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) \\ +&= \alpha \vec{s}_3^T \vec{d}_3 + M_4 \alpha \vec{s}_4^T (\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) +\end{align*} +$$ + +1. If $\mathbf{P}_4\underline{\mathcal{C}}_i = \mathbf{P}_4\underline{\mathcal{C}}_j$, then $\alpha = 0$ and + +$$ +\begin{align*} +T_2(\underline{\mathcal{C}}_i) - T_2(\underline{\mathcal{C}}_j) &= M_4 \vec{s}_4^T (\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) \\ +&\neq 0 && (\text{by Theorem 1}) +\end{align*} +$$ + +2. 
If $\mathbf{P}_4\underline{\mathcal{C}}_i \neq \mathbf{P}_4\underline{\mathcal{C}}_j$, then $\alpha \in \{\pm\underline{1}, \dots, \pm\underline{(N_4-1)}\}$ + +(a) If $\vec{s}_4^T(\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) = 0$, then + +$$ +\begin{align*} +T_2(\underline{\mathcal{C}}_i) - T_2(\underline{\mathcal{C}}_j) &= \alpha \vec{s}_3^T \vec{d}_3 \\ +&\neq 0 && (\text{by the basic constraint of step 2}) +\end{align*} +$$ + +(b) If $\vec{s}_4^T(\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) \neq 0$, then by assuming $\vec{s}_4^T(\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) > 0$ without losing generality, we have + +$$ +\begin{align*} +T_2(\underline{\mathcal{C}}_i) - T_2(\underline{\mathcal{C}}_j) &= \alpha \vec{s}_3^T \vec{d}_3 + M_4 \vec{s}_4^T (\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) \\ +&\geq \alpha \vec{s}_3^T \vec{d}_3 \\ +&\quad +(1 + (\underline{(N_4-1)}\vec{s}_3^T \vec{d}_3))\vec{s}_4^T (\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) \\ +&= (\alpha + (\underline{(N_4-1)}\vec{s}_4^T (\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j)))\vec{s}_3^T \vec{d}_3 \\ +&\quad +\vec{s}_4^T (\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) \\ +&\geq (\alpha + (\underline{(N_4-1)}\vec{s}_3^T \vec{d}_3 + \vec{s}_4^T (\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j)) \\ +&\quad (\because \vec{s}_4^T (\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) \geq 1) \\ +&\geq 0 + \vec{s}_4^T (\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) \\ +&\quad (\because \alpha + N_4 - 1 \geq 0) \\ +&> 0 +\end{align*} +$$ + +If $\vec{s}_4^T(\underline{\mathcal{C}}_i - \underline{\mathcal{C}}_j) < 0$, then let $\underline{c}'_i = \underline{\mathcal{C}}_j$ and $\underline{c}'_j = \underline{\mathcal{C}}_i$. The condition $T_2(\underline{c}'_i) \neq T_2(\underline{c}'_j)$ for any $\underline{c}'_i \neq c'_j$ and $\underline{n}_2(\underline{\mathcal{C}}'_i) = n'_2(\underline{\mathcal{C}}'_j)$ holds. 
So, the proof will. + +Q.E.D. from 1, 2(a), and 2(b). □ + +Multiprojection n-Dimensional DG into k-Dimensional SFG. +---PAGE_BREAK--- + +**Step 1** Let the $n$-dimensional SFG define as the +$n$-dimensional DG. That is, $\underline{n}_n(\mathcal{C}_x) = \mathcal{C}_x$ and +the $\vec{m}_n(\vec{e}_i) = \vec{e}_i$. + +**Step 2** We project the *l*-dimensional SFG into a +(*l* − 1)-dimensional SFG by projection vector $\vec{d}_l$ +(*l* × 1), projection matrix **P****l* ((*l* − 1) × *l*), and +scheduling vector $\vec{s}_l$ (*l* × 1) with basic constraint +$\vec{s}_l^T \vec{d}_l > 0$, **P****l* $\vec{d}_l$ = 0, and $\vec{s}_l^T \vec{m}_l(\vec{e}_i) \ge$ (or >) +0$\forall\vec{e}_i$. +The computation node $\mathcal{C}_i$ (*l* × 1) and the data de- +pendence edge $\vec{m}_l(\vec{e}_i)$ (*l* × 1) in *l*-dimensional +SFG will be mapped into the (*l* − 1)-dimensional +SFG by + +$$ +\underline{n}_{l-1}(\underline{\mathcal{c}}_i) = \mathbf{P}_l \underline{n}_l(\underline{\mathcal{c}}_i) \quad (1) +$$ + +$$ +\vec{m}_{l-1}(\vec{e}_i) = \mathbf{P}_l \vec{m}_l(\vec{e}_i) \quad (2) +$$ + +**Step 3** After ($n-k$) projections, the results can be +combined. The allocation matrix will be + +$$ +\mathbf{A} = \mathbf{P}_k \mathbf{P}_{k+1} \cdots \mathbf{P}_n \qquad (3) +$$ + +The scheduling vector will be + +$$ +\begin{align} +\mathbf{S}^T &= \bar{\mathbf{s}}_{k+1}^T \mathbf{P}_{k+2} \mathbf{P}_{k+3} \cdots \mathbf{P}_n \nonumber \\ +&\quad + M_{k+2} \bar{\mathbf{s}}_{k+2}^T \mathbf{P}_{k+3} \mathbf{P}_{k+4} \cdots \mathbf{P}_n \nonumber \\ +&\quad + M_{k+2} M_{k+3} \bar{\mathbf{s}}_{k+3}^T \mathbf{P}_{k+4} \mathbf{P}_{k+5} \cdots \mathbf{P}_n \nonumber \\ +&\vdots \nonumber \\ +&\quad + M_{k+2} M_{k+3} \cdots M_n \bar{\mathbf{s}}_n \tag{4} +\end{align} +$$ + +where $M_l \ge 1 + (N_l - 1)\bar{s}_{l-1}^T d_{l-1}$ and $N_l$ is +the maximum number of nodes along the $d_{l-1}$ +direction in the $l$-dimensional SFG. 
Therefore, + +• Node mapping will be: + +$$ +\left[ \frac{T_k(\underline{\mathcal{c}}_i)}{\underline{n}_k(\underline{\mathcal{c}}_i)} \right] = \left[ \frac{\mathbf{S}^T}{\mathbf{A}} \right] \underline{\mathcal{c}}_i \quad (5) +$$ + +• Edge mapping will be: + +$$ +\left[ D_k(\vec{e}_i) \quad \vec{m}_k(\vec{e}_i) \right] = \left[ \begin{matrix} S^T \\ A \end{matrix} \right] \vec{e}_i \quad (6) +$$ + +Constraints for Processor and Data Availability. If no transmittance property is assumed, every edge must have at least one delay because every dependent data is come from previous computation. It is easy to show that data availability is satisfied, i.e., $D_k(\vec{e}_i) > 0 \forall i.$ + +Following the same proof of Theorem 3, one can +easily show processor availability is also satisfied., i.e., +$T_k(c_i) \neq T_k(c_j)$ for any $c_i \neq c_j$ and $\underline{n}_2(c_i) = \underline{n}_2(c_j)$. + +2.3. Optimization in Multiprojection + +After projection directions are fixed, the structure of +the array is determined. The remaining part of the +design is to find a scheduling that can complete the +computation in minimal time under processor and data +availability constraint. That is, + +$$ +\min_{\mathbf{S}} \left( \max_{\underline{\mathcal{c}}_x, \underline{\mathcal{c}}_y} \{\mathbf{S}^T (\underline{\mathcal{c}}_x - \underline{\mathcal{c}}_y)\} \right) +$$ + +under the following constraints: + +1. $\mathbf{S}^T\vec{e}_i > 0 \quad \forall \vec{e}_i$ (Data Availability) + +2. $\mathbf{S}^T\mathcal{C}_i \neq \mathbf{S}^T\vec{c}_j \quad \forall \mathcal{C}_i \neq \vec{c}_j, A\mathcal{C}_i = A\vec{c}_j$ (Processor Availability) + +A method using quadratic programming techniques +is proposed to tackle the optimization problem [26]. +However, it takes non-polynomial time to find the op- +timal solution. A polynomial-time heuristic approach, +which uses the branch-and-bound technique and tries +to solve the problem by linear programming, is also +proposed [25]. 
+ +Here, we propose another heuristic procedure to +find a near optimal scheduling in our multiprojection +method. In each single projection, from i-dimension +to (i - 1)-dimension, find an $\vec{s}_i$ by + +$$ +\vec{s}_i = \arg\min_{\vec{s}} \left\{ +\max_{\underline{n}_i(\underline{\mathcal{c}}_x), \underline{n}_i(\underline{\mathcal{c}}_y)} +\left\{ +\vec{s}^T [\underline{n}_i(\underline{\mathcal{c}}_x) - \underline{n}_i(\underline{\mathcal{c}}_y)] +\right\} +\right\} +\quad \forall \underline{\mathcal{c}}_x, \underline{\mathcal{c}}_y \in \text{DG}(7) +$$ + +under the following constraints: + +1. $\bar{\boldsymbol{s}}_i^T \bar{\boldsymbol{d}}_i > 0$ + +2. $\bar{s}_i^T m_i(e_j) \ge 0 \quad \forall j$ if $(i-1)$-dimension is not the final goal. + +$\bar{s}_i^T m_i(e_j) > 0$ $\forall j$ if $(i-1)$-dimension is the final goal. + +This procedure will find a linear scheduling vector in polynomial time, when the given processor allocation function is linear. Although we have no proof of optimization yet, several design examples show our method can provide optimal scheduling when the DG is shift-invariant and the projections directions are along the axes. (Nevertheless, it will still be an NP-hard problem for all possible processor allocation and time allocation functions.) +---PAGE_BREAK--- + +**Table 1.** Graph transformation rules for equivalent DGs. Note that the *transmittent data*, which are used repeatedly by many computation nodes in the DG (see Appendix A.2), play a critical role here. + +
RulesApply toFunctionAdvantages
Assimilarity2D transmittent dataKeep only one edge and delete the others in the 2nd dimensionSave links
Summation2D accumulation dataKeep only one edge and delete the others in the 2nd dimensionSave links
Degeneration2D transmittent dataReduce a long buffers to a single registerSave buffers
Reformation2D transmittent dataReduce a long delay to a shorter oneSave buffers
RedirectionOrder independent data (e.g., transmittent or accumulation data)Opposite the edgeSave problems on negative edges
+ +Fig. 5. (a) A high-dimensional DG, where a datum is transmittent to a set of nodes by the solid 2D mesh. (b) There are several paths via which the datum can reach a certain node. (c) During the multiprojection, the dependencies in different directions get different delay. (d) Because the data could reach the nodes by two possible paths, the *assimilarity rule* is applied to this SFG. Only one of the edges in the second dimension is kept. Without changing the correctness of the algorithm, a number of links and buffers are reduced. + +## 3. Equivalent Graph Transformation Rules + +In Appendix A.2 and Section 2.1, some transformation rules of the DG are introduced. In order to have better designs, we also provide some graph transformation rules that can help us reduce the number of connections between processors, the size of buffer, or the power consumption. Table 1 shows a brief summary of the rules. + +### 3.1. Assimilarity Rule + +As shown in Fig. 5, the assimilarity rule can save some links without changing the correctness of the DG. If a datum is transmittent to a set of operation/computation nodes in the DG/SFG by a 2D (or higher-dimensional) mesh, then there are several possible paths via which the datum can reach a certain node. For example, in the BMA, the $s[i+u, j+v]$ +---PAGE_BREAK--- + +Fig. 6. (a) A datum is the summation of a set of nodes by a 2D mesh in an SFG. During the multiprojection, the dependencies in different directions get different delay. (b) Without changing the correctness of the algorithm, only one of the edges in the second dimension is kept. By the summation rule, a number of links and buffers are reduced. + +Fig. 7. (a) When transforming an SFG description to a systolic array, the conventional delay management uses $(m-1)$ registers for $m$ units of delay on the links. 
(b) If the data sets of two adjacent nodes overlap each other, the degeneration rule suggests that only a register is required because the other data could be obtained by the other direction. + +can be passed by $s[(i+1)+(u-1), j+v]$ via loop *i*, or by $s[i+u, (j+1)+(v-1)]$ via loop *j*. Keeping only one edge in the second dimension is sufficient for the data to reach everywhere. + +The procedure of keeping only one edge for a set of edges can save a great number of interconnection buffers. Usually, this rule is applied after the final SFG is obtained. In this way, we can get rid of edges with longer delay and more edges. + +One of the major drawbacks of this assimilarity rule is that every node must use the same set of data before this rule can be applied. It is not true for any algorithm that uses a 2D mesh to transmittent the data. Generally speaking, the data set of a node greatly overlaps with the data set of the other nodes but not identically. In order to reduce the connection edges, we can make all + +the nodes process the same set of data artificially (i.e., ask the nodes to do some useless computations) and then apply this rule. + +## 3.2. Summation Rule + +As shown in Fig. 6, the summation rule can save some links without changing the correctness of the DG. Because summation is associative, the order of the summation can be changed. If output is obtained by aggregating a 2D (or higher-dimensional) mesh of computational nodes, we can accumulate the partial sum in one dimension first, then accumulate the total from the partial sum in the second dimension afterward. For example, in the BMA, the SAD[u,v] is the 2D summation of $|s[i+u, j+v] - r[i, j]|$ over $1 \le i, j \le n$. We can accumulate the difference over index *i* first, or over +---PAGE_BREAK--- + +**Fig. 8.** (a) A high-dimensional DG, where a datum is transmittent to a set of nodes by a 2D mesh, is projected into an SFG. 
During the multiprojection, the dependencies in different directions get different delay. Because the data could reach the nodes by more than two possible paths, the assimilarity rule is applied to this SFG. Only one of the edges in the second dimension is kept. (b) The delay (i.e., the number of buffers) could be further decreased when the *reformation* rule transforms the original 2D mesh into a tilted mesh. + +index *j* first (cf. Fig 2). We should calculate the data in +the direction with fewer buffers first, then rigorously +calculate the data in the other direction later. + +### 3.4. Reformation Rule + +For 2D or higher-dimensional transmittent data, the +structure of the mesh is not rigid. For example, +in the BMA, the $s[i+u, j+v]$ can be passed +by $s[(i+k)+(u-k), j+v]$ via loop *i* and by +$s[i+u, (j+k)+(v-k)]$ via loop *j* for $1 \le k \le n$. +For a different *k*, the structure of the 2D transmittent +mesh is different. The final delay in the designed +SFG will be different. As a result, we should choose +*k*, depending on the required buffer size. Generally +speaking, the shorter the delay, the fewer the buffers. + +For example, Fig. 8(a) shows a design after applying +the assimilarity rule. Only a long delayed edge was left. +Moreover, the data are transmittent to the whole array. +So, we detour the long delayed edge, make use of the +delay in the first dimension, and get the design show +in Fig. 8(b), where the longest delay is now shorter. + +### 3.3. Degeneration Rule + +The degeneration rule reduces the data link when data +are transmittent through a 2D (or higher-dimensional) +mesh when (1) each node has its own data set and +(2) the data sets of two adjacent nodes overlap each +other significatly. 
One way to save the buffer is to +let the overlapping data transmittent from one dimen- +sion thoroughly (like that in the assimilarity rule) and +let the non-overlapping transmittent from the other di- +mension(s) (unlike that in the assimilarity rule). In the +second dimension, it is only necessary to keep non- +overlapping data. Fig. 7 shows that only a register is +required because the other data could be obtained by +the other direction. + +### 3.5. Redirection Rule + +Because some operations are associative (e.g., sum- +mation data, transmittent data), the arcs in the DG are +reversible. The arcs are reversed to help the design. +For example, the datum $s[(i+1)+(u-1), j+v]$ is passed to $s[i+u, j+v]$ via loop *i* in the BMA. +After mapping the DG to a SFG, the delay on the edge is +negative. Conventionally, negative delay is not allowed +and we must find another scheduling vector $\vec{s}$. This +rule tells us to move the data in the opposite direction +/passing the $s[i+u, j+v]$ to $s[(i+1)+(u-1), j+v]$ instead of re-calculating the scheduling vector +(cf. Fig. 9). + +**Fig. 9.** (a) Generally speaking, an SFG with a negative delay is not permissible. (b) However, if the dependencies have no polarization, then we apply the redirection rule to direct the edges with negative delay to the opposite direction. After that, the SFG become permissible. +---PAGE_BREAK--- + +### 3.6. Design Optimization vs. Equivalent Transformation Rules + +All these rules do not modify the correctness of the implementation, but could accomplish some degree of design optimization. + +1. The assimilarity rule and the summation rule have no influence on the overall calculation time. However, these two rules reduce the buffers and links. Generally speaking, these two rules are applied after the SFG is yielded. + +2. The degeneration rule does not influence the overall calculation time. It is applied when one would like to transform the SFG into hardware design. 
It helps the reduction of the buffers and links. However, extra control logic circuits are required. + +3. The reformation rule and the redirection rule will have influence on the scheduling problem because these two rules can make some prohibited scheduling vectors become permissible. + +These rules help the design optimization but also make the optimization process harder. Sometimes, the optimization process will become a iterative procedure which consists of (1) scheduling optimization and (2) equivalent transformation. + +### 3.7. Locally Parallel Globally Sequential and Locally Sequential Globally Parallel Systolic Design by Multiprojection + +In Appendix A.4, LPGS and LSGP have been introduced briefly. In this section, we delineate a unified partitioning and scheduling scheme for LPGS and LSGP into our multiprojection method. The advantage of this unified partitioning model is that various partitioning methods can be achieved by choosing projection vectors. The systematic scheduling scheme can explore more inter-processor parallelism. + +*Equivalent Graph Transformation Rules for Index Folding.* A unified re-indexing method is adopted to fold original DG into a higher-dimensional DG but with a smaller size in a chosen dimension. Then, our multiprojection approach is applied to obtain the LPGS or LSGP designs. The only difference between LPGS and LSGP under our uniform approaches is the order of the projection. Our approach is even better in deciding the scheduling because our scheduling is automatically inherited from multiprojection scheduling instead of hierarchical scheduling. + +*Index Folding.* In order to map an algorithm into a systolic array by LPGS or LSGP, we propose a re- + +Fig. 10. (a) shows a 2 × 6 DG. (b) shows an equivalent 2 × 3 × 2 DG after index folding. (c) an LPGS partitioning when we project the 3D DG along the *a* direction. (d) an LSGP partitioning when we project the 3D DG along the *b* direction. +---PAGE_BREAK--- + +Fig. 11. 
A core in the 4D DG of the BMA. There are $n \times n \times (2p+1) \times (2p+1)$ nodes in the DG. The node $i, j, u, v$ represents the computation $SAD[u, v] = SAD[u, v] + |s[i+u, j+v] - r[i, j]|$. We denote $\vec{E}_1$ as the data dependency between computation nodes for $s[i+u, j+v]$. Because $s[i+u, j+v]$ can come from two possible directions: (1) $s[(i-1)+u, (j+v)]$ or (2) $s[i+u, (j-1)+v]$, $\vec{E}_1$ can be $(1, 0, -1, 0)$ and $(0, 1, 0, -1)$. By the same token, $\vec{E}_2$—the data dependency of the current block—could be $(0, 0, -1, 0)$ and $(0, 0, -1, 0)$. $\vec{E}_3$, which accumulates the difference, could be $(1, 0, 0, 0)$ and $(0, 1, 0, 0)$. The representation of the DG is not unique; most of the dependence edges can be redirected because of data transmittance. + +indexing method for the computational nodes into a +higher-dimensional DG problem. + +An example is shown in Fig. 10. We want to map a +$2 \times 6$ DG into a smaller 2D systolic array. Let $u, v$ be +the indices $(0 \le u \le 1, 0 \le v \le 5)$ of the DG. + +First, we will re-index all the computational nodes +$(u, v)$ into $(u, a, b)$. The 2D DG becomes a 3D DG +$(2 \times 2 \times 3)$ where an $a$ means 3 units of $v$, a $b$ means +1 unit of $v$, and $0 \le a \le 1$, $0 \le b \le 2$. Then, a node +at $(u, a, b)$ in the 3D DG is equivalent to the node at +$(u, (3a + b))$ in the original 2D DG. + +After this, by multiprojection, we can have the fol- +lowing two partitioning methods: + +**1. LPGS** + +If we project the 3D DG along the *a* direction, +then the nodes that are close to each other in the *v* +direction will be mapped into the different nodes. +That is, the computation nodes are going to be +executed in parallel. This is an LPGS partitioning. + +**2. LSGP** + +If we project the 3D DG along *b*, then the nodes +that are close to each other in the *v* direction will be +mapped into the same node. That is, the computa- + +tion nodes are going to be executed in a sequential +order. 
This is an LSGP partitioning. + +Note that we must be careful about the data depen- +dency after transformation. One unit of original *v* will +be 0 unit of *a* and 1 unit of *b* when the dependence edge +does not move across different packing segments. (In +the example, a packing segment consists of all the com- +putation nodes within three units of sequential *v*. That +is, the packing boundary is when 3 divides *v*.) One +unit of the *v* is 1 unit of the *a* and -2 unit of the *b* when +the dependence edge crosses the packing boundary of +the transformed DG one time. + +**4. Systolic Designs for Full-Search Block-Matching Algorithms by Multiprojection Approach** + +4.1. 4D DG of BMA + +As Fig. 22 shows the pseudo code of the BMA of a +single current block, Fig. 11 shows a core in the 4D +DG of the BMA for a current block. The operations of +taking difference, taking absolute value, and accumu- +lating residue are embedded in a 4-dimensional space +i,j,u,v. The indeices i and j (1 ≤ i, j ≤ n) are the +indices of the pixels in a current block. The indices +u and v (-p ≤ u, v ≤ p) are the indices of the po- +tential displacement vector. The actual DG would be +a 4-dimensional repeat of the same core. Although it +is more difficult to visualize the actual DG, it is fairly +straightforward to manipulate algebra on the core and +thus manipulate multiprojection. + +We use $\vec{E}_1$ to denote the data dependency of the search window. The $s[i+u, j+v]$ will be used repeatedly for (1) different $i, j$, (2) same $i + v$, and (3) same $j + u$. Therefore, $\vec{E}_1$ is a 2-dimensional reformable mesh. One possible choice is (1, 0, -1, 0) and (0, 1, 0, -1). The $r[i, j]$ will be used repeatedly for different $u, v$. Hence, $\vec{E}_2$, the data dependency of the current block, could be (0, 0, -1, 0) and (0, 0, -1, 0). The summation can be done in *i*-first order or *j*-first order. $\vec{E}_3$, which accumulates the difference, could be (1, 0, 0, 0) and (0, 1, 0, 0). 
The representation of the DG is not unique; most of the dependence edges can be redirected because of data transmittance. +---PAGE_BREAK--- + +Constructing Previous Designs. As mentioned be- +fore, our multiprojection can cover most of the previ- +ous design methods. Here is the first example. + +After our first projection with $\vec{d}_4^T = (0, 0, -1, 0)$, $\vec{s}_4^T = (0, 0, -1, 0)$, and + +The following is the 4D DG of the BMA: + +$$P_4 = \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & -1 \end{bmatrix}$$ + +Search Window ($\vec{E}_1$) +1, 0, -1, 0 $D_4$ = 0 +0, 1, 0, -1 $D_4$ = 0 + +Current Blocks ($\vec{E}_2$) +0, 0, -1, 0 $D_4$ = 0 +0, 0, 0, -1 $D_4$ = 0 + +Partial Sum of SAD ($\vec{E}_3$) +1, 0, 0, 0 $D_4$ = 0 +0, 1, 0, 0 $D_4$ = 0 + +the SFG will be + +Fig. 12. (a) A 2D BMA systolic design from double-projecting the 4D DG using Eq. (9). (b) The design after the assimilarity rule is applied. (c) The design after the reformation rule is applied (cf. Fig. [8]). (d) The design by applying the degeneration rule. Its timing diagram is shown in Fig. 13. +---PAGE_BREAK--- + +Fig. 13. The timing diagram of the design in Fig. 12(d). + +Fig. 14. (a) The data sets of different current blocks indicates the possibilities of the data reuse. (b) The 5D DG of the BMA. + +
Search Window ($\vec{E}_1$): $(1, 0, 0)$, $D_3 = 1$; $(0, 1, 1)$, $D_3 = 0$
Current Blocks ($\vec{E}_2$): $(0, 0, 0)$, $D_3 = 1$; $(0, 0, 1)$, $D_3 = 0$
Partial Sum of SAD ($\vec{E}_3$): $(1, 0, 0)$, $D_3 = 0$; $(0, 1, 0)$, $D_3 = 0$
+ +If we discard any edges that have delay, then $\vec{E}_1 = (0, 1, 1)$, $\vec{E}_2 = (0, 0, 1)$, $\vec{E}_3 = (0, 1, 0)$ and $(1, 0, 0)$. We construct the 3D DG shown in Fig. 2. And, we also construct many previous designs based on the 3D DG. + +If we keep the edges that have delays, then we can reconstruct the design in [8] (cf. Fig. 3(b)) by projecting the SFG one more time with $\vec{d}_3^T = (0, 0, 1)$, $\vec{s}_3^T = (1, 0, 1)$, and + +$$P_3 = \begin{bmatrix} 1 & 0 & 0 \\ 0 & 1 & 0 \end{bmatrix}$$ +---PAGE_BREAK--- + +To ensure processor availability, + +$$M \geq 1 + (N - 1)(\vec{s}_3 \cdot \vec{d}_3) \quad (8)$$ + +where N is the maximal number of nodes along the $\vec{d}_3$-direction in the SFG. Because the index u ranges from $-p$ to $p$, N is $2p+1$. Hence, $M = 2p+1$ and + +$$\left\{ \begin{array}{l} \mathbf{A} = \mathbf{P}_3 \mathbf{P}_4 = \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \end{bmatrix} \\ \mathbf{S}^T = \vec{s}_3^T \mathbf{P}_4 + M \vec{s}_4^T = [1, 0, -2p-1, -1] \end{array} \right. \quad (9)$$ + +We have + +
Search Window ($\vec{E}_1$): $(1, 0)$, $D_2 = 2p + 2$; $(0, 1)$, $D_2 = 1$

Current Blocks ($\vec{E}_2$): $(0, 0)$, $D_2 = 2p + 1$; $(0, 0)$, $D_2 = 1$

Partial Sum of SAD ($\vec{E}_3$): $(1, 0)$, $D_2 = 1$; $(0, 1)$, $D_2 = 0$
+ +as Fig. 12(a) shows the design. + +Design Via Assimilarity and Reformation Rule. This design has a huge amount of buffers although it can catch considerable data reusability. In order to reduce the number of buffers, we can apply the *assimilarity rule*, as suggested in Section 3.1. We make + +all the nodes process the same set of data (s [-p+1, -p+1],..., s[p+n, p+n]), and delete most of the links in the second dimension, as shown in Fig. 12(b). We further apply the *reformation rule* to make the design smaller, and get the design shown in Fig. 12(c), which is identical to the design proposed in [8]. + +In terms of I/O bandwidth requirements, this design is superior to many other designs because the data are input serially and the I/O bandwidth is reduced by one order of magnitude. Shift registers instead of random access memories are used here. Thus, the control is easier, the buffer area is smaller, and the data access rate is higher. (The I/O rate of the current block is only 6% of the rate of the search window. It is relatively easy to manage the data flow of the current data. Therefore, we focus on the I/O requirement of the search window in this paper.) + +However, because of artificial unifying of the input data, some unnecessary data must go through every PE. So, the utilization rate is only 66% when n = 1 and p = 32. + +Design Via Degeneration Rule. Another approach to save buffer for Fig. 12(a) is to apply the *degeneration rule*. As shown in Fig. 12(d), this design can also save a number of buffers as well as keep the processor busy. It has a 77% total utilization rate (including the loading + +Fig. 15. (a) The design, proposed in [14], can be re-delivered by multiprojecting the 5D DG of the BMA with the *assimilarity rule* and the *reformation rule*. (b) A new design can be devised by multiprojecting the 5D DG of the BMA with the degeneration rule. +---PAGE_BREAK--- + +Fig. 16. 
(a) The data sets of different current blocks (in row-major order) indicates different possibilities of the data reuse. (b) The design with data input in the order of row major. Its timing diagram is shown in Fig. 17. + +Fig. 17. The timing diagram of the design in Fig. 16(b). + +phase and computation), and use only one I/O port for search window. Its timing diagram is shown in Fig. 13. + +As shown in Fig. 14, two contiguous current blocks may share some parts of the search window. + +## 4.2. Multiprojecting 5D DG of BMA + +Increasing the reusability of the data can reduce the I/O and, hence, increase the overall performance. This motivates the introduction of the 5D DG of the BMA. + +Let $x, y$ define the indices of the current blocks in a frame. In the 5D design, we fix $y$ at a constant value. $\vec{E}_4$ is new. $\vec{E}_4$ passes the data of the search window shared by the current blocks of a same $y$. $\vec{E}_1, \vec{E}_2, \vec{E}_3$ are the same as before; more specifically, $\vec{E}_1$ passes the data of the search window for a given current block. + +If we project the 5D DG along $x, u, v$ direction and apply the assimilarity and the reformation rule +---PAGE_BREAK--- + +Fig. 18. (a) The data reusability between current blocks. (b) The core of the 6D DG of the BMA. (The core will be repeated when $0 \le x \le N_v$, $0 \le y \le N_h$, $1 \le i, j \le n$, $-p \le u, v \le p$.) $\vec{E}_1 = (0, 0, 1, 0, -1, 0)$ and $(0, 0, 0, 1, 0, -1)$. $\vec{E}_2 = (0, 0, 0, 0, -1, 0)$ and $(0, 0, 0, 0, -1, 0)$. $\vec{E}_3 = (0, 0, 1, 0, 0, 0)$ and $(0, 0, 0, 1, 0, 0)$. $\vec{E}_4 = (1, 0, 0, 0, -n, 0)$ and $(0, 1, 0, 0, 0, -n)$. + +Fig. 19. The design by multiprojecting the 6D DG of the BMA with the degeneration rule. The basic structure of the processor array is the same as 5D design. Its systolic cache is detailed in Fig. 20. + +to it, we have the same design as proposed in [14] (cf. Fig. 15(a)). 
By adding some buffers in the chip, we can reuse a major part of the search window without reloading it. The ratio of reused data: $\frac{2p \times (n+2p)}{(n+2p) \times (n+2p)}$. When $n = 16$, $p = 32$, the ratio amounts to about 80% while 4KB on-chip buffer is added. However, this de- + +sign would share the same problem, a low utilization rate, as that in [8] (cf. Fig. 3(b)). + +Fig. 15(b) shows the design after the degeneration rule is applied to 5D DG. It has a 99% total utilization rate (include the loading phase and the computation phase), and uses only one I/O port for search window. + +**Row-Major 5D DG of BMA.** In the previous design, we assume that the BMA will be performed in the column major of the current blocks. However, in MPEG codec, current blocks are coded in the order of the row major. In order to work with current MPEG codec, the previous column-major systolic design may require an extra buffer to save the motion vector information. + +In order to avoid the extra buffer, the data that overlapped between the current blocks in the row major (cf. Fig. 16(a)) is also considered. Because the memory designed for the buffer is in row-major, the data reused between two current blocks become piecewise continuous. Its correspondent design and timing diagram are shown in Fig. 16(b) and 17. + +## 4.3. Multiprojecting 6D DG of BMA + +As the full-frame BMA is 6D (cf. Fig. 4), Fig. 18 shows the 6D DG of the BMA. Let $x_i, y_j$ define the indices of the current blocks in a frame. $\vec{E}_1, \vec{E}_2, \vec{E}_3$ are the same as above. The new feature is that $\vec{E}_4$ now represents inter-block usability shifted in both $x$ and $y$ indices. +---PAGE_BREAK--- + +Fig. 20. The systolic cache of the design shown in Fig. 19: (a) Its timing diagram. (b) The overall picture. (c) The first-level systolic cache. (d) A subcell of second-level systolic cache. (e) The second-level systolic cache. +---PAGE_BREAK--- + +Fig. 21. 
A seamless design of expandable array processors (cf. Fig. 19). + +Table 2. A comparison of several designs. Our algebraic design methodology can handle algorithms with high-dimensional data dependency and thus exploit the maximum degree of data reusability. Our design from multiprojecting the 6D DG of the BMA can achieve 99% total utilization rate of the PEs and 96% data reusability rate of the search window. + +
| | Advantage | Disadvantage |
|---|---|---|
| Our design from 4D DG (by degeneration rule, Fig. 12) | Only one I/O port | 81% total utilization rate |
| Our design from 5D DG (by degeneration rule, Fig. 15) | Only one I/O port; 99% total utilization rate; 80% data reusability rate | |
| Our design from 6D DG (by degeneration rule, Fig. 21) | Only one I/O port; 99% total utilization rate; 96% data reusability rate; expandable | |
+ +Special Supporting Memory/Cache/Buffer Design. +Since it is hard to hold all the data in the same chip that +holds the processor array, a small cache is important. +Because the memory access pattern is very regular in +the full search BMA, there is a predetermined way +for best replacement policy of the cache. Eventually, +we can get rid of the tags for the cache between the +main memory and processing unit because we know +(1) where the data should go, (2) which data should +be replaced, and (3) where we should fetch the data. + +Based on this idea, we can design a so-called *systolic cache*—a pre-fetch external cache. + +Fig. 19 shows the extended systolic design for the row-major 6D DG. The schematic design of the *systolic cache* to support such a row-major 6D DG design is detailed in Fig. 20. If the width of a frame $F_h$ is 1024 ($F_h = N_h \times n$) and half of the search window size $p$ is 32, then the size of that cache will be $2p \times F_h = 64K$ cache. + +**LPGS and LSGP for Expandable Design.** In addition to the overlapping between search windows of different current blocks, another important property is that there +---PAGE_BREAK--- + +Fig. 22. (a) The pseudo code of the BMA for a single current block. This pseudo code is exactly the inner four loops as shown in Fig. 4(a). +(b) A single assignment code for the BMA. Every element in SAD[u, v, i, j] array will be assigned to a value only once—as the name +come from. +---PAGE_BREAK--- + +```c +for (i = 1; i <= n; i++) + for (j = 1; j <= n; j++) + { + R[u, -p-1, i, j] = r[i, j]; + S[u, -p-1, i, j] = s[u+i, -p-1+j]; + } + +for (v = -p; v <= p; v++) +{ + SAD[u, v, 0, n] = 0; + for (i = 1; i <= n; i++) + { + SAD[u, v, i, 0] = SAD[u, v, i - 1, n]; + for (j = 1; j <= n; j++) + { + R[u, v, i, j] = R[u, v-1, i, j]; + S[u, v, i, j] = S[u, v-1, i, j+1]; + SAD[u,v,i,j] = SAD[u,v,i,j-1] + | S[u,v,i,j] - R[u,v,i,j] |; + } + } +} +``` + +Fig. 23. A example of the localized recursive BMA. 
The variable $s[u+i, v+j]$ and $r[i, j]$ in the inner three loops of the single assignment code shown in Fig. 22(b) are replaced by locally-interconnected arrays $S[u,v,i,j]$ and $R[u,v,i,j]$ respectively. + +Fig. 24. There are two methods for mapping the partitioned DG to an array: locally parallel globally sequential (LPGS) and locally sequential globally parallel (LSGP). + +is **no overlap and no gap** between search windows of different current blocks at any time. The search window data departing one array can be used immediately by another array. The reusable data are taken over naturally by the next array without extra buffers or special links. This design has very high expandability. The chips can be cascaded easily without performance loss as shown in Fig. 21. + +## 5. Conclusions + +In this work, we concentrate on an algebraic multiprojection methodology, capable of manipulating an algorithm with high-dimensional data dependence, to design the special data flow for highly reusable data. + +Multiprojecting the 6D DG of the BMA can give us high performance processor array designs with minimum supporting buffers (cf. Table 2). We can achieve very high data reusability rates by simple buffers, e.g., +
The multiprojection can give us the expandability not only for single chip solution but also for the chip array design. + +This work has also been extended to operation placement and scheduling in fine-grain parallel architectures [3]. Because this method exploits cache and communication localities, it results in highly efficient parallel codes. + +# Appendix + +## A.1. Common Systolic Design Approaches + +Several useful transformation techniques have been proposed for mapping the algorithm into parallel and/or pipeline VLSI architecture [11]. There are 3 stages in common systolic design methodology: the first is dependence graph (DG) design, the second is mapping the DG to a signal flow graph (SFG), and the third is design array processor based on the SFG. + +More precisely, a DG is a directed graph, $G =< V, E >$, which shows the dependence of the computations that occur in an algorithm. Each operation will be represented as one node, $\zeta \in V$, in the graph. The dependence relation will be shown as an arc, $\vec{e} \in E$, between the corresponding operations. A DG can be also considered as the graphical representation of a single assignment algorithm. Our approach to the construction of a DG will be based on the space-time indices in the recursive algorithm: Corresponding to the space-time index space in the recursive algorithm, there is a natural lattice space (with the same indices) for the DG, with one node residing on each grid point. Then the data dependencies in the recursive algorithm may be explicitly expressed by the arcs connecting the interacting nodes in the DG, while its functional description will be embedded in the nodes. A high-dimensional + +looped algorithm will lead to a high-dimensional DG. For example, the BMA for a single current block is a 4-dimensional recursive algorithm [22]. + +A complete SFG description includes both functional and structural description parts. 
The functional description defines the behavior within a node, whereas the structural description specifies the interconnection (edges and delays) between the nodes. The structural part of an SFG can be represented by a finite directed graph, $G =< V, E, D(E) >$ since the SFG expression consists of processing nodes, communicating edges, and delays. In general, a node, $\zeta \in V$, represents an arithmetic or logic function performed with zero delay, such as multiplication or addition. The directed edges $\vec{e} \in E$ model the interconnections between the nodes. Each edge $\vec{e}$ of $E$ connects an output port of a node to an input port of some node and is weighted with a delay count $D(\vec{e})$. The delay count is determined by the timing and is equal to the number of time steps needed for the corresponding arcs. Often, input and output ports are refereed to as sources and sinks, respectively. + +Since a complete SFG description should include both functional description (defines the behavior within a node) and structural description (specifies the interconnection—edges and delays—between the nodes), we can easily transform an SFG into a systolic array, wavefront array, SIMD, or MIMD. Therefore, most research is on how to transfer a DG to an SFG in the systolic design methodology. + +There are two basic considerations for mapping from a DG to an SFG: + +1. **Placement:** To which processors should operations be assigned? (A criterion might be to minimize communication/exchange of data between processors.) + +2. **Scheduling:** In what ordering should the operations be assigned to a processor? (A criterion might be to minimize total computing time.) + +Two steps are involved in mapping a DG to an SFG array. The first step is the processor assignment. Once the processor assignment is fixed, the second step is the scheduling. 
The allowable processor and schedule assignments can be quite general; however, in order to derive a regular systolic array, linear assignments and scheduling attract more attention. + +*Processor Assignment.* Processor assignment decides which processor is going to execute which node in the DG. A processor could carry out the opera- +---PAGE_BREAK--- + +tions of a number of nodes. For example, a projection method may be applied, in which nodes of the DG along a straight line are assigned to a common processing element (PE). Since the DG of a locally recursive algorithm is regular, the projection maps the DG onto a lower dimensional lattice of points, known as the processor space. Mathematically, a linear projection is often represented by a projection vector $\vec{d}$. The mapping assigns the node activities in the DG to processors. The index set of nodes of the SFG are represented by the mapping + +$$ \mathbf{P}: I^n \rightarrow I^{n-1} $$ + +where $I^n$ is the index set of the nodes of the DG, and $I^{n-1}$ is the Cartesian product of (n-1) integers. The mapping of a computation $\mathcal{C}_i$ in the DG onto a node $\underline{n}$ in the SFG is found by: + +$$ \underline{n}(\mathcal{C}_i) = \mathbf{P}\mathcal{C}_i $$ + +where $\underline{n}(\cdot)$ denotes the mapping function from a node in the DG to a node in the SFG, and the processor basis $\mathbf{P}$, denoted by an $(n-1) \times n$ matrix, is orthogonal to $\vec{d}$. Mathematically, + +$$ \vec{d}^T \mathbf{P} = 0 $$ + +This mapping also maps the arcs of the DG to the edges of the SFG. The set of edges $\vec{m}(\vec{e})$ into each node of the SFG is derived from the set of dependence edges $\vec{e}$ at each point in the DG by + +$$ \vec{m}(\vec{e}_i) = \mathbf{P}\vec{e}_i $$ + +where $\vec{m}(\cdot)$ denotes the mapping function from an edge in the DG to an edge in the SFG. + +In this paper, bold face letters (e.g., $\mathbf{P}$) represent matrices. 
Overhead arrows represent an $n$-dimensional vector, written as an $n \times 1$ matrix, e.g., $\vec{e}_i$ (a dependency arc in the DG) and $\vec{m}(\vec{e}_i)$ (an SFG dependency edge that comes for the $\vec{e}_i$). An $n$-tuple (a point in $n$-dimensional space), written as an $n \times 1$ matrix, is represented by underlined letters, e.g., $\mathcal{C}_i$ (a computation node in the DG) and $\underline{n}(\mathcal{C}_i)$ (an SFG computation node that comes from $\mathcal{C}_i$). + +**Scheduling.** The projection should be accompanied by a scheduling scheme, which specifies the sequence of the operations in all the PEs. A schedule function represents a mapping from the $n$-dimensional index space of the DG onto a 1D scheduling time space. A linear schedule is based on a set of parallel and uni- + +formly spaced hyper-planes in the DG. These hyper-planes are called equi-temporal hyper-planes—all the nodes on the same hyper-plane must be processed at the same time. Mathematically, the schedule can be represented by a schedule vector (column vector) $\vec{s}$, pointing to the normal direction of the hyper-planes. The scheduling of a computation $\mathcal{C}$ in the DG on a node $\underline{n}$ in the SFG is found by: + +$$ T(\underline{n}) = \vec{s}^T \underline{n} $$ + +where $T(\cdot)$ denotes the timing function of a node in the DG to the execution time of the processor in the SFG. + +The delay $D(\vec{e})$ on every edge is derived from the set of dependence edges $\vec{e}$ at each point in the DG by + +$$ D(\vec{e}_i) = \vec{s}^T \vec{e}_i $$ + +where $D(\cdot)$ denotes the timing function of an edge in the DG to the delay of the edge in the SFG. + +**Permissible Linear Schedules.** There is a partial ordering among the computations, inherent in the algorithm, as specified by the DG. 
For example, if there is a directed path from node $\mathcal{C}_x$ to node $\mathcal{C}_y$, then the computation represented by node $\mathcal{C}_y$ must be executed after the computation represented by node $\mathcal{C}_x$ is completed. The feasibility of a schedule is determined by the partial ordering and the processor assignment scheme. + +The necessary and sufficient conditions are stated below: + +1. $\vec{s}^T \vec{e} \ge 0$, for any dependence arc $\vec{e}$. $\vec{s}^T \vec{e} \neq 0$, for non-broadcast data. + +2. $\vec{s}^T \vec{d} > 0$. + +The first condition stands for data availability and states that the precedent computation must be completed before the succeeding computation starts. Namely, if node $\mathcal{C}_y$ depends on node $\mathcal{C}_x$, then the time step assigned for $\mathcal{C}_y$ can not be less than the time step assigned for $\mathcal{C}_x$. The first condition means that the causality should be enforced in a permissible schedule. But, if a datum is used by many operations in the DG (read-after-read data dependencies), the causality constraint could be a little bit different. As popularly adopted, the same data value is broadcast to all the operation nodes. The data are called *broadcast data*. In this case, there is no delay required. Alternatively, the same data may be propagated step by step via local +---PAGE_BREAK--- + +arcs without being modified to all the nodes. This kind of data, which is propagated without being modified, is called *transmittent data*. There should be at least one delay for transmittent data. + +The second condition stands for processor availability, i.e., 2 computation nodes cannot be executed in the same time if they are mapped into the same processor element. The second condition implies that nodes on an equi-temporal hyper-plane should not be projected to the same PE. 
In short, the schedule is permissible if and only if (1) all the dependency arcs flow in the same direction across the hyper-planes; and (2) the hyper-planes are not parallel with projection vector $\vec{d}$. + +In general, the projection procedure involves the following steps: + +1. For any projection direction, a processor space is orthogonal to the projection direction. A processor array may be obtained by projecting the index points to the processor space. + +2. Replace the arcs in the DG with zero or nonzero delay edges between their corresponding processors. The delay on each edge is determined by the timing and is equal to the number of time steps needed for the corresponding arcs. + +3. Since each node has been projected to a PE and each input (or output) data is connected to some nodes, it is now possible to attach the input and output data to their corresponding processors. + +## A.2. The Transformation of DG + +Besides the direction of the projection and the schedule, the choice of a particular DG for an algorithm can greatly affect the performance of the resulting array. The following are the two most common transformations of the DG seen in the literature: + +### Reindexing + +A useful technique for modifying the DG is to apply a coordinate transformation to the index space (called *reindexing*). Examples for reindexing are plane-by-plane shifting or circular shifting in the index space. For instance, when there is no permissible linear schedule or systolic schedule for the original DG, it is often desirable to modify the DG so that such a desired schedule may be obtained. The effect of this method is equivalent to the re-timing method [13]. + +### Localized dependence graph + +A locally recursive algorithm is an algorithm whose corresponding DG has only local dependencies—all variables are (directly) dependent upon the variables of neighboring nodes only. The length of each dependency arc is independent of the problem size. 
+ +On the other hand, a non-localized recursive algorithm has global interconnections/dependencies. For example, a same datum will be used by many operations, i.e., the same data value will repeatedly appear in a set of index points in the recursive algorithm or DG. As popularly adopted, the operation nodes receive the datum by broadcasting. The data are called *broadcast data* and this set is termed a broadcast contour. Such a non-localized recursive algorithm, when mapped onto an array processor, is likely to result in an array with global interconnections. + +In general, global interconnections are more expensive than localized interconnections. In certain instances, such global arcs can be avoided by using a proper projection direction in the mapping schemes. To guarantee a locally interconnected array, a localized recursive algorithm would be derived (and, equivalently, a localized DG). In many cases, such broadcasting can be avoided and replaced by local communication. For example, in Fig. 23, the variable $s[u+i, u+j]$ and $r[i, j]$ in the inner three loops of the BMA (cf. Fig. 22(b)) are replaced by local variables $s[u,v,i,j]$ and $r[u,v,i,j]$ respectively. The key point is that instead of broadcasting the (public) data along a global arc, the same data may be propagated step by step via local arcs without being modified to all the nodes. This kind of data, which is propagated without being modified, is called *transmittent data*. + +## A.3. General Formulation of Optimization Problems + +It takes more efforts to find an optimal and permissible linear scheduling than it does to find a permissible linear scheduling. In this section, we show how to derive an optimal design. + +*Optimization Criteria.* Optimization plays an important role in implementing systems. 
In terms of parallel processing, there are many ways to evaluate of a de- +---PAGE_BREAK--- + +sign: one is to measure by the completion time (T), another one is to measure by the product of the VLSI chip area and the completion time (A × T) [12]. In general, the optimization problems can be categorized into: + +1. To find a best scheduling that minimizes the execution time, for given constraints on the number of processing units [25]. + +2. To minimize the cost (area, power, etc.) under certain given timing constraints [19]. + +In either case, such tasks are proved to be NP-hard. In this paper, we focus on how to find an optimal schedule given an array structure—the timing is an optimization goal, not a constraint. + +**Basic Formula.** First, we know that the computation time of a systolic array can be written as + +$$T = \max_{\mathcal{L}_x, \mathcal{L}_y} \{\vec{s}^T (\mathcal{L}_x - \mathcal{L}_y)\} + 1$$ + +where $\mathcal{L}_x$ and $\mathcal{L}_y$ are two computation nodes in the DG. + +The optimization problem becomes the following min-max formulation: + +$$\vec{s}_{op} = \arg \left[ \min_{\vec{s}} \left[ \max_{\mathcal{L}_x, \mathcal{L}_y} \{\vec{s}^T (\mathcal{L}_x - \mathcal{L}_y)\} + 1 \right] \right]$$ + +under the following two constraints: $\vec{s}^T \vec{d} > 0$ and $\vec{s}^T \vec{e} > 0$, for any dependence arc $\vec{e}$. + +The minimal computation time schedule $\vec{s}$ can be found by solving the proper integer linear programming [12, 21, 25] or quadratic programming [26]. + +### A.4. Partitioning Methods + +As DSP systems grow too complex to be contained in a single chip, partitioning is used to design a system into multi-chip architectures. In general, the mapping scheme (including both the node assignment and scheduling) will be much more complicated than the regular projection methods discussed in the previous sections because it must optimize chip area while meeting constraints on throughput, input/output timing and latency. 
The design takes into consideration I/O pins, inter-chip communication, control overheads, and tradeoff between external communication and local memory. + +For a systematic mapping from the DG onto a systolic array, the DG is regularly partitioned into many blocks, each consisting of a cluster of nodes in the DG. As shown in Fig. 24, there are two methods for mapping the partitioned DG to an array: the locally sequential globally parallel (LSGP) method and the locally parallel globally sequential (LPGS) method [11]. + +For convenience of presentation, we adopt the following mathematical notations. Suppose that an $n$-dimensional DG is linear projected to an $(n-1)$-dimensional SFG array of size $L_1 \times L_2 \times \cdots \times L_{n-1}$. The SFG is partitioned into $M_1 \times M_2 \times \cdots \times M_{n-1}$ blocks, where each block is of size $Z_1 \times Z_2 \times \cdots \times Z_{n-1}$. $Z_i = L_i/M_i$ for $i \in \{1, 2, \cdots, n-1\}$, + +**Allocation.** + +1. In the LSGP scheme, one block is mapped to one PE. Each PE sequentially executes the nodes of the corresponding block. The number of blocks is equal to the number of PEs in the array, i.e., the array size equals to the product $M_1 \times M_2 \times \cdots \times M_{n-1}$. + +2. In the LPGS scheme, the block size is chosen to match the array size, i.e., one block can be mapped to one array. All nodes within one block are processed concurrently, i.e., locally parallel. One block after another block of node data is loaded into the array and processed in a sequential manner, i.e., globally sequential. + +**Scheduling.** In LSGP, after processor allocation, from the processor sharing perspective, there are $Z_1 \times Z_2 \times \cdots \times Z_{n-1}$ nodes in each block in the SFG, which share one PE. An acceptable (i.e., sufficiently slow) schedule is chosen so that at any instant there is at most one active PE in each block. 
+ +As to the scheduling scheme for the LPGS method, a general rule is to select a (global) scheduling that does not violate the data dependencies. Note that the LPGS design has the advantage that blocks can be executed one after another in a natural order. However, this simple ordering is valid only when there is no reverse data dependence for the chosen blocks. + +**Generalized Partitioning Method.** A unified partitioning and scheduling scheme is proposed for LPGS and LSGP in [9]. The main contribution includes a unified partitioning model and a systematic two-level scheduling scheme. The unified partitioning model can support LPGS and LSGP design in the same manner. +---PAGE_BREAK--- + +The systematic two-level scheduling scheme can spec- +ify the intra-processor schedule and inter-processor +schedule independently. Hence, more inter-processor +parallelism can be effectively explored. + +A general frame work for processing mapping is also +proposed in [17, 18]. + +Optimization for Partitioning. The problem of find- +ing an optimal (or reasonably small) schedule is a NP- +hard problem. A systematic methodology for optimal +partitioning is described in [23]. + +Acknowledgements + +This work was supported in part by Sarnoff Research +Center, Mitsubishi Electric, and the George Van Ness +Lothrop Honorific Fellowship. + +References + +1. J. Baek, S. Nam, M. Lee, C. Oh, and K. Hwang, "A Fast Array Architecture for Block Matching Algorithm," *Proc. of IEEE Symposium on Circuits and Systems*, vol. 4, pp. 211–214, 1994. +2. S. Chang, J.-H. Hwang, and C.-W. Jen, "Scalable Array Architecture Design for Full Search Block Matching," *IEEE Trans. on Circuits and Systems for Video Technology*, vol. 5, no. 4, pp. 332–343, Aug. 1995. +3. Y.-K. Chen and S. Y. Kung, "An Operation Placement and Scheduling Scheme for Cache and Communication Localities in Fine-Grain Parallel Architectures," in *Proc. of Int'l Symposium on Parallel Architectures, Algorithms and Networks*, pp. 
390–396, Dec. 1997. +4. L. De Vos, "VLSI-architectures for the Hierarchical Block-Matching Algorithm for HDTV Applications," *SPIE Visual Communications and Image Processing*, vol. 1360, pp. 398–409, 1990. +5. L. De Vos and M. Stegherr, "Parameterizable VLSI Architectures for Full-Search Block-Matching Algorithm," *IEEE Trans. on Circuits and Systems*, vol. 36, no. 10, pp. 1309–1316, Oct. 1989. +6. D. Le Gall, "MPEG: A Video Compression Standard for Multimedia Applications," *Communications of the ACM*, vol. 34, no. 4, Apr. 1991. +7. K. Guttag, R. J. Gove, and J. R. V. Aken, "A Single-Chip Multiprocessor For Multimedia: The MVP," *IEEE Computer Graphics & Applications*, vol. 11, no. 6, pp. 53–64, Nov. 1992. +8. C.-H. Hsieh and T.-P. Lin, "VLSI Architecture for Block-Matching Motion Estimation Algorithm," *IEEE Trans. on Circuits and Systems for Video Technology*, vol. 2, no. 2, pp. 169–175, June 1992. +9. Y.-T. Hwang and Y.-H. Hu, "A Unified Partitioning and Scheduling Scheme for Mapping Multi-Stage Regular Iterative Algorithms onto Processor Arrays," *Journal of VLSI Signal Processing Applications*, vol. 11, pp. 133–150, Oct. 1995. + +10. T. Komarek and P. Pirsch, "Array Architectures for Block Matching Algorithms," *IEEE Trans. on Circuits and Systems*, vol. 36, no. 10, pp. 1301-1308, Oct. 1989. +11. S. Y. Kung, *VLSI Array Processors*. Englewood Cliffs, NJ: Prentice Hall, 1988. +12. G.-J. Li and B. W. Wah, "The Design of Optimal Systolic Array," *IEEE Trans. on Computer*, vol. 34, no. 1, pp. 66-77, Jan. 1985. +13. N. L. Passos and E. H.-M. Sha, "Achieving Full Parallelism Using Multidimensional Retiming," *IEEE Trans. on Parallel and Distributed Systems*, vol. 7, no. 11, pp. 1150-1163, Nov. 1996. +14. P. Pirsch, N. Demassieux, and W. Gehrke, "VLSI Architectures for Video Compression-A Survey," *Proceedings of the IEEE*, vol. 83, no. 2, pp. 220-246, Feb. 1995. +15. F. Sijstermans and J. 
van der Meer, "CD-1 Full-Motion Video Encoding on a Parallel Computer," *Communications of the ACM*, vol. 34, no. 4, pp. 81-91, Apr. 1991. +16. M.-T. Sun, "Algorithms and VLSI Architectures for Motion Estimation," *VLSI Implementations for Image Communications*, pp. 251-282, 1993. +17. J. Teich and L. Thiele, "Partitioning of Processor Arrays: a Piecewise Regular Approach," *INTEGRATION: The VLSI Journal*, vol. 14, no. 3, pp. 297-332, 1993. +18. J. Teich, L. Thiele, and L. Zhang, "Partitioning Processor Arrays under Resource Constraints," *Journal of VLSI Signal Processing*, vol. 17, no. 1, pp. 5-20, Sept. 1997. +19. W.F. Verhaegh, P.E. Lippens, E.H.Aarts, J.H.Korst,J.L.van Meerbergen,and A.van der Werf,"Improved Force-directed Scheduling in High-throughput Digital Signal Processing,"*IEEE Trans.on Computer-Aided Design of Integrated Circuits and Systems*, vol. 14, no. 8, pp. 945-960,Aug 1995. +20.B.-M.Wang,J.-C.Yen.,and S.Chang,"Zero Waiting-Cycle Hierarchical Block Matching Algorithm and its Array Architectures,"*IEEE Trans.on Circuits and Systemsfor Video Technology*, vol. 4, no. 4, pp. 18-28, Feb. 1994. +21.Y.Wong and J.-M.Delosme,"Optimization of Computation Time for Systolic Array,"*IEEE Trans.on Computer*, vol. 41, no. 2, pp. 159-177, Feb. 1992. +22.H.Yeo and Y.-H.Hu,"A Novel Modular Systolic Array Architecture for Full-Search Block Matching Motion Estimation","*IEEE Trans.on Circuits and Systems for Video Technology*, vol. 5, no. 5, pp. 407-416, Oct. 1995. +23.K.-H.Zimmermann,"A Unifying Lattice-Based Approach for the Partitioning of Systolic Arrays via LPGS and LSGP,"*Journal of VLSI Signal Processing*, vol. 17, no. 1, pp. 21-47, Sept. 1997. +24.K.-H.Zimmermann,"Linear Mappings of n-Dimensional Uniform Recurrences onto k-Dimensional Systolic Array","*Journal of Signal Processing System for Signal, Image, and Video Technology*, vol. 12, no. 2, pp. 187-202, May 1996. 
25. K.-H. Zimmermann and W. Achtziger, "Finding Space-Time Transformations for Uniform Recurrences via Branching Parametric Linear Programming," *Journal of VLSI Signal Processing*, vol. 15, no. 3, pp. 259-274, 1997. +26. K.-H. Zimmermann and W. Achtziger, "On Time Optimal Implementation of Uniform Recurrences onto Array Processors via Quadratic Programming," *Journal of VLSI Signal Processing*, vol. 19, no. 1, pp. 19-38, 1998.
We develop a suite of fundamental results and techniques, and also establish connections with a number of related areas: Via the topological realisations of $k$-graphs introduced in [21], we establish connections with the cubical approach to algebraic topology used in [30]. We also show in an appendix how our approach connects the theory of $k$-graphs to the theory of cubical sets discussed in, for example, [5, 13, 14, 15, 19]. Our key motivation, however, is that our homology theory and in particular the associated cohomology theory promises to have an interesting application to $C^*$-algebras. We discuss this application in Section 7: we introduce the cohomology theory corresponding to our homology and show that $\mathbb{T}$-valued 2-cocycles on a $k$-graph can be used to twist its $C^*$-algebra. As examples we obtain all noncommutative tori and the Heegaard-type quantum 3-spheres of Baum, Hajac, Matthes and Szymański (see [1]). A more detailed study of the cohomology of $k$-graphs and the structure theory of the associated $C^*$-algebras will be the subject of future work. + +Higher-rank graphs, or $k$-graphs, were introduced by the first two authors in [25] as a combinatorial model for the higher-rank Cuntz-Krieger algebras discovered and analysed by Robertson and Steger [38], and to unify the constructions of many other interesting $C^*$-algebras [24]. The $C^*$-algebras of higher-rank graphs have been studied by numerous authors over the last decade (see, for example, [6, 7, 10, 11, 40, 41, 43]). + +The combinatorial properties of a $k$-graph suggest a sort of $k$-dimensional directed graph, and this point of view has been borne out in numerous ways in the study of $k$-graph $C^*$-algebras. More recently, however, it has begun to suggest relationships with topology. + +*Date:* 7 October 2011. + +*2010 Mathematics Subject Classification.* Primary 46L05; Secondary 18G60, 55N10. 
+ +*Key words and phrases.* higher-rank graph; $C^*$-algebra; homology; cubical set; topological realization. + +This research was supported by the ARC. Part of the work was completed while the first author was employed at the University of Wollongong on the ARC grant DP0984360. +---PAGE_BREAK--- + +These connections first arose in [33, 34] where a theory of coverings and a notion of fundamental group for $k$-graphs was developed. These notions closely parallel the topological theory, but were motivated by $C^*$-algebraic considerations: the authors demonstrated that coverings of $k$-graphs correspond to relative skew products which in turn correspond to coaction crossed products and crossed products by homogeneous spaces. + +The topological flavour of some of the results of [33, 34] suggest that each $k$-graph should have a topological realisation, which would be a $k$-dimensional CW complex, and that the $k$-graph could profitably be viewed as a combinatorial version of its topological realisation [33, Section 6]. Current work of the first and third authors with Kaliszewski and Quigg [21] bears this idea out, showing in particular that the fundamental groups of a $k$-graph and of its topological realisation are isomorphic and that many well-known $k$-graph constructions are well-behaved with respect to fundamental groups. + +In the current paper, we expand on this idea further by commencing the study of homology of higher-rank graphs. After recalling basic definitions and notation in Section 2, we proceed in Section 3 to define our homology, prove that it is a functor, show that we can measure connectedness by the 0-th homology group, and show that the 1-cycles correspond naturally to integer combinations of undirected cycles in the $k$-graph. + +In Section 4, we prove analogs of a number of standard theorems in algebraic topology for our homology. 
For example we show that the Künneth formula holds for the homology of a cartesian product of higher-rank graphs, and that the homology of the quotient of an acyclic $k$-graph by a free action of a discrete group $G$ is isomorphic to the homology of $G$. We also show that every automorphism of a $k$-graph induces a long exact sequence in homology which corresponds exactly to the long exact sequence for a mapping torus. + +In Section 5, we use a combination of these results and direct calculation to describe examples of 2-graphs whose homology is identical to that of the sphere, the torus, the Klein bottle and the projective plane respectively; we also present these examples in a way which indicates that their topological realisations should coincide with these four spaces. Details of these homeomorphisms will appear in [21]. In Section 6, we use an argument based on that given by Hatcher for simplicial complexes and singular homology [17], to show that our homology for a $k$-graph agrees with the singular homology of its topological realisation. This suggests strongly that our homology theory is a reasonable one for $k$-graphs. + +Section 7 gives a taste of the $C^*$-algebraic application which motivates our study of homology for $k$-graphs: twisted $k$-graph $C^*$-algebras. We briefly discuss the cohomology of a higher-rank graph and check that it satisfies the Universal Coefficient Theorem. We introduce the notion of the $C^*$-algebra of higher-rank graph twisted by a T-valued 2-cocycle, and show that the isomorphism class of the $C^*$-algebra depends only on the cohomology class of the cocycle. We then consider some basic examples of finite $k$-graphs whose twisted $C^*$-algebras capture the noncommutative tori and the Heegaard-type quantum 3-spheres of [1]. + +Our homology is modeled on the cubical version of singular homology in [30] and is closely related to the homology of a cubical set introduced by Grandis [14]. 
We establish in Appendix A that a $k$-graph $\Lambda$ determines a cubical set $\tilde{Q}(\Lambda)$, and that our homology of $\Lambda$ is isomorphic to Grandis' homology of $\tilde{Q}(\Lambda)$. Hence, in principle, some of our earlier results (Theorem 4.9 and part of the statement of Theorem 4.3) could be recovered from +---PAGE_BREAK--- + +Grandis'. However we provide a self-contained treatment avoiding unnecessary complications involving degeneracy maps: we believe that the resulting simplicity of presentation justifies our approach. We demonstrate in Appendix B, that the topological realisation of a *k*-graph as described in [21] is homeomorphic to the topological realisation, outlined in [14], of the associated cubical set. + +**Acknowledgements.** The idea that homology of *k*-graphs might be of interest first arose from the study of topological realizations (see [21, 33]), which was suggested by John Quigg. We thank Mike Whittaker for a number of helpful discussions and in particular for his contributions to Examples 5.7 and 5.6. The second author thanks his coauthors for their hospitality. + +## 2. PRELIMINARIES + +As in [27], in our definition of a *k*-graph we will allow for the possibility of 0-graphs with the convention that $\mathbb{N}^0$ is the trivial semigroup $\{0\}$. We insist that all *k*-graphs are nonempty. + +We adopt the conventions of [27, 33] for *k*-graphs. Given a nonnegative integer *k*, a *k*-graph is a nonempty countable small category $\Lambda$ equipped with a functor $d : \Lambda \to \mathbb{N}^k$ satisfying the factorisation property: for all $\lambda \in \Lambda$ and $m, n \in \mathbb{N}^k$ such that $d(\lambda) = m+n$ there exist unique $\mu, \nu \in \Lambda$ such that $d(\mu) = m$, $d(\nu) = n$, and $\lambda = \mu\nu$. When $d(\lambda) = n$ we say $\lambda$ has degree *n*. We often use the same symbol $d$ to denote the degree functor in all *k*-graphs in this paper. 
+ +For $k \ge 1$, the standard generators of $\mathbb{N}^k$ are denoted $e_1, \dots, e_k$, and for $n \in \mathbb{N}^k$ and $1 \le i \le k$ we write $n_i$ for the $i^{th}$ coordinate of $n$. For $n = (n_1, \dots, n_k) \in \mathbb{N}^k$ let $|n| = \sum_{i=1}^k n_i$. If $\Lambda$ is a *k*-graph, then for $\lambda \in \Lambda$, we write $\lambda$ for $|d(\lambda)|$. For $m, n \in \mathbb{N}^k$ we write $m \le n$ if $m_i \le n_i$ for all $i \le k$. We often implicitly identify $\mathbb{N}^{k_1+k_2} = \mathbb{N}^{k_1} \times \mathbb{N}^{k_2}$. + +Given a *k*-graph $\Lambda$ and $n \in \mathbb{N}^k$, we write $\Lambda^n$ for $d^{-1}(n)$. The vertices of $\Lambda$ are the elements of $\Lambda^0$. The factorisation property implies that $o \mapsto \text{id}_o$ is a bijection from the objects of $\Lambda$ to $\Lambda^0$. The domain and codomain maps in the category $\Lambda$ therefore determine maps $s, r : \Lambda \to \Lambda^0$: for $\alpha \in \Lambda$, the source $s(\alpha)$ of $\alpha$ is the identity morphism associated with the object $\text{dom}(\alpha)$ and similarly, $r(\alpha) = \text{id}_{\text{cod}(\alpha)}$. An edge in a *k*-graph is a morphism $f$ with $d(f) = e_i$ for some $i = 1, \dots, k$. In keeping with graph terminology an element $\lambda \in \Lambda$ is often called a *path*. + +A 0-graph is then a countable category whose only morphisms are the identity morphisms, which we regard as a collection of isolated vertices. + +Each 1-graph $\Lambda$ is the path-category of the directed graph with vertices $\Lambda^0$ and edges $\Lambda^1$ and range and source maps inherited from $\Lambda$. Conversely, if $E$ is a directed graph, then its path-category $E^*$ is a 1-graph under the length function. This leads to the unusual convention that a path in $E$ is a sequence of edges $\alpha_1 \cdots \alpha_n$ such that $s(\alpha_i) = r(\alpha_{i+1})$ for all $i$, and we write $r(\alpha) = r(\alpha_1)$ and $s(\alpha) = s(\alpha_n)$. 
+ +Let $\lambda$ be an element of a *k*-graph $\Lambda$ and suppose that $0 \le m \le n \le d(\lambda)$. By the factorisation property there exist unique elements $\alpha \in \Lambda^m$, $\beta \in \Lambda^{n-m}$ and $\gamma \in \Lambda^{d(\lambda)-n}$ such that $\lambda = \alpha\beta\gamma$. We define $\lambda(m,n) := \beta$. We then have $\lambda(0,m) = \alpha$ and $\lambda(n,d(\lambda)) = \gamma$. In particular, for $0 \le m \le d(\lambda)$, + +$$\lambda = \lambda(0, m)\lambda(m, d(\lambda)).$$ + +For $v \in \Lambda^0$ and $E \subset \Lambda$, we write $vE$ for $E \cap r^{-1}(v)$ and $Ev$ for $E \cap s^{-1}(v)$. +---PAGE_BREAK--- + +**Definition 2.1** ([25, Definition 5.1] (see also [34))). Let $G$ be a discrete group, $(\Lambda, d)$ a $k$-graph and $c : \Lambda \to G$ a functor. The skew product $k$-graph $\Lambda \times_c G$ is defined as follows: as a set $\Lambda \times_c G$ is the cartesian product $\Lambda \times G$ and $d(\lambda, g) = d(\lambda)$ (so $(\Lambda \times_c G)^0 = \Lambda^0 \times G$) with + +$$s(\lambda, g) = (s(\lambda), gc(\lambda)) \quad \text{and} \quad r(\lambda, g) = (r(\lambda), g).$$ + +If $s(\lambda) = r(\mu)$ then $(\lambda, g)$ and $(\mu, gc(\lambda))$ are composable in $\Lambda \times_c G$ and + +$$ (2.1) \qquad (\lambda, g)(\mu, gc(\lambda)) = (\lambda\mu, g). $$ + +**Examples 2.2.** (1) For $k \ge 0$ let $T_k = \mathbb{N}^k$ regarded as a $k$-graph with $d : T_k \to \mathbb{N}^k$ the identity map. So $T_k$ has exactly one morphism of degree $n$ for each $n \in \mathbb{N}^k$, and in particular a single vertex 0. For $k \ge 1$, $T_k$ is generated by the $k$ commuting elements, $e_1, \dots, e_k$. + +(2) For $n \ge 1$ let $B_n$ be the path category of the directed graph with one vertex and $n$ distinct edges $f_1, \dots, f_n$. We refer to $B_n$ as the 1-graph associated to the bouquet of $n$-circles (see Example 4.11(1)). 
+ +(3) For $n \ge 2$ let $\mathbb{F}_n$ be the free group on $n$ generators $\{h_1, \dots, h_n\}$ and define the functor $c : B_n \to \mathbb{F}_n$ by $c(f_i) = h_i$ for $i = 1, \dots, n$. Let $A_n$ denote the skew product 1-graph $B_n \times_c \mathbb{F}_n$. The underlying directed graph associated to $A_n$ is the (right) Cayley graph of $\mathbb{F}_n$ and may be visualised as a uniform $n$-ary tree. + +(4) For $k \ge 1$ and $m \in (\mathbb{N} \cup \{\infty\})^k$, we write $\Omega_{k,m}$ for the $k$-graph with + +$$ \Omega_{k,m} := \{(p,q) \in \mathbb{N}^k \times \mathbb{N}^k : p \le q \le m\} $$ + +and with structure maps $r(p,q) := (p,p)$, $s(p,q) := (q,q)$, $d(p,q) := q-p$ and $(p,q)(q,r) := (p,r)$. Define $\Omega_0 := \{0\}$ and for $k \ge 1$ let $\Omega_k := \Omega_{k,(\infty,\dots,\infty)}$. + +(5) For $k \ge 1$, let $\Delta_k$ be the $k$-graph with $\Delta_k := \{(p,q) \in \mathbb{Z}^k \times \mathbb{Z}^k : p \le q\}$ and structure maps as in $\Omega_{k,m}$. + +(6) Let $(\Lambda_i, d_i)$ be a $k$-graph for $i=1,2$. The disjoint union $\Lambda_1 \sqcup \Lambda_2$ may be regarded as a $k$-graph with $d(\lambda) = d_i(\lambda)$ if $\lambda \in \Lambda_i$ and with other structure maps likewise inherited from the $\Lambda_i$. + +(7) Let $(\Lambda_i, d_i)$ be a $k_i$-graph for $i=1,2$. Then $(\Lambda_1 \times \Lambda_2, d_1 \times d_2)$ is a $(k_1+k_2)$-graph where $\Lambda_1 \times \Lambda_2$ is the product category and $d_1 \times d_2 : \Lambda_1 \times \Lambda_2 \to \mathbb{N}^{k_1+k_2}$ is given by $(d_1 \times d_2)(\lambda_1, \lambda_2) = (d_1(\lambda_1), d_2(\lambda_2)) \in \mathbb{N}^{k_1} \times \mathbb{N}^{k_2}$ for $\lambda_1 \in \Lambda_1$ and $\lambda_2 \in \Lambda_2$. + +Let $k_1, k_2 \ge 1$. Let $\pi_1 : \mathbb{Z}^{k_1+k_2} \to \mathbb{Z}^{k_1}$ denote the projection onto the first $k_1$ coordinates and $\pi_2 : \mathbb{Z}^{k_1+k_2} \to \mathbb{Z}^{k_2}$ denote the projection onto the last $k_2$ coordinates. 
We frequently regard $\pi_i$ as a homomorphism from $\mathbb{N}^{k_1+k_2}$ to $\mathbb{N}^{k_i}$. + +A *$k$-graph morphism* between $k$-graphs is a degree-preserving functor. There is a category whose objects are $k$-graphs and whose morphisms are $k$-graph morphisms. Whenever we regard $k$-graphs as objects of a category in this paper, it will be this one. + +**Examples 2.3.** + +(1) For $k_1, k_2 \ge 1$ we have $T_{k_1+k_2} = \mathbb{N}^{k_1+k_2} = \mathbb{N}^{k_1} \times \mathbb{N}^{k_2} = T_{k_1} \times T_{k_2}$. + +(2) For $k_1, k_2 \ge 1$ we have $\Delta_{k_1+k_2} \cong \Delta_{k_1} \times \Delta_{k_2}$. One checks that the map $(m,n) \mapsto ((\pi_1(m), \pi_1(n)), (\pi_2(m), \pi_2(n)))$ gives the desired isomorphism of $k$-graphs. + +It is sometimes useful to consider morphisms between higher-rank graphs which do not preserve degree. The following definition is from [27, §2]. + +**Definition 2.4.** Let $(\Lambda, d)$ be a $k$-graph and $(\Gamma, d')$ be an $\ell$-graph. A functor $\psi : \Lambda \to \Gamma$ is called a *quasimorphism* if there is a homomorphism $\pi : \mathbb{N}^k \to \mathbb{N}^\ell$ such that for all $\lambda \in \Lambda$ we have $\pi(d(\lambda)) = d'(\psi(\lambda))$. +---PAGE_BREAK--- + +*Example 2.5.* For $i=1,2$, let $(\Lambda_i, d_i)$ be a $k_i$-graph. Let $\Lambda_1 \times \Lambda_2$ the associated cartesian product $(k_1+k_2)$-graph. Since every element $\lambda \in \Lambda_1 \times \Lambda_2$ is of the form $\lambda = (\lambda_1, \lambda_2)$ where $\lambda_1 \in \Lambda_1$ and $\lambda_2 \in \Lambda_2$, for $i=1,2$ there is a natural functor $\psi_i : \Lambda_1 \times \Lambda_2 \to \Lambda_i$ given by $(\lambda_1, \lambda_2) \mapsto \lambda_i$; note that $\psi_i$ is a quasimorphism with $d_i \circ \psi_i = \pi_i \circ (d_1 \times d_2)$. + +**Definition 2.6.** Let $f: \mathbb{N}^k \to \mathbb{N}^l$ be a homomorphism and let $\Gamma$ be an $l$-graph. 
The pullback $f^*\Gamma$ is the $k$-graph $\{(\gamma, n) \in \Gamma \times \mathbb{N}^k : f(n) = d(\gamma)\}$ with degree map $d(\gamma, n) = n$ (see [25, Definition 1.9]). The structure maps are given by $r(\gamma, n) = (r(\gamma), 0)$ and $s(\gamma, n) = (s(\gamma), 0)$. If $s(\lambda) = r(\mu)$ in $\Gamma$ then $(\lambda, n)$ and $(\mu, m)$ are composable in $f^*\Gamma$, and + +$$ (2.2) \qquad (\lambda, n)(\mu, m) = (\lambda\mu, m+n). $$ + +All of the above is standard notation for *k*-graphs. In the remainder of this section we introduce some new notation related to *k*-graphs as a preliminary to the definition and basic properties of homology for *k*-graphs in Section 3. + +**Definition 2.7.** Let $\Lambda$ be a $k$-graph where $k \ge 1$. For $\lambda \in \Lambda$ and $m \in \{1, -1\}$, we define + +$$ s(\lambda, m) := \begin{cases} s(\lambda) & \text{if } m = 1 \\ r(\lambda) & \text{if } m = -1 \end{cases} \quad \text{and} \quad r(\lambda, m) := s(\lambda, -m). $$ + +An *undirected path* is a pair $(g, m)$ where $g = (g_1, \dots, g_n)$ is a sequence of edges in $\Lambda$ and $m = (m_1, \dots, m_n)$ is a sequence of orientations, $m_i \in \{1, -1\}$ such that $s(g_i, m_i) = r(g_{i+1}, m_{i+1})$ for all $i$. If $(g, m)$ is an undirected path, we define $s(g, m) := s(g_n, m_n)$ and $r(g, m) := r(g_1, m_1)$. If $r(g, m) = s(g, m)$, then we say that the undirected path $(g, m)$ is *closed*. + +A closed undirected path $(g, m)$ is called *simple* if $s(g_i, m_i) \neq s(g_j, m_j)$ for $i \neq j$. + +**Definition 2.8.** (cf. [33, §3]) A $k$-graph $\Lambda$ is *connected* if the equivalence relation on $\Lambda^0$ generated by $\{(r(\lambda), s(\lambda)) : \lambda \in \Lambda\}$ is $\Lambda^0 \times \Lambda^0$. + +*Remark 2.9.* A $k$-graph $\Lambda$ is connected if and only if for all $u, v \in \Lambda^0$ there is an undirected path with source $u$ and range $v$. 
+ +For each equivalence class $X \subseteq \Lambda^0$ from Definition 2.8, the $k$-graph $X\Lambda X$ is a connected component of $\Lambda$. Each $k$-graph is the disjoint union of its connected components. + +For $k \ge 0$ define $\mathbf{1}_k := \sum_{i=1}^k e_i \in \mathbb{N}^k$. By convention $\mathbf{1}_0 = 0 \in \mathbb{N}^0$. + +**Definition 2.10.** Let $\Lambda$ be a $k$-graph. For $r \ge 0$ let + +$$ Q_r(\Lambda) = \{\lambda \in \Lambda : d(\lambda) \leq \mathbf{1}_k, |\lambda| = r\}. $$ + +Let $Q(\Lambda) = \cup_{r \ge 0} Q_r(\Lambda)$. + +We have $Q_0(\Lambda) = \Lambda^0$, and $Q_r(\Lambda) = \emptyset$ if $r > k$. Let $0 < r \le k$. The set $Q_r(\Lambda)$ consists of the morphisms in $\Lambda$ which may be expressed as the composition of a sequence of $r$ edges with distinct degrees. We regard elements of $Q_r(\Lambda)$ as unit $r$-cubes in the sense that each one gives rise to a commuting diagram of edges in $\Lambda$ shaped like an $r$-cube. In particular, when $r \ge 1$, each element of $Q_r(\Lambda)$ has $2r$ faces in $Q_{r-1}(\Lambda)$ defined as follows. + +**Definition 2.11.** Fix $\lambda \in Q_r(\Lambda)$ and write $d(\lambda) = e_{i_1} + \cdots + e_{i_r}$ where $i_1 < \cdots < i_r$. For $1 \le j \le r$, define $F_j^0(\lambda)$ and $F_j^1(\lambda)$ to be the unique elements of $Q_{r-1}(\Lambda)$ such that there exist $\alpha, \beta \in \Lambda^{e_{i_j}}$ satisfying + +$$ F_j^0(\lambda)\beta = \lambda = \alpha F_j^1(\lambda). $$ +---PAGE_BREAK--- + +*Remark 2.12.* Equivalently, $F_j^0(\lambda) = \lambda(0, d(\lambda) - e_{i_j})$ and $F_j^1(\lambda) = \lambda(e_{i_j}, d(\lambda))$. If $1 \le i < j \le r$, then $F_i^\ell \circ F_j^m = F_{j-1}^m \circ F_i^\ell$ for $\ell, m \in \{0, 1\}$. + +**Notation 2.13.** Let $X$ be a set. We write $\mathbb{Z}X$ for the free abelian group generated by $X$ (so $\mathbb{Z}\emptyset = \{\emptyset\}$). + +*Remark 2.14.* Let $X$ and $Y$ be sets. 
Then every function $f : X \to Y$ extends uniquely to a homomorphism $f : \mathbb{Z}X \to \mathbb{Z}Y$. In particular, the inclusion maps induce an isomorphism $\mathbb{Z}(X \sqcup Y) \cong \mathbb{Z}X \oplus \mathbb{Z}Y$. Moreover there is an isomorphism $\mathbb{Z}(X \times Y) \cong \mathbb{Z}X \otimes \mathbb{Z}Y$ determined by $(x, y) \mapsto x \otimes y$. + +### 3. THE HOMOLOGY OF A k-GRAPH + +In this section we define the homology of a *k*-graph, compute some basic examples and provide descriptions of the first two homology groups. Throughout this paper, we use *r* (for rank) for the indexing subscript in complexes and in homology groups because *n* is more commonly used for a generic element of $\mathbb{N}^k$. + +**Definitions 3.1.** For $r \in \mathbb{N}$ let $C_r(\Lambda) = \mathbb{Z}Q_r(\Lambda)$. For $r \ge 1$, define $\partial_r : C_r(\Lambda) \to C_{r-1}(\Lambda)$ to be the unique homomorphism such that + +$$ (3.1) \qquad \partial_r(\lambda) = \sum_{\ell=0}^{1} \sum_{i=1}^{r} (-1)^{i+\ell} F_i^{\ell}(\lambda) \quad \text{for all } \lambda \in Q_r(\Lambda). $$ + +We write $\partial_0$ for the zero homomorphism $C_0(\Lambda) \to \{\emptyset\}$. + +*Remarks 3.2.* For $f \in Q_1(\Lambda)$ we have $F_1^0(f) = s(f)$ and $F_1^1(f) = r(f)$ and so $\partial_1(f) = s(f) - r(f)$. + +Fix $\lambda \in Q_2(\Lambda)$. Write $d(\lambda) = e_{j_1} + e_{j_2}$ with $j_1 < j_2$. Factorise $\lambda = f_1g_1 = g_2f_2$ where $d(f_i) = e_{j_1}$ and $d(g_i) = e_{j_2}$ for $i=1,2$. Then $F_2^0(\lambda) = \lambda(0, e_{j_1}) = f_1$, $F_2^1(\lambda) = \lambda(e_{j_2}, e_{j_1} + e_{j_2}) = f_2$, $F_1^0(\lambda) = \lambda(0, e_{j_2}) = g_2$ and $F_1^1(\lambda) = \lambda(e_{j_1}, e_{j_1} + e_{j_2}) = g_1$. Hence + +$$ (3.2) \qquad \partial_2(\lambda) = g_1 + f_1 - f_2 - g_2. $$ + +For $r \ge 0$, $\partial_r$ is a homomorphism and $\partial_r \circ \partial_{r+1} = 0$ by Remark 2.12. Hence we have the following. 
+ +**Lemma 3.3.** Let $\Lambda$ be a k-graph, then $(C_*(\Lambda), \partial_*)$ is a chain complex. + +We define the homology of $\Lambda$ to be the homology of the chain complex $C_*(\Lambda)$. + +**Definition 3.4.** For $r \in \mathbb{N}$ define $H_r(\Lambda) = \ker(\partial_r)/\operatorname{Im}\partial_{r+1}$. We call $H_r(\Lambda)$ the $r^{th}$ homology group of $\Lambda$ and we call $H_*(\Lambda)$ the homology of $\Lambda$. + +**Lemma 3.5.** Fix $n \in \mathbb{N}$. If $\psi : \Lambda_1 \to \Lambda_2$ is a k-graph morphism, then there is a homomorphism $\psi_* : H_r(\Lambda_1) \to H_r(\Lambda_2)$ determined by $\psi_*([λ]) = [\psi(λ)]$ for all $\lambda \in Q_r(\Lambda)$. Moreover, the assignments $\Lambda \mapsto H_r(\Lambda)$ and $\psi \mapsto \psi_*$ comprise a covariant functor from the category of k-graphs with k-graph morphisms to the category of abelian groups with homomorphisms. + +*Proof.* For $\lambda \in Q_r(\Lambda_1)$ we have $\psi(\lambda) \in Q_r(\Lambda_2)$ as $\psi$ is degree preserving. Since it preserves factorisations, $\psi$ intertwines the face maps on $Q_r(\Lambda_1)$ and $Q_r(\Lambda_2)$, so it intertwines the boundary maps $\partial_r$ and therefore defines a homomorphism $\psi_* : H_r(\Lambda_1) \to H_r(\Lambda_2)$. + +For the second assertion of the Lemma, we just have to check that $\psi \mapsto \psi_*$ preserves composition. This follows immediately from the definition. □ +---PAGE_BREAK--- + +*Remark 3.6.* For a $k$-graph $\Lambda$ and $r > k$, we have $Q_r(\Lambda) = \emptyset$, so $C_r(\Lambda)$ and $H_r(\Lambda)$ are trivial. + +*Remark 3.7.* Let $\Lambda_i$ be $k$-graphs for $i = 1,2$. Then the chain complex $C_*(\Lambda_1 \sqcup \Lambda_2)$ decomposes as the direct sum of the complexes $C_*(\Lambda_1)$ and $C_*(\Lambda_2)$. Thus the canonical inclusions of $\Lambda_1, \Lambda_2$ into $\Lambda_1 \sqcup \Lambda_2$ induce an isomorphism $H_*(\Lambda_1) \oplus H_*(\Lambda_2) \cong H_*(\Lambda_1 \sqcup \Lambda_2)$. 
Indeed, this isomorphism holds for countable disjoint unions of $k$-graphs. + +*Remark 3.8.* Let $\Lambda$ be a $k$-graph and let $\Lambda^{\text{op}}$ be the opposite category, which is a $k$-graph under the same degree map. We write $\lambda^{\text{op}}$ for an element $\lambda \in \Lambda$ when regarded as an element of $\Lambda^{\text{op}}$. For each $r$, the assignment $\lambda \mapsto (-1)^r \lambda^{\text{op}}$ induces an isomorphism $\phi_r : C_r(\Lambda) \to C_r(\Lambda^{\text{op}})$. Using that $F_i^l(\lambda^{\text{op}}) = F_i^{1-l}(\lambda)^{\text{op}}$ for all $\lambda \in Q_r(\Lambda)$, a calculation shows that $\partial_{r+1} \circ \phi_{r+1} = \phi_r \circ \partial_{r+1}$ for all $r$. So $\phi_*$ is an isomorphism of complexes and hence induces an isomorphism $H_*(\Lambda) \cong H_*(\Lambda^{\text{op}})$. + +*Examples 3.9.* (1) Let $T_0$ be the 0-graph of Examples 2.2 (1). Then $Q_0(T_0) = \{0\}$ and $Q_r(T_0) = \emptyset$ for all $r \ge 1$. Hence $C_0(T_0) = \mathbb{Z}\{0\}$ and $C_r(T_0) = \{0\}$ for all $r \ge 1$. Since $\partial_r = 0$ for all $r \ge 0$, we have $H_0(T_0) = \mathbb{Z}\{0\} \cong \mathbb{Z}$ and $H_r(T_0) = \{0\}$ for $r \ge 1$. + +(2) More generally, for $k \ge 1$, we have $Q_0(T_k) = \{0\}$, $Q_r(T_k) = \emptyset$ for all $r > k$ and + +$$Q_r(T_k) = \{e_{i_1} + \cdots + e_{i_r} \mid 1 \le i_1 < \cdots < i_r \le k\}$$ + +for $1 \le r \le k$. Thus $|Q_r(T_k)| = \binom{k}{r}$ for $0 \le r \le k$. For $1 \le j \le r \le k$, we have $F_j^0 = F_j^1$, so $\partial_r = 0$. Hence + +$$H_r(T_k) = \mathbb{Z}Q_r(T_k) \cong \mathbb{Z}(\binom{k}{r}) \quad \text{for } 0 \le r \le k,$$ + +and $H_r(T_k) = \{0\}$ for $r > k$. In particular $T_k$ has the same homology as the $k$-torus $\mathbb{T}^k$. + +**Definition 3.10.** Let $\Lambda$ be a $k$-graph and let $(g, m)$ be an undirected path in $\Lambda$ (see Definition 2.7). Then + +$$h = \sum_{i=1}^{n} m_i g_i \in C_1(\Lambda)$$ + +is called the *trail* associated to $(g, m)$. 
If $(g, m)$ is closed, then $h$ is said to be a *closed trail*. If in addition $(g, m)$ is simple, then $h$ is called a *simple closed trail*. + +*Remark 3.11.* Let $(g, m)$ be an undirected path in $\Lambda$ with source $u$ and range $v$. A straightforward computation shows that $\partial_1(h) = u - v$ where $h$ is the trail associated to $(g, m)$. Hence, if $h$ is a closed trail then $\partial_1(h) = 0$. If $h$ is a closed trail and $a \in \mathbb{Z}$ is nonzero then $ah$ is also a closed trail. + +**Proposition 3.12.** Let $\Lambda$ be a connected $k$-graph, then $H_0(\Lambda) \cong \mathbb{Z}$. + +*Proof.* Define a homomorphism $\theta : C_0(\Lambda) \to \mathbb{Z}$ by $\theta(v) = 1$ for all $v \in \Lambda^0$. It suffices to show that $\ker(\theta) \subset \operatorname{Im}(\partial_1)$, as the reverse inclusion is clear. + +Fix distinct $u, v \in \Lambda^0$. Since $\Lambda$ is connected there is an undirected path $(g, m)$ from $u$ to $v$. By Remark 3.11 $\partial_1(h) = u - v$ where $h$ is the trail associated to $(g, m)$. In particular, $u - v \in \operatorname{Im}(\partial_1)$. +---PAGE_BREAK--- + +Let $a = \sum_{i=1}^{n} m_i v_i \in \ker(\theta)$ with distinct $v_i$ and $m_i \neq 0$ for all $i$. We prove by induction on $n \ge 2$ that $\sum_{i=1}^{n} m_i v_i \in \operatorname{Im}(\partial_1)$. When $n=2$ we must have $m_1+m_2=0$. The preceding paragraph yields a trail $h$ such that $\partial_1(h) = v_1 - v_2$, and then $a = \partial_1(m_1h) \in \operatorname{Im}(\partial_1)$. + +Fix $n \ge 3$ and suppose the result holds for all $\ell$ with $n > \ell \ge 2$. Relabeling if necessary, we may assume that $m_1$ and $m_2$ have opposite sign, and $|m_1| \le |m_2|$. We give a proof for the case $m_1 > 0$, the case $m_1 < 0$ being similar. Since $\Lambda$ is connected there is an undirected path $(g_1, m_1)$ from $v_1$ to $v_2$. Let $h_1 \in C_1(\Lambda)$ be the associated trail. 
Then $\partial_1(h_1) = v_1 - v_2$ and + +$$a_1 = a - \partial_1(m_1 h_1) = (m_2 + m_1)v_2 + \sum_{i=3}^{n} m_i v_i.$$ + +By the inductive hypothesis $a_1 \in \operatorname{Im}(\partial_1)$ and so $a = a_1 + \partial(m_1 g_1) \in \operatorname{Im}(\partial_1)$. $\square$ + +Combining Proposition 3.12, Remark 3.7 and Remark 2.9 gives the following. + +**Corollary 3.13.** Let $\Lambda$ be a k-graph with $p$ connected components (where $p \in \{1, 2, \dots\} \cup \{\infty\}$). Then $H_0(\Lambda) \cong \mathbb{Z}^p$. In particular $\Lambda$ is connected if and only if $H_0(\Lambda) \cong \mathbb{Z}$. + +*Example 3.14.* Since $\Delta_1$ is connected we have $H_0(\Delta_1) \cong \mathbb{Z}$ by Proposition 3.12. We claim that $H_r(\Delta_1) = 0$ for all $r \ge 1$. By Remark 3.6 it suffices to check that $H_1(\Delta_1) = \{0\}$. To see this fix $f \in C_1(\Delta_1) \setminus \{0\}$. Then we may express $f = \sum_{i=\ell}^{m} a_i(i, i+1)$, where $a_i \in \mathbb{Z}$ and $a_m \ne 0$. Then + +$$ +\begin{align*} +\partial_1(f) &= \sum_{i=\ell}^{m} a_i ((i+1, i+1) - (i, i)) \\ +&= a_m(m+1, m+1) - a_\ell(\ell, \ell) + \sum_{i=\ell+1}^{m} (a_{i-1} - a_i)(i, i). +\end{align*} +$$ + +Since $a_m \neq 0$ it follows that $\partial_1(f) \neq 0$. So $\partial_1$ is injective and hence $H_1(\Delta_1) = \ker(\partial_1)$ is trivial. + +**Proposition 3.15.** Let $\Lambda$ be a k-graph. For each $a \in \ker \partial_1$, there exist simple closed trails $h_1, \dots, h_n$ in $C_1(\Lambda)$ such that $a = \sum_{i=1}^n m_i h_i$. + +*Proof*. For $a = \sum_{i=1}^n a_i f_i \in \ker \partial_1$ where the $f_i$ are distinct elements of $Q_1(\Lambda)$, set $N(a) := \sum_{i=1}^n |a_i|$. We proceed by induction on $N(a)$. If $N(a) = 0$, the result is trivial. Fix $N > 0$ and suppose as an inductive hypothesis that whenever $N(a) < N$, there are simple closed trails $h_i$ and integers $m_i$ such that $a = \sum_{i=1}^n m_i h_i$. Fix $a$ with $N(a) = N$. 
It suffices to show that there is a simple closed trail $h \in C_1(\Lambda)$ such that $N(a-h) < N(a)$. + +Recall from Definition 2.7 that if $p \in \{1, -1\}$ and $f \in Q_1(\Lambda)$, then $s(f,p)$ means $s(f)$ if $p=1$ and $r(f)$ if $p=-1$; and $r(f,p) = s(f, -p)$. + +Express $a = \sum_{i=1}^{n} a_i f_i$ where the $f_i$ are distinct elements of $Q_1(\Lambda)$, and each $a_i \neq 0$. Let $i_1 := 1$, let $p_1 := \operatorname{sign}(a_1)$. If $s(p_1) = r(p_1)$, then $h := p_1 f_1$ has the desired property. Otherwise, let $v_0 = r(p_1, p_1)$ and $v_1 = s(p_1, p_1)$. Since the coefficient of $v_1$ in $\partial_1(a)$ is zero, there must exist $i_2$ such that the coefficient of $v_1$ in $\partial_1(a_{i2}f_{i2})$ is nonzero with the opposite sign to that in $\partial_1(p_1f_{i1})$; let $p_2 := \operatorname{sign}(a_{i2})$ and let $v_2 = s(f_{i2}, p_2)$. Observe that $r(f_{i2}, p_2) = s(f_{i1}, p_1)$. We may continue iteratively, as long as the $v_i$ are all distinct, to choose an index $j$ such that $p_j := \operatorname{sign}(a_{ij})$ has the property that the coefficient of $v_{j-1}$ in $\partial_1(p_j f_{ij})$ has the opposite sign to that in $\partial_1(p_{j-1} f_{j-1})$ for each j. We then set +---PAGE_BREAK--- + +$v_j := s(f_{ij}, p_j)$, and observe that $r(f_{ij}, p_j) = v_{j-1}$. Since there are only finitely many nonzero coefficients in $a$, this process must terminate: there is a first $l$ such that $v_l \in \{v_0, v_1, \dots, v_{l-1}\}$; say $v_l = v_q$ where $q < l$. Then $h := \sum_{j=q+1}^l p_j f_{ij}$ is a simple closed trail. Since $p_j = \text{sign}(a_{ij})$ for each $j$, we have $N(a-h) = N(a) - (l-q) < N(a)$ as required. $\square$ + +## 4. FUNDAMENTAL RESULTS + +In this section we prove versions of a number of standard results in homology theory which suggest that our notion of homology for $k$-graphs is a reasonable one. 
In Appendix A, we will show that each $k$-graph determines in a fairly natural way a cubical set, and that our homology then agrees with that of Grandis [14]. So a number of results in this section could be recovered from Grandis’ work. However, it seems worthwhile to present self-contained proofs which are consistent with the notation and conventions associated with $k$-graphs. + +We begin with a version of the Künneth formula for our homology (see Theorem 4.3). In order to do this we must show how our chain complexes behave with respect to cartesian product of $k$-graphs. + +Recall from Example 2.5 that given a cartesian product graph $\Lambda_1 \times \Lambda_2$ there are quasi-morphisms $\psi_i : \Lambda_1 \times \Lambda_2 \to \Lambda_i$ consistent with the projections $\pi_i : \mathbb{N}^{k_1+k_2} \to \mathbb{N}^{k_i}$. + +**Lemma 4.1.** Let $(\Lambda_i, d_i)$ be a $k_i$-graph for $i=1,2$ and $\Lambda_1 \times \Lambda_2$ the associated cartesian product $(k_1+k_2)$-graph. Then for $r \ge 0$, we have $Q_r(\Lambda) = \bigsqcup_{r_1+r_2=r} Q_{r_1}(\Lambda_1) \times Q_{r_2}(\Lambda_2)$. Hence there is an isomorphism + +$$ (4.1) \qquad \Psi_r : C_r(\Lambda_1 \times \Lambda_2) \cong \bigoplus_{r_1+r_2=r} C_{r_1}(\Lambda_1) \otimes C_{r_2}(\Lambda_2) $$ + +given by $\Psi_r(\lambda_1, \lambda_2) = \lambda_1 \otimes \lambda_2$. + +*Proof*. For the first assertion, just note that $(d_1 \times d_2)(\lambda_1, \lambda_2) \le \mathbf{1}_{k_1+k_2}$ if and only if $d_i(\lambda_i) \le \mathbf{1}_{k_i}$ for $i=1,2$. So + +$$ +\begin{align*} +Q_r(\Lambda_1 \times \Lambda_2) &= \{(\lambda_1, \lambda_2) : (d_1 \times d_2)(\lambda_1, \lambda_2) \le \mathbf{1}_{k_1+k_2}, |\lambda_1| + |\lambda_2| = r\} \\ +&= \bigsqcup_{r_1+r_2=r} \{(\lambda_1, \lambda_2) : d_i(\lambda_i) \le \mathbf{1}_{k_i}, |\lambda_i| = r_i \text{ for } i=1,2\} \\ +&= \bigsqcup_{r_1+r_2=r} Q_{r_1}(\Lambda_1) \times Q_{r_2}(\Lambda_2). +\end{align*} +$$ + +The second assertion follows from Remark 2.14. 
$\square$ + +Recall from [29, V.9] that if $K$ and $L$ are chain complexes with boundary maps $\partial_r^K : K_r \to K_{r-1}$ and $\partial_r^L : L_r \to L_{r-1}$, then the tensor complex $K \otimes L$ is given by + +$$ (K \otimes L)_r = \bigoplus_{r_1+r_2=r} K_{r_1} \otimes L_{r_2}, $$ + +with boundary maps + +$$ (4.2) \qquad \partial_{r_1+r_2}^{K \otimes L}(k \otimes l) := \partial_{r_1}^K(k) \otimes l + (-1)^{r_1} k \otimes \partial_{r_2}^L(l) \quad \text{for all } k \in K_{r_1} \text{ and } l \in L_{r_2}. $$ + +The following is an analog of [14, Theorem 2.7]. + +**Proposition 4.2.** Let $\Lambda_i$ be a $k_i$-graph for $i=1,2$. The isomorphisms $\Psi_r$ of Lemma 4.1 induce an isomorphism of complexes $\Psi : C_*(\Lambda_1 \times \Lambda_2) \to C_*(\Lambda_1) \otimes C_*(\Lambda_2)$. +---PAGE_BREAK--- + +*Proof.* Fix $r_1, r_2$ such that $0 \le r_i \le k_i$ for $i = 1, 2$ and set $r = r_1 + r_2$. Let $\lambda_i \in Q_{r_i}(\Lambda_i)$ ($i = 1, 2$). Then for each $0 \le j \le k_1 + k_2$ and $\ell \in \{0, 1\}$, + +$$F_j^\ell(\lambda_1, \lambda_2) = \begin{cases} (F_j^\ell(\lambda_1), \lambda_2) & \text{if } 1 \le j \le r_1 \\ (\lambda_1, F_{j-r_1}^\ell(\lambda_2)) & \text{if } r_1+1 \le j \le r_1+r_2. \end{cases}$$ + +Hence by (3.1) we may calculate: + +$$\begin{align} +\partial_r(\lambda_1, \lambda_2) &= \sum_{\ell=0}^{1} \sum_{j=1}^{r} (-1)^{\ell+j} F_j^\ell(\lambda_1, \lambda_2) \\ +&= \sum_{\ell=0}^{1} \left( \sum_{j=1}^{r_1} (-1)^{\ell+j} (F_j^\ell(\lambda_1), \lambda_2) + \sum_{j=r_1+1}^{r_1+r_2} (-1)^{\ell+j} (\lambda_1, F_{j-r_1}^\ell(\lambda_2)) \right) \\ +&= \sum_{\ell=0}^{1} \sum_{j=1}^{r_1} (-1)^{\ell+j} (F_j^\ell(\lambda_1), \lambda_2) + \sum_{\ell=0}^{1} \sum_{h=1}^{r_2} (-1)^{\ell+h+r_1} (\lambda_1, F_h^\ell(\lambda_2)) \\ +(4.3) \qquad &= (\partial_{r_1}(\lambda_1), \lambda_2) + (-1)^{r_1} (\lambda_1, \partial_{r_2}(\lambda_2)). 
+\end{align}$$ + +It remains to show that for all $r$, + +$$\partial_r(\Psi_r(\lambda_1, \lambda_2)) = \Psi_{r-1}(\partial_r(\lambda_1, \lambda_2)).$$ + +By definition of the boundary map $\partial_r$ on $C_{r_1}(\Lambda_1) \otimes C_{r_2}(\Lambda_2)$ (see (4.2)), we have + +$$\begin{align*} +\partial_r(\Psi_r(\lambda_1, \lambda_2)) &= \partial_r(\lambda_1 \otimes \lambda_2) \\ +&= \partial_{r_1}(\lambda_1) \otimes \lambda_2 + (-1)^{r_1} \lambda_1 \otimes \partial_{r_2}(\lambda_2) \\ +&= \Psi_{r-1}(\partial_{r_1}(\lambda_1), \lambda_2) + (-1)^{r_1} (\lambda_1, \partial_{r_2}(\lambda_2)), +\end{align*}$$ + +and this is equal to $\Psi_{r-1}(\partial_r(\lambda_1, \lambda_2))$ by (4.3). $\square$ + +We may now state a Künneth formula for our homology. The map $\alpha$ was considered in [14, Theorem 2.7]. + +**Theorem 4.3.** Let $\Lambda_i$ be a $k_i$-graph for $i=1,2$. Then there is a split exact sequence + +$$0 \rightarrow \bigoplus_{r_1+r_2=r} H_{r_1}(\Lambda_1) \otimes H_{r_2}(\Lambda_2) \xrightarrow{\alpha} H_r(\Lambda_1 \times \Lambda_2) \xrightarrow{\beta} \bigoplus_{r_1+r_2=r-1} \operatorname{Tor}(H_{r_1}(\Lambda_1), H_{r_2}(\Lambda_2)) \rightarrow 0.$$ + +*The homomorphisms $\alpha$ and $\beta$ are natural with respect to maps induced by $k$-graph morphisms, but the splitting is not natural.* + +*Proof.* The result follows from Proposition 4.2 and [29, Theorem V.10.4] using the fact that $C_r(\Lambda)$ is torsion free for each $r$. $\square$ + +**Corollary 4.4.** Let $\Lambda_i$ be a $k_i$-graph for $i=1,2$. Suppose that for some $i$ the groups $H_r(\Lambda_i)$ are all torsion-free. Then the map $\alpha$ in Theorem 4.3 is an isomorphism, so + +$$H_r(\Lambda_1 \times \Lambda_2) \cong \bigoplus_{r_1+r_2=r} H_{r_1}(\Lambda_1) \otimes H_{r_2}(\Lambda_2).$$ + +*Example 4.5.* For $k \ge 2$, we have $T_k \cong T_1 \times \cdots \times T_1$ by Examples 2.3 (1). 
We claim that for $0 \le r \le k$ we have + +$$H_r(T_k) \cong \mathbb{Z}^{(k)_r}.$$ +---PAGE_BREAK--- + +For $k=0,1$ this follows by Examples 3.9. The general case follows by induction on $k$ +using Corollary 4.4. + +**Definition 4.6.** We say that a *k*-graph Λ is *acyclic* if $H_0(\Lambda) \cong \mathbb{Z}$ and $H_r(\Lambda) = 0$ for all $r \ge 1$. + +*Remark 4.7.* Let $\Lambda_i$ be an acyclic $k_i$-graph for $i = 1, 2$. Then by Corollary 4.4 it follows that $\Lambda_1 \times \Lambda_2$ is an acyclic $k_1 + k_2$-graph. + +*Examples 4.8.* (1) Note that by Examples 2.3 (2) we have $\Delta_k \cong \Delta_1 \times \cdots \times \Delta_1$ for $k \ge 2$. By Example 3.14 $\Delta_1$ is acyclic, and so by Remark 4.7 it follows that $\Delta_k$ is acyclic for all $k$. Indeed for $k \ge 1$ the $k$-graph $\Delta_k$ has the same homology as $\mathbb{R}^k$. + +(2) Let $\Lambda$ be a connected 1-graph which is a tree. By Proposition 3.12 we have $H_0(\Lambda) \cong \mathbb{Z}$. Since $\Lambda$ contains no closed undirected paths, $C_1(\Lambda)$ has no closed trails. Thus by Proposition 3.15 $\ker(\partial_1) = 0$ and hence $H_1(\Lambda) = 0$. Since $H_r(\Lambda) = 0$ for $r > 1$, it follows that $\Lambda$ is acyclic. + +The proof of the next result follows the argument used in [4, II.4.1]. This result may +also be deduced from [14, Theorem 3.3] using the identification of our homology with that +of the corresponding cubical set established in Theorem A.9. + +**Theorem 4.9.** Suppose that $\Lambda$ is an acyclic $k$-graph. If $G$ is a discrete group acting freely on $\Lambda$, then $H_*(\Lambda/G) \cong H_*(G, \mathbb{Z})$. + +*Proof.* If $M$ is a $G$-module, then we write $DM$ for the submodule of $M$ generated by the elements $\{gm - m : m \in M, g \in G\}$. We write $M_G$ for $M/DM$. Note that $M \mapsto M_G$ is a functor from the category of $G$-modules to the category of abelian groups (so it maps a complex of $G$-modules to a complex of abelian groups). 
If $G$ acts on a set $X$ then $\mathbb{Z}X$ may be regarded as a $G$-module and $\mathbb{Z}X_G \cong \mathbb{Z}(X/G)$ (see [4, § II.2]). + +Since $G$ acts freely on $\Lambda$, it acts freely on each $Q_r(\Lambda)$. Thus $C_r(\Lambda) = \mathbb{Z}Q_r(\Lambda)$ is a free +$G$-module. We have + +$$ +\mathbb{Z}Q_r(\Lambda)_G \cong \mathbb{Z}Q_r(\Lambda/G). +$$ + +Moreover, this isomorphism is compatible with the boundary maps. So if $C_*(\Lambda)_G$ denotes +the complex obtained from $C_*(\Lambda)$ by applying the functor $M \mapsto M_G$, then $C_*(\Lambda)_G \cong$ +$C_*(\Lambda/G)$. Since $\Lambda$ is acyclic, the sequence + +$$ +\dots \xrightarrow{\partial_3} C_2(\Lambda) \xrightarrow{\partial_2} C_1(\Lambda) \xrightarrow{\partial_1} C_0(\Lambda) \xrightarrow{\varepsilon} \mathbb{Z} \to 0 +$$ + +is a resolution of $\mathbb{Z}$ by free $G$-modules. Since the complex $C_*(\Lambda)_G$ is isomorphic to the +complex $C_*(\Lambda/G)$, we have + +$$ +H_*(C_*(\Lambda)_G) \cong H_*(C_*(\Lambda/G)) = H_*(\Lambda/G). +$$ + +Therefore, $H_*(G, \mathbb{Z}) \cong H_*(\Lambda/G)$. $\square$ + +Recall that the fundamental group $\pi_1(\Lambda)$ of a connected 1-graph $\Lambda$ is free (see for example [42, §2.1.8] or [24, §4]) and the universal cover $T$ is a tree. Thus $\Lambda$ may be realised as the quotient of $T$ by the action of $\pi_1(\Lambda)$; moreover, if $\Lambda$ has finitely many vertices and edges, then $\pi_1(\Lambda) \cong \mathbb{F}_p$, where $\mathbb{F}_p$ is the free group on $p$ generators and $p = |\Lambda^1| - |\Lambda^0| + 1$ (see [39, §I.3.3, Theorem 4]). Since $T$ is acyclic, we obtain the following result. +---PAGE_BREAK--- + +**Corollary 4.10.** Let $\Lambda$ be a connected 1-graph. Then $H_1(\Lambda) \cong H_1(\pi_1(\Lambda), \mathbb{Z})$. 
In particular if $\Lambda$ has finitely many vertices and edges, then $\pi_1(\Lambda) \cong \mathbb{F}_p$ where $p = |\Lambda^1| - |\Lambda^0| + 1$ and so + +$$H_1(\Lambda) \cong H_1(\mathbb{F}_p, \mathbb{Z}) \cong \mathbb{Z}^p.$$ + +*Examples 4.11.* (1) Recall from Examples 2.2 (2) that $B_n$ is the path category of a directed graph with a single vertex and $n$ edges, regarded as a 1-graph. The universal cover of $B_n$, which we denote $A_n$, is the skew-product $B_n \times_c \mathbb{F}_n$, and can be identified with the Cayley graph of $\mathbb{F}_n$, the free group on $n$ generators. By [25, Remark 5.6] $\mathbb{F}_n$ acts freely on $A_n$ with $A_n/\mathbb{F}_n \cong B_n$. By Corollary 4.10 we have $H_1(B_n) \cong \mathbb{Z}^n$. Hence $B_n$ has the same homology as the wedge of $n$ circles. + +(2) Let $H$ be a subgroup of $\mathbb{Z}^k$. Then as in [26, §6.4] $H$ acts freely on $\Delta_k$. Since $\Delta_k$ is acyclic, by Theorem 4.9 we have $H_*(\Delta_k/H) \cong H_*(H, \mathbb{Z})$. If $H \cong \mathbb{Z}^q$, then for $0 \le r \le k$ we have (cf. Example 4.5) + +$$H_r(\Delta_k/H) \cong \mathbb{Z}^{(q/r)}.$$ + +Hence $\Delta_k/H$ has the same homology as the *q*-torus. If $H$ has finite index then $q=k$ and the quotient graph $\Delta_k/H$ may be viewed as yet another *k*-graph analog of the *k*-torus (note $\Delta_k/H = T_k$ when $H = \mathbb{Z}^k$). + +(3) The following example indicates that Theorem 4.9 is in practise less useful than it might appear because it is difficult to recognise acyclic *k*-graphs (short of explicitly computing their homology). In particular one might expect that a pullback of an acyclic *k*-graph by a full-rank endomorphism of $\mathbb{N}^k$ is itself acyclic, but this is not so. + +Let $\Lambda$ be the 2-graph with $\Lambda^0 = \{\mathrm{v}\}$, $\Lambda^{\mathrm{e}_1} = \{a_1, a_2\}$, $\Lambda^{\mathrm{e}_2} = \{b_1, b_2\}$, and factorisation property determined by $a_i b_j = b_i a_j$ for $i,j=1,2$. 
Recall that we denote the generators of $\mathbb{F}_2$ by $h_1$ and $h_2$. There is a functor $\sigma: \Lambda \to \mathbb{F}_2 \times \mathbb{Z}$ determined by $\sigma(a_i) = (h_i, 0)$ and $\sigma(b_i) = (h_i, 1)$. Let $\Gamma := \Lambda \times_\sigma (\mathbb{F}_2 \times \mathbb{Z})$, and observe that by [25, Remark 5.6] $\mathbb{F}_2 \times \mathbb{Z}$ acts freely on $\Gamma$ with quotient $\Lambda$. + +Let $A_2 = B_2 \times_c \mathbb{F}_2$ as in (1) above. Define $g: \mathbb{N}^2 \to \mathbb{N}^2$ by $g(m,n) := (m+n,n)$. Tedious calculations show that $\Gamma$ is isomorphic to the pullback $g^*(A_2 \times \Delta_1)^1$. + +We claim that $\Gamma$ is not acyclic. Suppose that it is. Then Theorem 4.9 implies that $H_*(\Lambda) \cong H_*(\mathbb{F}_2 \times \mathbb{Z})$. By the Künneth theorem for group homology, since both $H_r(\mathbb{F}_2)$ and $H_r(\mathbb{Z})$ are trivial for $r \ge 2$, + +$$H_2(\mathbb{F}_2 \times \mathbb{Z}) = H_1(\mathbb{F}_2) \otimes H_1(\mathbb{Z}) \cong \mathbb{Z}^2.$$ + +A straightforward computation shows that $a_1b_1$, $a_2b_2$ and $a_1b_2 + a_2b_1$ all belong to +$\ker(\partial_2) = H_2(\Lambda)$, so the latter has rank at least three, giving a contradiction. + +So $\Gamma$ is not acyclic, despite being a pull-back of the acyclic graph $A_2 \times \Delta_1$ (see +Remark 4.7) by the full-rank endomorphism $g$. + +We now turn our attention to exact sequences of homology groups associated to au- +tomorphisms of *k*-graphs. Recall from [12] that if Λ is a *k*-graph and α is an automor- +phism of Λ, then there is a (*k* + 1)-graph Λ ×$_α$ Z with morphisms Λ × N, range and +source maps given by r(λ, n) = (r(λ), 0), s(λ, n) = (α⁻ⁿ(s(λ)), 0), degree map given + +¹This is not meant to be obvious. After unraveling the definitions of Γ and of g*(A₂ × Δ₁), one can check that the formulas (aᵢ, (h, n)) ↦ ((fᵢ, h), (n, n)), (1, 0)) and (bᵢ, (h, n)) ↦ (((fᵢ, h), (n, n+1)), (0, 1)) for i = 1, 2 determine the desired isomorphism. 
+---PAGE_BREAK--- + +by $d(\lambda, n) = (d(\lambda), n)$ and composition given by $(\lambda, m)(\mu, n) := (\lambda \alpha^m(\mu), m + n)$. In particular $(\Lambda \times_\alpha \mathbb{Z})^0 = \Lambda^0 \times \{0\}$. + +We may describe the cubes of $\Lambda \times_\alpha \mathbb{Z}$ in terms of those of $\Lambda$ as follows: $Q_0(\Lambda \times_\alpha \mathbb{Z}) = Q_0(\Lambda) \times \{0\}$ and for $0 \le r \le k$ an element of $Q_{r+1}(\Lambda \times_\alpha \mathbb{Z})$ is of the form $(\lambda, 0)$ where $\lambda \in Q_{r+1}(\Lambda)$ or $(\lambda, 1)$ where $\lambda \in Q_r(\Lambda)$, so + +$$ (4.4) \qquad Q_{r+1}(\Lambda \times_{\alpha} \mathbb{Z}) = (Q_{r+1}(\Lambda) \times \{0\}) \sqcup (Q_r(\Lambda) \times \{1\}). $$ + +Given an element $a = \sum a_\lambda \lambda \in C_r(\Lambda)$, we shall somewhat inaccurately write $(a, 0) := \sum a_\lambda(\lambda, 0)$ and $(a, 1) := \sum a_\lambda(\lambda, 1)$ for the corresponding elements of $C_r(\Lambda \times_\alpha \mathbb{Z})$ and $C_{r+1}(\Lambda \times_\alpha \mathbb{Z})$. With this notation, the boundary map on $C_{r+1}(\Lambda \times_\alpha \mathbb{Z})$ is given by + +$$ (4.5) \qquad \begin{aligned} \partial_{r+1}(\lambda, 0) &= (\partial_{r+1}(\lambda), 0) && \text{and} \\ \partial_{r+1}(\mu, 1) &= (-1)^r ((\alpha^{-1}(\mu), 0) - (\mu, 0)) + (\partial_r(\mu), 1). \end{aligned} $$ + +We will deduce our long exact sequence for the homology of $\Lambda \times_\alpha \mathbb{Z}$ from the long exact sequence associated to a mapping-cone complex arising from the chain map $\alpha^{-1} - 1$ (see [29, Proposition II.4.3]). So we recall the definition of the mapping cone complex. Given a chain map $f : A_* \to B_*$, define a complex $M_* = M(f)_*$ by $M_r := A_{r-1} \oplus B_r$ (with the convention that $A_{-1} = \{0\}$) with boundary map + +$$ (4.6) \qquad \partial_r(a, b) := (-\partial_{r-1}(a), \partial_r(b) + f(a)). 
$$ + +If $\alpha$ is an automorphism of a $k$-graph $\Lambda$, then $\alpha^{-1}$ maps cubes to cubes and intertwines boundary maps, and so induces a chain map $\alpha^{-1} : C_*(\Lambda) \to C_*(\Lambda)$. Hence $\alpha^{-1} - 1$ is also a chain map from $C_*(\Lambda)$ to itself. + +**Lemma 4.12.** Let $\Lambda$ be a $k$-graph and let $\alpha$ be an automorphism of $\Lambda$. Then there is an isomorphism of chain complexes $\psi : C_*(\Lambda \times_\alpha \mathbb{Z}) \to M(\alpha^{-1} - 1)_*$ such that + +$$ \psi(\lambda, 0) = (0, \lambda) \quad \text{and} \quad \psi(\mu, 1) = ((-1)^r \mu, 0) $$ + +for all $(\lambda, 0), (\mu, 1) \in Q_{r+1}(\Lambda \times_\alpha \mathbb{Z})$. Hence, $\psi_* : H_*(\Lambda \times_\alpha \mathbb{Z}) \to H_*(M(\alpha^{-1}-1)_*)$ is an isomorphism. + +*Proof.* Write $M_* := M(\alpha^{-1}-1)_*$ and $C_* := C_*(\Lambda \times_\alpha \mathbb{Z})$. It is clear that $\psi$ determines isomorphisms of groups $C_r \cong M_r$. So to see that $\psi$ is an isomorphism of complexes, it suffices to show that it intertwines the boundary maps on generators. We consider cubes of the form $(\lambda, 0)$ and those of the form $(\mu, 1)$ separately. Fix $\lambda \in Q_{r+1}(\Lambda)$. We have $\partial_{r+1}(\psi(\lambda, 0)) = \partial_{r+1}(0, \lambda) = (0, \partial_{r+1}(\lambda))$ by (4.6), and $\psi(\partial_{r+1}(\lambda, 0)) = \psi(\partial_{r+1}(\lambda), 0) = (0, \partial_{r+1}(\lambda))$ by (4.5). So $\partial_{r+1}(\psi(\lambda, 0)) = \psi(\partial_{r+1}(\lambda, 0))$ as required. + +Now fix $\mu \in Q_r(\Lambda)$. Then we have + +$$ +\begin{align*} +\psi(\partial_{r+1}(\mu, 1)) &= \psi((-1)^r((\alpha^{-1}(\mu), 0) - (\mu, 0)) + (\partial_r(\mu), 1)) \\ +&= (-1)^r(\psi(\alpha^{-1}(\mu), 0) - \psi(\mu, 0)) + \psi(\partial_r(\mu), 1) \\ +&= (-1)^r(0, (\alpha^{-1}-1)(\mu)) + (-1)^{r-1}(\partial_r(\mu), 0) \\ +&= (-1)^r(-\partial_r(\mu), (\alpha^{-1}-1)(\mu)). 
+\end{align*} +$$ +---PAGE_BREAK--- + +On the other hand, + +$$ +\begin{align*} +\partial_{r+1} \psi((\mu, 1)) &= (-1)^r \partial_{r+1}(\mu, 0) \\ +&= (-1)^r (-\partial_r(\mu), \partial_{r+1}(0) + (\alpha^{-1} - 1)(\mu)) \\ +&= (-1)^r (-\partial_r(\mu), (\alpha^{-1} - 1)(\mu)). \quad \square +\end{align*} +$$ + +Now recall from [29, Proposition 4.3], that a chain map $f : A_* \to B_*$ determines a long exact sequence + +$$ +(4.7) \qquad \dots \to H_r(B_*) \xrightarrow{\iota_*} H_r(M(f)_*) \xrightarrow{\pi_*} H_{r-1}(A_*) \xrightarrow{\jmath_*} H_{r-1}(B_*) \to \dots +$$ + +where $\iota_* : H_r(B_*) \to H_r(M(f)_*)$ is induced by the inclusion map $\iota : B_r \to M(f)_r$, and +$\pi_* : H_r(M(f)_*) \to H_{r-1}(A_*)$ is induced by the projection $\pi : M(f)_r \to A_{r-1}$. + +The following result gives an exact sequence which may be regarded as an analog of the Pimsner-Voiculescu sequence for crossed products of $C^*$-algebras (cf. [35, Theorem 2.4], [3, Theorem 10.2.1]). + +**Theorem 4.13.** Let $\Lambda$ be a k-graph, and let $\alpha$ be an automorphism of $\Lambda$. Then there is an exact sequence + +$$ +\begin{align*} +0 \to H_{k+1}(\Lambda \times_\alpha \mathbb{Z}) \xrightarrow{\pi_*} H_k(\Lambda) \xrightarrow{\substack{1-\alpha_* \\ \alpha_*}} H_k(\Lambda) \xrightarrow{\iota_*} H_k(\Lambda \times_\alpha \mathbb{Z}) \to \dots \\ +\dots \to H_1(\Lambda \times_\alpha \mathbb{Z}) \xrightarrow{\pi_*} H_0(\Lambda) \xrightarrow{\substack{1-\alpha_* \\ \alpha_*}} H_0(\Lambda) \xrightarrow{\iota_*} H_0(\Lambda \times_\alpha \mathbb{Z}) \to 0. +\end{align*} +$$ + +Proof. 
The long exact sequence (4.7) applied with $f = \alpha^{-1}-1$ together with Lemma 4.12 (and identifying $H_*(\Lambda \times_\alpha \mathbb{Z}) \cong H_*(M(\alpha^{-1}-1)_*)$) gives a long exact sequence + +$$ +\begin{align*} +0 \to H_{k+1}(\Lambda \times_{\alpha} \mathbb{Z}) \xrightarrow{\pi_*} H_k(\Lambda) \xrightarrow{\alpha_*^{-1}-1} H_k(\Lambda) \xrightarrow{\iota_*} H_k(\Lambda \times_{\alpha} \mathbb{Z}) \to \dots \\ +\dots \to H_1(\Lambda \times_{\alpha} \mathbb{Z}) \xrightarrow{\pi_*} H_0(\Lambda) \xrightarrow{\alpha_*^{-1}-1} H_0(\Lambda) \xrightarrow{\iota_*} H_0(\Lambda \times_{\alpha} \mathbb{Z}) \to 0. +\end{align*} +$$ + +Since $\alpha_*$ is an automorphism of $H_r(\Lambda)$ which commutes with $\alpha_*^{-1} - 1$, both $\ker(\alpha_*^{-1} - 1)$ and $\operatorname{Im}(\alpha_*^{-1} - 1)$ are $\alpha_*$-invariant. Therefore + +$$ +\ker(\alpha_*^{-1} - 1) = \ker(\alpha_*(\alpha_*^{-1} - 1)) = \ker(1 - \alpha_*) +$$ + +and similarly, $\mathrm{Im}(\alpha_*^{-1} - 1) = \mathrm{Im}(1 - \alpha_*)$. $\square$ + +*Remark 4.14.* Theorem 4.13 may also be proved using the topological realizations, introduced in [21] (see also Section 6), of $\Lambda$ and $\Lambda \times_\alpha Z$. To see how, recall from [21, Lemma 2.23] that $\alpha$ induces a homeomorphism $\tilde{\alpha}$ of the topological realisation $X_\Lambda$ of $\Lambda$, and that $X_{\Lambda \times_\alpha Z}$ is homeomorphic to the mapping torus $M(\tilde{\alpha})$. Combining this with Theorem 6.3 and the long exact sequence of [17, Example 2.48] yields the result. + +5. EXAMPLES + +In this section we present some examples. We describe them using skeletons, so we first indicate what this means. Our examples are all 2-graphs (since there are already a number of interesting examples in this case), so we restrict ourselves to a discussion of skeletons for 2-graphs. + +A 2-coloured graph is a directed graph $E$ together with a map $c: E^1 \to \{1, 2\}$. 
A complete collection of squares in $E$ is a collection of relations of the form $ef \sim f'e'$ where $ef, f'e' \in E^2$ with $c(e) = c(e') = 1$ and $c(f) = c(f') = 2$ such that each bi-coloured path +---PAGE_BREAK--- + +of length two appears in exactly one such relation². It follows from [25, Section 6] (see also [18, Theorems 4.4 and 4.5]) that each pair consisting of a 2-coloured graph and a complete collection of pairs uniquely determines a 2-graph, and also that each 2-graph arises from such a pair ($E_Λ, C_Λ$). It is standard to refer to the equalities $ef = f'e'$ in $Λ$ determined by the squares $ef \sim f'e'$ in $C$ as the *factorisation rules*. We refer to $E$ as the *skeleton* of $Λ$. + +In our diagrams, edges of colour 1 are blue and solid, and edges of colour 2 are red and dashed. + +Our first example is a 2-graph whose first homology group contains torsion. Combined with Example 5.2, it also demonstrates that the homology of a *k*-graph depends on the factorisation rules and not just on the skeleton. + +*Example 5.1.* Fix $n > 1$ and consider the 1-graph $\Lambda$ with skeleton + +Define $\alpha \in \operatorname{Aut}(\Lambda)$ by $\alpha(f_i) = f_{i+1}$, where addition is modulo $n$ (so $\alpha$ fixes vertices). Then $\Lambda \times_\alpha \mathbb{Z}$ (see page 12) is the 2-graph with skeleton + +and factorisation rules $(f_i, 0)(v, 1) = (u, 1)(f_{i+1}, 0)$ for $i = 0, \dots, n-1$, where addition is modulo $n$. + +We claim that + +$$ H_0(\Lambda \times_{\alpha} \mathbb{Z}) \cong \mathbb{Z}, \quad H_1(\Lambda \times_{\alpha} \mathbb{Z}) \cong \mathbb{Z} \oplus \mathbb{Z}/n\mathbb{Z}, \quad \text{and} \quad H_2(\Lambda \times_{\alpha} \mathbb{Z}) = \{0\}. $$ + +By Proposition 3.12 we have $H_0(\Lambda \times_\alpha \mathbb{Z}) \cong \mathbb{Z}$ and $H_0(\Lambda) \cong \mathbb{Z}$. Since $\alpha$ fixes vertices it follows that $\alpha_* : H_0(\Lambda) \to H_0(\Lambda)$ is the identity map. Hence $\ker(1 - \alpha_*) = H_0(\Lambda) \cong \mathbb{Z}$. 
+ +We next calculate $H_1(\Lambda)$. Since $C_2(\Lambda) = \{0\}$, we have $H_1(\Lambda) = \ker(\partial_1)$. Since $\partial_1(f_i) = u-v$ for all $0 \le i \le n-1$, and since $C_1(\Lambda) = \mathbb{Z}\{f_0, \dots, f_{n-1}\}$, we have + +$$ (5.1) \qquad \{f_i - f_{i+1} : 0 \le i \le n-2\} \text{ is a basis for the } \mathbb{Z}\text{-module } H_1(\Lambda). $$ + +Let $b_i := f_i - f_{i+1}$ for $0 \le i \le n-2$ then $\alpha_*(b_i) = b_{i+1}$ for $0 \le i < n-2$, and + +$$ \alpha_*(b_{n-2}) = f_{n-1} - f_0 = -\sum_{i=0}^{n-2} b_i. $$ + +²Strictly speaking, in [18], a complete collection of squares is defined to be a collection $\mathcal{C}$ of coloured-graph morphisms from model coloured graphs $E_{k,e_i+e_j}$ into $\Lambda$, and the relation $\sim$ is defined by $ef \sim f'e'$ if and only if the two paths traverse a common element of $\mathcal{C}$. But we can recover the collection of coloured-graph morphisms as in [18] from the relation $\sim$, so the two formalisms are equivalent. +---PAGE_BREAK--- + +Hence, regarded as an endomorphism of $\mathbb{Z}^{n-1}$, the map $1 - \alpha_*$ is implemented by the +$(n-1) \times (n-1)$ matrix + +$$ +\left( +\begin{array}{rrrrr} +1 & 0 & 0 & \cdots & 0 & 1 \\ +-1 & 1 & 0 & \cdots & 0 & 1 \\ +0 & -1 & 1 & \cdots & 0 & 1 \\ +\vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\ +0 & 0 & 0 & \cdots & 1 & 1 \\ +0 & 0 & 0 & \cdots & -1 & 2 +\end{array} +\right). +$$ + +Thus Im(1 − α$_{*}$ ) is spanned by the elements b$_{i}$ − b$_{i+1}$ for 0 ≤ i ≤ n − 3 together with the +element b$_{n}$$_{−}$$_{2}$ + ∑ n − 2 i =0 b$_{i}$ . Using this one checks that + +$$ +(5.2) \quad \{b_0 - b_{n-2}, b_1 - b_{n-2}, \dots, b_{n-3} - b_{n-2}, nb_{n-2}\} \text{ is a basis for } \mathrm{Im}(1 - \alpha_*). +$$ + +From (5.1) one sees that + +$$ +(5.3) \quad \{b_0 - b_{n-2}, b_1 - b_{n-2}, \ldots, b_{n-3} - b_{n-2}, b_{n-2}\} \text{ is a basis for } H_1(\Lambda). 
+$$ + +In particular, rank($\operatorname{Im}(1 - \alpha_*)$) = rank($H_1(\Lambda)$), forcing $\ker(1 - \alpha_*) = \{0\}$. Moreover, combining (5.3) with (5.2) shows that coker$(1 - \alpha_*) \cong \mathbb{Z}/n\mathbb{Z}$. Thus Theorem 4.13 implies that $H_2(\Lambda \times_\alpha \mathbb{Z}) = \{0\} = \ker(1 - \alpha_*) = \{0\}$, and that $H_1(\Lambda \times_\alpha \mathbb{Z})$ is an extension of $\mathbb{Z}$ by $\mathbb{Z}/n\mathbb{Z}$ and hence is equal to $\mathbb{Z} \oplus (\mathbb{Z}/n\mathbb{Z})$. In particular, for $n=2$, the graph $\Lambda \times_\alpha \mathbb{Z}$ has the same homology as the Klein bottle. + +*Example* 5.2. Let $T_1$ be the 1-graph with a single vertex and a single edge as in Example 2.2(1), and let $\Lambda$ and $\Gamma = \Lambda \times_\alpha \mathbb{Z}$ be as in Example 5.1 with $n=2$. Then $\Lambda \times T_1$ has the same skeleton as $\Gamma$. To compute the homology of $\Lambda \times T_1$, we can use the Künneth theorem (Theorem 4.3): each of $T_1$ and $\Lambda$ consists of a single simple closed undirected path, so it is routine to verify that $H_i(T_1) = H_i(\Lambda) = \mathbb{Z}$ for each of $i=0,1$. Hence $H_i(\Lambda \times T_1) = \mathbb{Z}^{(2)}$ for all $i$. So the homology of $\Lambda \times T_1$ is the same as that of the 2-torus (see Example 4.5), and in particular is not equal to that of $\Gamma$, even though they have the same skeleton. + +We next describe a suite of examples of 2-graphs whose homology mirrors that of the +sphere, the torus, the Klein bottle and the projective plane. We have presented examples +matching the Klein bottle and the torus previously (see Examples 5.1 and 4.5), but we +provide presentations here which suggest standard planar diagrams for these four spaces. + +*Remark* 5.3. For a number of the following examples, we give a non-standard presentation of the skeleton and factorisation rules. 
Specifically for Examples 5.4–5.7, we present a commuting diagram (in the category $\Lambda$) which includes all 2-cubes as commuting squares. These diagrams are not the same as the skeletons because they involve some repeated vertices and edges. We present our examples this way to suggest planar diagrams for their topological realisations (see Section 6); indeed, we will sometimes refer to these commuting diagrams, very imprecisely, as planar diagrams for the associated 2-graphs. + +When using this presentation of a 2-graph, one must check that the collection of squares specified in the diagram is complete: since vertices may be repeated in a planar diagram, it is possible that there are some bi-coloured paths in the skeleton which do not appear as the sides of a square in the diagram, and in this case, the diagram may not completely specify a 2-graph, and is in any case not a planar diagram for the 2-graph in the sense just discussed. +---PAGE_BREAK--- + +*Example 5.4.* Let $\Lambda$ be the 2-graph described by the following planar diagram (see Remark 5.3). + +The skeleton of $\Lambda$ is pictured in (5.4). The Greek letters in the centres of the commuting squares in the above diagram are the morphisms of degree $(1, 1)$. So $\alpha = ce = ga$, $\beta = de = ha$, etc. + +Since $\Lambda$ is connected, $H_0(\Lambda) \cong \mathbb{Z}$ by Proposition 3.12. We have $\partial_2(\alpha - \beta + \gamma - \delta) = 0$ by a straightforward calculation and one can check that $\partial_2(n_1\alpha + n_2\beta + n_3\gamma) = 0$ implies $n_1 = n_2 = n_3 = 0$, so $H_2(\Lambda) = \ker(\partial_2) \cong \mathbb{Z}$. Moreover, $\partial_2(C_2(\Lambda))$ is spanned by $\partial_2(\alpha)$, $\partial_2(\beta)$ and $\partial_2(\gamma)$. + +One checks that the set $\{\partial_2(\alpha), \partial_2(\beta), \partial_2(\gamma), d, e, f, g, h\}$ forms a basis for $C_1(\Lambda)$. So $C_1(\Lambda) = \partial_2(C_2(\Lambda)) \oplus \mathbb{Z}\{d, e, f, g, h\}$. 
Since $H_0(\Lambda) = \mathbb{Z}$ and $C_0(\Lambda)$ has rank 6, the image of $\partial_1$ has rank 5. It follows that $H_1(\Lambda) = \{0\}$. Hence $\Lambda$ has the same homology as the sphere $S^2$. If we draw its skeleton as follows, the resemblance between $\Lambda$ and a combinatorial sphere is striking. + +*Example 5.5.* Consider the 2-graph $\Sigma$ with planar diagram (see Remark 5.3) on the left and skeleton on the right in the following diagram. +---PAGE_BREAK--- + +Let $\Lambda$ be the 1-graph with two vertices connected by two parallel edges used in Example 5.2; we observed in the same example that the homology of $\Lambda$ is that of the circle. Then $\Sigma$ is isomorphic to $\Lambda \times \Lambda$, so by the Künneth theorem it has the homology of the 2-torus as in Example 5.2. + +*Example 5.6.* We thank Mike Whittaker for his contributions to the construction and analysis of this example. + +Let $\Lambda$ be the 2-graph with planar diagram (see Remark 5.3) on the left and skeleton on the right in the following diagram. As above, the Greek letters in the centres of squares denote the morphisms in $\Lambda^{(1,1)}$ — so $\alpha = ga = ce$ etc. + +We claim that $H_0(\Lambda) \cong \mathbb{Z}$, $H_1(\Lambda) \cong \mathbb{Z}/2\mathbb{Z}$ and $H_2(\Lambda) = \{0\}$. Indeed, $C_0(\Lambda) = \mathbb{Z}\{u, v, w, x, y\}$, $C_1(\Lambda) = \mathbb{Z}\{a, b, c, d, e, f, g, h\}$ and $C_2(\Lambda) = \mathbb{Z}\{\alpha, \beta, \gamma, \delta\}$. Since $\Lambda$ is connected, $H_0(\Lambda) \cong \mathbb{Z}$ which implies that $\partial_1(C_1(\Lambda))$ has rank 4. Since rank $C_1(\Lambda) = 8$, rank ker($\partial_1$) = 4 also. If $\partial_2(n_1\alpha + n_2\beta + n_3\gamma + n_4\delta) = 0$, then consideration of the coefficients of $c$ and $h$ forces $n_1 = -n_3 = n_2$, and then that the coefficient of $a$ is zero forces $n_1 = n_2 = 0$, and hence $n_3 = 0$ also. Now considering the coefficient of $d$ shows that $n_4 = -n_2 = 0$. 
So $\partial_2$ is injective, forcing $H_2(\Lambda) = \{0\}$, and also that rank $\partial_2(C_2(\Lambda)) = 4$. We observed above that rank ker($\partial_1$) = 4; hence + +$$ \mathrm{rank}(H_1(\Lambda)) = \mathrm{rank}(\ker(\partial_1)) - \mathrm{rank}(\partial_2(C_2(\Lambda))) = 0. $$ + +It is routine to check that $\{c-d, g-h, c+f-b-h, d+e-a-h\}$ is a basis for ker($\partial_1$). To determine the image of $\partial_2$, first note that $c+f-b-h = \partial_2(\gamma)$ and $d+e-a-h = \partial_2(\beta)$. Moreover $(c-d)+(g-h)$ is the image of $\gamma-\delta$, which implies that $H_1(\Lambda)$ is generated by the class of $c-d$. Finally, $2(c-d) = \partial_2(\alpha - \beta + \gamma - \delta)$, and since $\{\alpha, \beta, \gamma, \alpha - \beta + \gamma - \delta\}$ is a basis for $C_2(\Lambda)$, it follows that $H_1(\Lambda) \cong \mathbb{Z}/2\mathbb{Z}$ as required. + +These homology groups are the same as those of the projective plane. + +*Example 5.7.* Consider the 2-graph $\Lambda$ with planar diagram (see Remark 5.3) on the left and skeleton on the right in the following diagram. +---PAGE_BREAK--- + +One can check, by calculating with bare hands, that the homology of this 2-graph is the same as that of the 2-graph $\Lambda \times_{\alpha} \mathbb{Z}$ of Example 5.1 with $n = 2$; that is, the same homology as the Klein bottle. Alternatively, one can deduce this from the topological realisation (see Remark 5.9 below). + +*Example 5.8.* In Example 5.6, we realised the homology of the projective plane using a 2-graph $\Lambda$. This suggests that there ought to be a 2-graph with the homology of the sphere carrying a free action of $\mathbb{Z}/2\mathbb{Z}$ such that the quotient is isomorphic to $\Lambda$. By [25, Remark 5.6] (see also [34]), such a 2-graph must be a skew product of $\Lambda$ by a functor taking values in $\mathbb{Z}/2\mathbb{Z}$. Here we present such an example. 
There is a functor $c : \Lambda \to \mathbb{Z}/2\mathbb{Z}$ determined by $c^{-1}(0) = \{b, c, g\}$ and $c^{-1}(1) = \{a, d, e, f, h\}$, and the skew-product graph $\Lambda \times_c \mathbb{Z}/2\mathbb{Z}$ has the desired property. The visual intuition that has pervaded this section appears again: one can check without too much difficulty that the skeleton of $\Lambda \times_c (\mathbb{Z}/2\mathbb{Z})$ can be drawn as follows (we have not labeled the edges since their labels can be deduced from the definition of the skew product and the labels of the vertices). + +This picture suggests how to view the action of $\mathbb{Z}/2\mathbb{Z}$ on the skew-product graph as the action of the antipodal map on the sphere. + +A similar situation arises for the Klein bottle and torus. Let $\Gamma$ denote the crossed product graph $\Lambda \times_{\alpha} \mathbb{Z}$ of Example 5.1 with $n = 2$, so that the homology of $\Gamma$ coincides with that of the Klein bottle. Let $c : \Gamma \to \mathbb{Z}/2\mathbb{Z}$ be the functor $c(\lambda, n) = n$ (mod 2). One can check that $\Gamma \times_c (\mathbb{Z}/2\mathbb{Z})$ is isomorphic to $\Lambda \times C_2$ where $\Lambda$ is the 1-graph from Example 5.1 (with $n = 2$), and $C_2$ is the path category of the simple directed cycle of length 2. In particular, by the Künneth theorem, the homology of $\Gamma \times_c (\mathbb{Z}/2\mathbb{Z})$ is isomorphic to that of the torus. So our 2-graph representative $\Gamma$ of the Klein bottle can be realised as a quotient of a 2-graph representative of a torus by a free $\mathbb{Z}/2\mathbb{Z}$ action. + +*Remark 5.9.* As observed in [21], the topological realisations of the 2-graphs of Examples 5.4–5.7 (see Section 6) are indeed homeomorphic to each of the sphere, the torus, the projective plane and the Klein bottle as their homology suggests. 
In particular, Theorem 6.3 below combined with the descriptions of their topological realisations in [21] provide an alternative proof that these 2-graphs have the homology we have claimed for them. + +## 6. CONNECTION WITH HOMOLOGY OF TOPOLOGICAL SPACES + +In this section, we show that the homology of the topological realisation $X_{\Lambda}$ of a k-graph as defined in [21] agrees with the homology of $\Lambda$ defined in §2. The corresponding fact for a cubical set was known already to Grandis: he indicates at the end of [14, Section 1.8] that the result is well known, with a reference to [32] for the simplicial case. +---PAGE_BREAK--- + +However, we have been unable to locate the details for cubical sets in the literature, so we include a proof of our result based on that given for simplicial complexes by Hatcher [17]. We prove in Appendix B that the topological realisation of a $k$-graph we define here is homeomorphic to the topological realisation $\mathcal{R}\tilde{Q}(\Lambda)$ of the associated cubical set $\tilde{Q}(\Lambda)$ (see Appendix A). + +In [21], the topological realisation of a $k$-graph $\Lambda$ is defined as follows. For $n \in \mathbb{N}^k$, let $[0, n] := \{t \in \mathbb{R}^k : 0 \le t \le n\}$. For $t \in \mathbb{R}^k$, let $\lfloor t \rfloor$ be the element of $\mathbb{Z}^k$ such that $\lfloor t \rfloor_i = \lfloor t_i \rfloor = \max\{n \in \mathbb{Z} : n \le t_i\}$ for all $i \le k$. Similarly, define $\lfloor t \rfloor$ by $\lfloor t \rfloor_i = \min\{n \in \mathbb{Z} : t_i \le n\}$ for $i \le k$. Consider the following equivalence relation on $\bigsqcup_{\lambda \in \Lambda} (\{\lambda\} \times [0, d(\lambda)])$: for $\mu, \nu \in \Lambda$ and $s, t \in \mathbb{R}^k$ with $0 \le s \le d(\mu)$ and $0 \le t \le d(\nu)$, we define + +$$ (6.1) \qquad (\mu, s) \sim (\nu, t) \iff s - \lfloor s \rfloor = t - \lfloor t \rfloor \text{ and } \mu(\lfloor s \rfloor, \lfloor s \rfloor) = \nu(\lfloor t \rfloor, \lfloor t \rfloor). 
$$ + +The topological realisation $X_\Lambda$ is the quotient space $(\bigsqcup_{\lambda \in \Lambda} \{\lambda\} \times [0, d(\lambda)]) / \sim$. As in [21] we let $[\lambda, t]$ denote the equivalence class of the point $(\lambda, t)$. + +**Definition 6.1.** For $r \in \mathbb{N}$, let $\mathbf{I}^r$ denote the unit cube $[0, 1]^r$ in $\mathbb{R}^r$. Fix an $r$-cube $\lambda \in Q_r(\Lambda)$. Express $d(\lambda) = e_{i_1} + \dots + e_{i_r}$ where $i_1 < \dots < i_r$. Let $\iota_\lambda : \mathbf{I}^r \to X_\Lambda$ denote the map $(t_1, \dots, t_r) \mapsto [\lambda, \sum_{m=1}^r t_m e_{i_m}]$. Then $\Phi(\lambda) := \iota_\lambda$ defines a homomorphism $\Phi : C_r(\Lambda) \to C_r^{\text{top}}(X_\Lambda)$. + +*Remark 6.2.* The map $\Phi$ intertwines the boundary maps, so is a chain map. It therefore induces a homomorphism $\Phi_* : H_*(\Lambda) \to H_*^{\text{top}}(X_\Lambda)$. + +It will be shown in [21] that each $k$-graph morphism $\theta : \Lambda \to \Gamma$ induces a continuous map $\tilde{\theta} : X_\Lambda \to X_\Gamma$ such that $\tilde{\theta} \circ \iota_\lambda = \iota_{\theta(\lambda)}$ for all $\lambda \in Q(\Lambda)$. Hence both the chain map $\Phi$ and the homomorphism $\Phi_*$ of homology are natural in $\Lambda$. (with respect to $k$-graph morphisms). + +**Theorem 6.3.** Let $\Lambda$ be a $k$-graph. For each $r \ge 0$, the map $\Phi_* : H_r(\Lambda) \to H_r^{\text{top}}(X_\Lambda)$ is an isomorphism. Moreover this isomorphism is natural in $\Lambda$. + +Our proof parallels the argument of the first three paragraphs of [17, Theorem 2.27] where it is shown that the singular homology of a $\Delta$-complex (see [17, page 103]) is the same as its simplicial homology. We first need to do some setting up. + +*Remark 6.4.* We claim that Massey’s definition of singular homology, which is based on cubes, is equivalent to the usual one based on simplices. 
By the uniqueness theorem of [31], if $X$ has the homotopy type of a CW-complex, then any homology theory on $X$ which satisfies the Eilenberg-Steenrod axioms [8] and which is additive in the sense that it carries disjoint unions to direct sums is naturally isomorphic to the usual singular homology. The Eilenberg-Steenrod axioms and additivity are all verified for Massey’s singular homology in [30, Chapter VII]: Axiom 1 is (3.4), Axiom 2 is (3.5), Axiom 3 is (7.6.1), Axiom 4 is Theorem 5.1, Axiom 5 is Theorem 6.1, Axiom 6 is Theorem 6.2, Axiom 7 is Example 2.1, and additivity is Proposition 2.7. Alternatively that Massey’s homology agrees with the simplicial formulation also follows from the original uniqueness theorem [8, Theorem 10.1] since we can triangulate $X_\Lambda$ by adding a vertex at the centre of each cube (thereby dividing each $r$-cube into $2^r r!$ $r$-simplices). + +To run Hatcher’s argument, we use the cellular structure of $X_\Lambda$ regarded as a CW-complex. For $0 \le m \le k$ let $X_m$ denote the union of the images of the $\iota_\lambda$ where $\lambda$ ranges +---PAGE_BREAK--- + +over all $r$-cubes with $r \le m$. We formally define $C_r^\Lambda(X_m) = C_r(\Lambda)$ if $m \ge r$ and to be zero otherwise. We obtain a nested sequence + +$$C_*^\Lambda(X_0) \subseteq C_*^\Lambda(X_1) \subseteq \dots \subseteq C_*^\Lambda(X_k) = C_*(\Lambda)$$ + +of complexes. In particular, for $l \le m$ we may form the quotient complex + +$$C_{*}^{\Lambda}(X_{m}, X_{l}) := C_{*}^{\Lambda}(X_{m})/C_{*}^{\Lambda}(X_{l}),$$ + +which has relative homology groups $H_*^\Lambda(X_m, X_l)$. 
Then + +$$ (6.2) \qquad H_r^\Lambda(X_m, X_{m-1}) \cong C_r^\Lambda(X_m, X_{m-1}) = \begin{cases} C_r(\Lambda) & \text{if } m=r, \\ \{0\} & \text{otherwise.} \end{cases} $$ + +Since every short exact sequence of complexes induces a long exact sequence in homology (see [17, Theorem 2.16]), we obtain a long exact sequence + +$$ (6.3) \quad \begin{aligned} & \cdots \to H_{r+1}^\Lambda(X_m, X_{m-1}) \to H_r^\Lambda(X_{m-1}) \to H_r^\Lambda(X_m) \to H_r^\Lambda(X_m, X_{m-1}) \\ & \longrightarrow H_{r-1}^\Lambda(X_{m-1}) \to \cdots \to H_0^\Lambda(X_m, X_{m-1}). \end{aligned} $$ + +The map $\Phi : C_*(\Lambda) \to C_*^{\text{top}}(X_\Lambda)$ induces a map from $C_*(X_m)$ to $C_*^{\text{top}}(X_m)$ for each $m$. Hence, it induces a map, also called $\Phi$, from $C_*^\Lambda(X_m, X_{m-1})$ to $C_*^{\text{top}}(X_m, X_{m-1})$. + +The crucial step in Hatcher's proof of [17, Theorem 2.27] is the following isomorphism. + +**Lemma 6.5.** With notation as above, the induced map + +$$ \Phi_* : H_r^\Lambda(X_m, X_{m-1}) \to H_r^{\text{top}}(X_m, X_{m-1}) $$ + +is an isomorphism for each $r, m$. + +*Proof.* Suppose that $r \ne m$. Then $H_r^\Lambda(X_m, X_{m-1}) = \{0\}$ by (6.2) and $H_r^{\text{top}}(X_m, X_{m-1}) = \{0\}$ by [17, Lemma 2.3.4(a)]. Hence $\Phi_* : H_r^\Lambda(X_m, X_{m-1}) \to H_r^{\text{top}}(X_m, X_{m-1})$ is an isomorphism for $m \ne r$. Since + +$$ H_r^\Lambda(X_r, X_{r-1}) \cong C_r(\Lambda) = \mathbb{Z}Q_r(\Lambda) \cong H_r^{\text{top}}(Q_r(\Lambda) \times \mathbf{I}^r, Q_r(\Lambda) \times \partial\mathbf{I}^r), $$ + +it suffices to show that the canonical map $Q_r(\Lambda) \times \mathbf{I}^r \to X_r$ given by $(\lambda, t) \mapsto \iota_\lambda(t)$ induces an isomorphism + +$$ H_r^{\text{top}}(Q_r(\Lambda) \times \mathbf{I}^r, Q_r(\Lambda) \times \partial\mathbf{I}^r) \cong H_r^{\text{top}}(X_r, X_{r-1}). $$ + +To see this, observe that $(X_r, X_{r-1})$ is a good pair (see [17, p. 
114]) in the sense that $X_{r-1}$ is a nonempty closed subset of $X_r$ which is a deformation retract of the open set + +$$ X_{r-1} \cup \{[\lambda, t] : \lambda \in Q_r(\Lambda), \min\{t_i, 1-t_i\} < 1/3 \text{ for } 1 \le i \le r\}. $$ + +Let $X_r/X_{r-1}$ be the quotient of $X_r$ obtained by identifying $X_{r-1}$ to a point. That $(X_r, X_{r-1})$ is a good pair combines with [17, Proposition 2.22] and Remark 6.4 to show that + +$$ H_r^{\text{top}}(X_r, X_{r-1}) \cong H_r^{\text{top}}(X_r/X_{r-1}). $$ + +Moreover, $\Phi_r$ induces a homeomorphism of $(Q_r(\Lambda) \times \mathbf{I}^r)/(Q_r(\Lambda) \times \partial\mathbf{I}^n)$ with $X_r/X_{r-1}$. Since $(Q_r(\Lambda) \times \mathbf{I}^r, Q_r(\Lambda) \times \partial\mathbf{I}^r)$ is also a good pair, the result follows from another application of [17, Proposition 2.22]. □ +---PAGE_BREAK--- + +*Proof of Theorem 6.3.* The naturality of $\Phi_*$ was observed in Remark 6.2. So we just need to show that $\Phi_*$ is an isomorphism. + +Both $H_r(\Lambda)$ and $H_r(X_\Lambda)$ are trivial for $r > k$, so we may assume that $0 \le r \le k$. Fix $m \in \mathbb{N}$. If $r \le m$ then we may regard the map $\Phi : C_r(\Lambda) \to C_r^{\text{top}}(X_\Lambda)$ given in Definition 6.1 as a map from $C_r^\Lambda(X_m)$ to $C_r^{\text{top}}(X_m)$; whereas if $r > m$ then both $C_r^\Lambda(X_m)$ and $C_r^{\text{top}}(X_m)$ are trivial, and we define $\Phi : C_r^\Lambda(X_m) \to C_r^{\text{top}}(X_m)$ to be the trivial map between trivial groups. As in Remark 6.2, $\Phi$ intertwines the boundary maps, and so induces a homomorphism $\Phi_* : H_*^\Lambda(X_m) \to H_*^{\text{top}}(X_m)$. + +We claim that these maps are all isomorphisms. We proceed by induction on $m$. Our base case is $m=0$. Since $X_0$ is equal to the discrete space $\Lambda^0$, each of $H_0^\Lambda(X_0)$ and $H_0^{\text{top}}(X_0)$ is canonically isomorphic to $\mathbb{Z}\Lambda^0$, and $\Phi_*$ is the identity map. 
Moreover, for $r \ge 1$, we have $H_r^\Lambda(X_0) = H_r^{\text{top}}(X_0) = \{0\}$, so $\Phi_*$ is trivially an isomorphism. Now fix $m \ge 1$ and suppose as an inductive hypothesis that $\Phi_*$ is an isomorphism between $H_*^\Lambda(X_{m-1})$ and $H_*^{\text{top}}(X_{m-1})$. Fix $r \ge 0$. Since $\Phi_*$ induces a map of short exact sequences of complexes, the naturality of the connecting map in the long exact sequence arising from a short exact sequence of complexes yields the following commuting diagram. + +The inductive hypothesis ensures that the second and fifth vertical maps are isomorphisms, and the first and fourth maps are isomorphisms by Lemma 6.5. Thus the Five Lemma (see, for example, [17, p 129]) implies that the middle vertical map is also an isomorphism, completing the induction. Hence, $\Phi_* : H_r^\Lambda(X_m) \to H_r^{\text{top}}(X_m)$ is an isomorphism for all $m$. Since $H_r(\Lambda) = H_r^\Lambda(X_k)$ and $H_r^{\text{top}}(X) = H_r^{\text{top}}(X_k)$ for all $r \ge 0$ the desired result follows. $\square$ + +## 7. COHOMOLOGY AND TWISTED *-GRAPHS + +In this section we introduce cohomology for *-graphs and indicate how a $\mathbb{T}$-valued 2-cocycle may be used to twist a *-graph $C^*$-algebra. We first define the cohomology of a *-graph and provide a Universal Coefficient Theorem. We then show how to associate to each $\mathbb{T}$-valued 2-cocycle $\phi$ on $\Lambda$ a twisted $C^*$-algebra $C_\phi^*(\Lambda)$. We obtain as relatively elementary examples all noncommutative tori and the Heegaard-type quantum 3-spheres of [1]. We will study cohomology for *-graphs and the structure of twisted *-graph $C^*$-algebras in greater detail in [28]. + +**Notation 7.1.** Let $\Lambda$ be a *-graph and let $A$ be an abelian group. For $r \in \mathbb{N}$, we write $C^r(\Lambda, A)$ for the collection of all functions $f : Q_r(\Lambda) \to A$. We identify $C^r(\Lambda, A)$ with $\operatorname{Hom}(C_r(\Lambda), A)$ in the usual way. 
Define maps $\delta^r : C^r(\Lambda, A) \to C^{r+1}(\Lambda, A)$ by + +$$ \delta^r(f)(\lambda) := f(\partial_{r+1}(\lambda)) = \sum_{i=1}^{r+1} \sum_{l=0}^{1} (-1)^{i+l} f(F_i^l(\lambda)). $$ + +Then $(C^*(\Lambda, A), \delta^*)$ is a cochain complex. +---PAGE_BREAK--- + +Mac Lane [29, Chapter II, Equation (3.1)] associates a cochain complex to a chain complex and an abelian group in a similar way, but with a slightly different sign convention for the boundary map. The resulting cohomology is isomorphic to the following. + +**Definition 7.2.** We define the cohomology $H^*(\Lambda, A)$ of the $k$-graph $\Lambda$ with coefficients in $A$ to be the cohomology of the complex $C^*(\Lambda, A)$; that is $H^r(\Lambda, A) := \ker(\delta^r)/\operatorname{Im}(\delta^{r-1})$. For $r \ge 0$, we write $Z^r(\Lambda, A) := \ker(\delta^r)$ for the group of $r$-cocycles, and for $r > 0$, we write $B^r(\Lambda, A) = \operatorname{Im}(\delta^{r-1})$ for the group of $r$-coboundaries. + +**Theorem 7.3 (Universal Coefficient Theorem).** Let $\Lambda$ be a $k$-graph, and let $A$ be an abelian group. For each $r \ge 0$, there is a short exact sequence + +$$ +0 \to \operatorname{Ext}(H_{r-1}(\Lambda), A) \xrightarrow{\alpha} H^r(\Lambda, A) \xrightarrow{\beta} \operatorname{Hom}(H_r(\Lambda), A) \to 0, +$$ + +and the maps $\alpha$ and $\beta$ are natural in $A$ and $\Lambda$. + +*Proof.* This follows directly from Mac Lane's theorem [29, Theorem III.4.1] applied to the complex $C_*(\Lambda)$. $\square$ + +Recall from [25] that a k-graph Λ is row-finite if vΛⁿ is finite for all v ∈ Λ⁰ and n ∈ Nᵏ, and is locally convex if, whenever 1 ≤ i ≠ j ≤ k and λ ∈ Λᵉᵢ with r(λ)Λᵉⱼ ≠ ∅, we have s(λ)Λᵉⱼ ≠ ∅ also. + +We will follow the usual convention of writing the binary operation in an abelian group A additively, except when A = T where it is written multiplicatively. + +**Definition 7.4 (cf. [36, Equation (3.1)] and [37, Theorem C.1(i)-(ii)])**. 
Let $\Lambda$ be a row-finite locally convex $k$-graph and fix $\phi \in Z^2(\Lambda, \mathbb{T})$. A Cuntz-Krieger $\phi$-representation of $\Lambda$ in a $C^*$-algebra $A$ is a set $\{p_v : v \in \Lambda^0\} \subseteq A$ of mutually orthogonal projections and a set $\{s_\lambda : \lambda \in \bigcup_{i=1}^k \Lambda^{e_i}\} \subseteq A$ satisfying + +(1) for all $\lambda \in \Lambda^{e_i}$, $s_{\lambda}^{*} s_{\lambda} = p_{s(\lambda)}$; + +(2) for all $1 \le i < j \le k$ and $\mu, \mu' \in \Lambda^{e_i}$, $\nu, \nu' \in \Lambda^{e_j}$ such that $\mu\nu = \nu'\mu'$, + +$s_{\nu'} s_{\mu'} = \phi(\mu\nu)s_{\mu}s_{\nu}$; and + +(3) for all $v \in \Lambda^0$ and all $i = 1, \dots, k$ such that $v\Lambda^{e_i} \neq \emptyset$, + +$$ +p_v = \sum_{\lambda \in v\Lambda^{e_i}} s_\lambda s_\lambda^*. +$$ + +The condition that a set $\{p_v : v \in \Lambda^0\}$ consists of mutually orthogonal projections is characterised by the algebraic relations $p_v^* = p_v^2 = p_v$ and $p_v p_w = \delta_{v,w} p_v$ for all $v, w \in \Lambda^0$. Given any collection $\{p_v : v \in \Lambda^0\}$ in a *-algebra satisfying these relations, and given any family $\{s_\lambda : \lambda \in \bigcup_{i=1}^k \Lambda^{e_i}\}$ in the same *-algebra satisfying relation (1), the norm of the image of each $p_v$ and of each $s_\lambda$ under any representation on Hilbert space is at most 1. So as in [2, Definition 1.2], there is a universal $C^*$-algebra generated by a Cuntz-Krieger $\phi$-representation of $\Lambda$. A priori, this could be the zero algebra; but we will exhibit some interesting examples (see Examples 7.7, 7.9, 7.10) where it is not, and we will show in the forthcoming article [28] that in fact there is always a Cuntz-Krieger $\phi$-representation of $\Lambda$ in which every generator is nonzero. + +**Definition 7.5.** Let $\Lambda$ be a row-finite locally convex $k$-graph. Let $\phi \in Z^2(\Lambda, \mathbb{T})$. 
We define $C_{\phi}^{*}(\Lambda)$ to be the universal $C^{*}$-algebra generated by a Cuntz-Krieger $\phi$-representation of $\Lambda$. +---PAGE_BREAK--- + +**Proposition 7.6.** Let $\Lambda$ be a row-finite locally convex k-graph. + +(1) Let $1$ denote the identity element of $C_2(\Lambda, \mathbb{T})$. Then $C_1^*(\Lambda)$ is canonically isomorphic to the $k$-graph algebra $C^*(\Lambda)$ defined in [36]. + +(2) Let $\psi, \phi \in Z^2(\Lambda, \mathbb{T})$, and suppose that $\alpha \in C^1(\Lambda, \mathbb{T})$ satisfies $\phi = \delta^1(\alpha)\psi$ so that $\phi$ and $\psi$ are cohomologous. Let $\{p_v^\psi : v \in \Lambda^0\}$, $\{s_\lambda^\psi : \lambda \in \bigsqcup_{i=1}^k \Lambda^{e_i}\}$ be the universal generating Cuntz-Krieger $\psi$-representation of $\Lambda$ and similarly for $\phi$. Then there is an isomorphism $\pi : C_\psi^*(\Lambda) \to C_\phi^*(\Lambda)$ such that $\pi(p_v^\psi) = p_v^\phi$ for all $v \in \Lambda^0$ and $\pi(s_\lambda^\psi) = \alpha(\lambda)s_\lambda^\phi$ for all $\lambda \in \bigsqcup_{i=1}^k \Lambda^{e_i}$. + +*Proof.* (1) The combination of [37, Theorem C.1 and Lemma B.4] shows that $C^*(\Lambda)$ is the universal $C^*$-algebra generated by elements satisfying the relations of Definition 7.4 with $\phi(\mu\nu) = 1$ for all $\mu\nu \in Q_2(\Lambda)$. + +(2) For $\lambda \in \bigsqcup_{i=1}^k \Lambda^{e_i}$, let $t_\lambda := \alpha(\lambda)s_\lambda^\phi$. If $\mu\nu = \nu'\mu'$ where $\mu, \mu' \in \Lambda^{e_i}, \nu, \nu' \in \Lambda^{e_j}$ and $1 \le i < j \le k$, then $\delta^1(\alpha) = \alpha(\mu')^{-1}\alpha(\nu')^{-1}\alpha(\mu)\alpha(\nu)$. 
Hence + +$$\alpha(\nu')\alpha(\mu')\phi(\mu\nu) = \alpha(\nu')\alpha(\mu')\delta^1(\alpha)(\mu\nu)\psi(\mu\nu) = \alpha(\mu)\alpha(\nu)\psi(\mu\nu).$$ + +Using this, we calculate: + +$$t_{\nu'}t_{\mu'} = \alpha(\nu')\alpha(\mu')s_{\nu'}s_{\mu'} = \alpha(\nu')\alpha(\mu')\phi(\mu\nu)s_\mu s_\nu = \alpha(\mu)\alpha(\nu)\psi(\mu\nu)s_\mu s_\nu = \psi(\mu\nu)t_\mu t_{\nu'}.$$ + +So $\{t_\lambda : \lambda \in \bigsqcup_{i=1}^k \Lambda^{e_i}\}$ satisfies Definition 7.4(2) for the cocycle $\psi$. Hence the collections $\{p_v^\phi : v \in \Lambda^0\}$ and $\{t_\lambda : \lambda \in \bigsqcup_{i=1}^k \Lambda^{e_i}\}$ in $C_\phi^*(\Lambda)$ constitute a Cuntz-Krieger $\psi$-representation of $\Lambda$. The universal property of $C_\psi^*(\Lambda)$ therefore gives a homomorphism $\pi : C_\psi^*(\Lambda) \to C_\phi^*(\Lambda)$ such that $\pi(p_v^\psi) = p_v^\phi$ for all $v \in \Lambda^0$ and $\pi(s_\lambda^\psi) = t_\lambda = \alpha(\lambda)s_\lambda^\phi$ for all $\lambda \in \bigsqcup_{i=1}^k \Lambda^{e_i}$. Reversing the roles of $\psi$ and $\phi$ in the above calculation yields an inverse, so $\pi$ is an isomorphism. $\square$ + +*Example 7.7.* Let $T_2$ denote $\mathbb{N}^2$ regarded as a 2-graph with degree functor the identity map (see Examples 2.2(1)). Fix $\theta \in [0, 1]$. There is precisely one 2-cube in $T_2$, namely (1, 1). Define $\phi \in Z^2(T_2, \mathbb{T})$ by $\phi(1, 1) = e^{2\pi i \theta}$. By definition, $C_\phi^*(T_2)$ is the universal $C^*$-algebra generated by unitaries $S_{e_1}$ and $S_{e_2}$ satisfying + +$$S_{e_2}S_{e_1} = e^{2\pi i \theta} S_{e_1}S_{e_2}.$$ + +That is, $C_\phi^*(T_2)$ is the rotation algebra $A_\theta$. 
+ +*Remark 7.8.* Theorem 2.1 of [22] says that the obstruction to a product system over $\mathbb{N}^2$ of C-correspondences being the product system associated to the 2-graph $T_2$ is measured by the element $\omega \in \mathbb{T}$ which implements the module isomorphism $\mathbb{C} \otimes \mathbb{C} \to \mathbb{C} \otimes \mathbb{C}$ between $X_{(1,0)} \otimes X_{(0,1)}$ and $X_{(0,1)} \otimes X_{(1,0)}$. We may regard $H^2(T_2, \mathbb{T})$ as the receptacle for this obstruction. + +*Example 7.9.* More generally consider the $k$-graph $T_k$ for $k \ge 2$. Then the twisted $k$-graph $C^*$-algebras over $T_k$ correspond exactly to the noncommutative tori (see for example [20], [9]; note that their sign conventions differ). Let $\theta$ be a skew-symmetric $k \times k$ real matrix, then the associated noncommutative torus $A_\theta$ is the universal $C^*$-algebra generated by $k$ unitaries $u_1, \dots, u_k$, satisfying (see [20]) + +$$ (7.1) \qquad u_n u_m = e^{2\pi i \theta_{m,n}} u_m u_n \quad \text{for all } 1 \le m, n \le k. $$ +---PAGE_BREAK--- + +Recall that $Q_2(T_k) = \{e_m + e_n \mid 1 \le m < n \le k\}$. Set $\phi_\theta(e_m + e_n) = e^{2\pi i \theta_{m,n}}$. Then $\phi(\theta)$ is a 2-cocycle. Moreover $C_{\phi(\theta)}^*(T_k)$ is the universal $C^*$-algebra generated by $k$ unitaries $S_{e_1}, \dots, S_{e_k}$ satisfying (7.1). Hence, $A_\theta \cong C_{\phi(\theta)}^*(T_k)$. + +*Example 7.10.* In [1] the authors describe $C^*$-algebras $C(S_{pq\theta}^3)$ where $p, q, \theta$ are parameters in $[0, 1]$. They show that $C(S_{pq\theta}^3) \cong C(S_{00\theta}^3)$ [1, Theorem 2.8] for all $p, q, \theta$. By definition, $C(S_{00\theta}^3)$ is the universal $C^*$-algebra generated by elements $S$ and $T$ satisfying + +$$ (7.2) \qquad (1 - SS^*)(1 - TT^*) = 0, $$ + +$$ (7.3) \qquad S^*S = T^*T = 1, $$ + +$$ (7.4) \qquad ST = e^{2\pi i \theta} TS, \text{ and} $$ + +$$ (7.5) \qquad ST^* = e^{-2\pi i \theta} T^*S. 
$$ + +It was shown in [16, Remark 3.3] that $C(S_{000}^3)$ is isomorphic to the Cuntz-Krieger algebra of the unique 2-graph $\Lambda$ with skeleton $E_\Lambda$ as pictured below. + +Specifically, the isomorphism $C(S_{000}^3) \to C^*(\Lambda)$ carries $S$ to $s_a+s_b+s_c$ and $T$ to $s_f+s_g+s_h$. +Note that $T_2 = \mathbb{N}^2$ so the degree map on $\Lambda$ yields a 2-graph morphism $f: \Lambda \to T_2$. +A routine computation shows that $f_*$ induces an isomorphism on homology. Hence by +Theorem 7.3, $f^*$ induces an isomorphism $H^2(T_2, \mathbb{T}) \cong H^2(\Lambda, \mathbb{T})$. + +Let $\alpha = ah = hb$, $\beta = cg = fc$ and $\tau = af = fa$; so $Q_2(\Lambda) = \{\alpha, \beta, \tau\}$. For each $\theta \in [0, 1)$ the 2-cocycle on $T_2$ determined by $(1, 1) \mapsto e^{-2\pi i \theta}$ pulls back to a 2-cocycle $\phi_\theta$ on $\Lambda$ satisfying $\phi_\theta(\alpha) = \phi_\theta(\beta) = \phi_\theta(\tau) = e^{-2\pi i \theta}$ (the preceding paragraph shows that every 2-cocycle on $\Lambda$ is cohomologous to one of this form). Fix $\theta \in [0, 1)$ and let $\{s_\lambda : \lambda \in \bigcup_{i=1}^k \Lambda^{e_i}\}$ and $\{p_v : v \in \Lambda^0\}$ be the generators of $C_{\phi(\theta)}^*(\Lambda)$. Define $\bar{S}, \bar{T} \in C_{\phi(\theta)}^*(\Lambda)$ by $\bar{S} := s_a+s_b+s_c$ and $\bar{T} = s_f+s_g+s_h$. We have + +$$ \overline{ST} = s_a s_f + s_c s_g + s_a s_h = e^{2\pi i \theta} s_f s_a + e^{2\pi i \theta} s_f s_c + e^{2\pi i \theta} s_h s_b = e^{2\pi i \theta} \overline{TS}. $$ + +So $\overline{S}, \overline{T}$ satisfy (7.4). 
Moreover + +$$ +\begin{align*} +\overline{T^*S} &= \overline{T^*} p_u \overline{S} = (s_f^* + s_g^* + s_h^*)(s_\alpha s_\alpha^* + s_\beta s_\beta^* + s_\tau s_\tau^*)(s_a + s_b + s_c) \\ +&= s_f^*(s_\beta s_\beta^*) s_c + s_f^*(s_\tau s_\tau^*) s_a + s_h^*(s_\alpha s_\alpha^*) s_a \\ +&= s_f^*(e^{2\pi i \theta} s_f s_c)(s_g^* s_c^*) s_c + s_f^*(e^{2\pi i \theta} s_f s_a)(s_f^* s_a^*) s_a + s_h^*(e^{2\pi i \theta} s_h s_b)(s_h^* s_a^*) s_a \\ +&= e^{2\pi i \theta} (s_c s_g^* + s_a s_f^* + s_b s_h^*) = e^{2\pi i \theta} (s_a + s_b + s_c)(s_f^* + s_g^* + s_h^*) \\ +&= e^{2\pi i \theta} \overline{ST}^*, +\end{align*} +$$ + +which establishes (7.5). That $\overline{S}, \overline{T}$ also satisfy (7.2) and (7.3) is routine. Hence by the universal property of $C(S_{00\theta}^3)$ the map $S \to \overline{S}$ and $T$ to $\overline{T}$ extends to a homomorphism $\rho$ from $C(S_{00\theta}^3)$ to $C_{\phi(\theta)}^*(\Lambda)$. +---PAGE_BREAK--- + +Now let $S$ and $T$ be the generators of $C(S_{00\theta}^3)$. Define + +$$q_w = 1 - SS^*, \quad q_v = 1 - TT^*, \quad \text{and} \quad q_u = SS^*TT^*,$$ + +and + +$t_\eta = q_{r(\eta)}Sq_{s(\eta)}$ for $\eta \in \Lambda^{e_1}$, and $t_\eta = q_{r(\eta)}Tq_{s(\eta)}$ for $\eta \in \Lambda^{e_2}$. + +It is routine to check that the pair $\{q_u, q_v, q_w\}$, $\{t_a, t_b, t_c, t_f, t_g, t_h\}$ is a Cuntz-Krieger $\phi(\theta)$-representation of $\Lambda$ in $C(S_{00\theta}^3)$. So the universal property of $C_{\phi(\theta)}^*(\Lambda)$ yields a homomorphism $\psi : C_{\phi(\theta)}^*(\Lambda) \to C(S_{00\theta}^3)$ such that $\psi(p_x) = q_x$ for $x \in \Lambda^0$ and $\psi(s_\eta) = t_\eta$ for $\eta \in \Lambda^{e_1} \cup \Lambda^{e_2}$. One verifies that $\psi = \rho^{-1}$ and it follows that $C_{\phi(\theta)}^*(\Lambda) \cong C(S_{00\theta}^3)$. 
Our analysis of $H^2(\Lambda, \mathbb{T})$, together with Proposition 7.6, therefore shows that the collection of twisted 2-graph $C^*$-algebras associated to $\Lambda$ is precisely the collection of algebras $C(S_{00\theta}^3)$, and hence precisely the collection of algebras $C(S_{pq\theta}^3)$ by [1, Theorem 2.8].
+ +**Definition A.1.** A cubical set is a triple $X = (X_r, \partial_i^\ell, f_i)$ consisting of a sequence $(X_r)_{r=0}^\infty$ of sets, together with, for each $r \in \mathbb{N}$, maps + +$$ \partial_i^\ell : X_r \to X_{r-1}, \quad l \in \{0,1\}, \ 1 \le i \le r \quad \text{and} \quad f_i : X_{r-1} \to X_r, \quad 1 \le i \le r $$ + +satisfying the cubical relations + +$$ (A.1) \qquad \partial_i^\ell \partial_j^m = \partial_j^m \partial_{i+1}^\ell \quad \text{if } j \le i, $$ + +$$ (A.2) \qquad f_i f_j = f_{i+1} f_j \quad \text{if } j \le i, $$ + +$$ (A.3) \qquad \partial_i^\ell f_j = \begin{cases} f_j \partial_{i-1}^\ell & \text{if } j < i, \\ \mathrm{id} & \text{if } j = i, \\ f_{j-1} \partial_i^\ell & \text{if } j > i. \end{cases} $$ + +The maps $\partial_i^\ell$ are called *faces* and the $f_i$ are called *degeneracies*. + +We now introduce the $k$-graph analog $\mathbf{1}$ of the model cocubical set $\mathbb{I}$ described in [14, §1.2] (that is, an object satisfying conditions dual to those set out in Definition A.1). Recall from Section 2 that for $r \ge 1$, $\mathbf{1}_r = \sum_{i=1}^r e_i$ (and $\mathbf{1}_0 := 0 \in \mathbb{N}^0$). We define (see Examples 2.2). + +$$ \mathbf{1}_r = \begin{cases} \Omega_{r,\mathbf{1}_r} & \text{if } r \ge 1; \\ \Omega_0 & \text{if } r = 0. \end{cases} $$ +---PAGE_BREAK--- + +For $\ell = 0, 1$ define $\varepsilon_0^\ell : \mathbb{N}^0 \to \mathbb{N}^1$ by $\varepsilon_0^\ell(0) = \ell$. For $1 \le i \le r+1$ and $\ell \in \{0, 1\}$ define +$\varepsilon_i^\ell : \mathbb{N}^r \to \mathbb{N}^{r+1}$ by + +$$ +\varepsilon_i^\ell(n_1, \ldots, n_r) = (n_1, \ldots, n_{i-1}, \ell, n_i, \ldots, n_r). 
+$$ + +If $m \le n \le \mathbf{1}_r$ in $\mathbb{N}^r$, then $\varepsilon_i^\ell(m) \le \varepsilon_i^\ell(n) \le \mathbf{1}_{r+1}$ in $\mathbb{N}^{r+1}$; so we may extend $\varepsilon_i^\ell$ to a +quasimorphism from $\mathbf{1}_r$ to $\mathbf{1}_{r+1}$ by setting $\varepsilon_i^\ell(m,n) := (\varepsilon_i^\ell(m), \varepsilon_i^\ell(n))$. + +Define $\eta_1 : \mathbb{N}^1 \to \mathbb{N}^0$ by $\eta_1(n) = 0$ for all $n \in \mathbb{N}$. For $r \ge 2$ and $1 \le i \le r$ we define +$\eta_i : \mathbb{N}^r \to \mathbb{N}^{r-1}$ by deleting the $i^{\text{th}}$ coordinate: + +$$ +\eta_i(n_1, \dots, n_r) := (n_1, \dots n_{i-1}, n_{i+1}, \dots n_r). +$$ + +If $m \le n \le \mathbf{1}_r$ in $\mathbb{N}^r$, then $\eta_i(m) \le \eta_i(n) \le \mathbf{1}_{r-1}$ in $\mathbb{N}^{r-1}$; so $\eta_i$ extends to a quasimorphism from $\mathbf{1}_r$ to $\mathbf{1}_{r-1}$ such that $\eta_i(m,n) = (\eta_i(m), \eta_i(n))$. + +**Proposition A.2.** The collection $\mathbf{1} = (\mathbf{1}_n, \varepsilon_i^\ell, \eta_i)$ forms a cocubical set. + +*Proof.* It is routine but tedious to check that the duals of the relations (A.1), (A.2) and (A.3) hold. $\square$ + +Now we build a cubical set $\tilde{Q}(\Lambda)$ from a $k$-graph $\Lambda$ by considering collections of maps +from $\mathbf{1}$ into $\Lambda$: Given $t, r, k \in \mathbb{N}$, a homomorphism $h : \mathbb{N}^r \to \mathbb{N}^k$ is called an *admissible* +map of rank $t$, or just an *admissible map*, if there exist $1 \le i_1 < \cdots < i_t \le r$ and +$1 \le j_1 < \cdots < j_t \le k$ such that + +$$ +(A.4) \qquad h(e_{i_p}) = e_{j_p} \text{ for } p \le t \quad \text{and} \quad h(e_i) = 0 \text{ if } i \notin \{i_1, \ldots, i_t\}. +$$ + +Let $\Lambda$ be a $k$-graph and fix $r \in \mathbb{N}$. A quasimorphism $\varphi : \mathbf{1}_r \to \Lambda$ is said to be an $r$-cube if there is an admissible map $h : \mathbb{N}^r \to \mathbb{N}^k$ such that $d_\Lambda \circ \varphi = h \circ d_{\mathbf{1}_r}$. 
We say that an $r$-cube $\varphi$ has rank $t$ if the associated admissible map has rank $t$. For $r \ge 0$ let + +$$ +\tilde{Q}_r(\Lambda) = \{\varphi : \mathbf{1}_r \to \Lambda : \varphi \text{ is an } r\text{-cube}\}. +$$ + +For $1 \le i \le r + 1$ and $\ell \in \{0, 1\}$, define $\bar{\epsilon}_i^\ell : \tilde{Q}_{r+1}(\Lambda) \to \tilde{Q}_r(\Lambda)$ by + +$$ +\bar{\epsilon}_i^\ell(\varphi) := \varphi \circ \epsilon_i^\ell +$$ + +and for $1 \le i \le r$, define $\bar{\eta}_i : \tilde{Q}_{r-1}(\Lambda) \to \tilde{Q}_r(\Lambda)$ by + +$$ +\bar{\eta}_i(\varphi) := \varphi \circ \eta_i. +$$ + +*Remark A.3.* Let $\varphi$ be an $(r+1)$-cube of rank $t$ with admissible map $h : \mathbb{N}^{r+1} \to \mathbb{N}^k$ given as in equation (A.4) above. If $j = i_p$ for some $p$, then $\bar{\epsilon}_j^\ell(\varphi)$ is an $r$-cube whose rank is $t-1$. Otherwise it is an $r$-cube of rank $t$. In either case, the associated admissible map $h' : \mathbb{N}^r \to \mathbb{N}^k$ is given by + +$$ +(A.5) \qquad h'(e_i) = \begin{cases} e_{j_p} & \text{if } i < j \text{ and } i = i_p \text{ for some } p \\ e_{j_p} & \text{if } i \ge j \text{ and } i = i_p - 1 \text{ for some } p \\ 0 & \text{otherwise.} \end{cases} +$$ + +So $h'(t_1, \dots, t_r) = h(t_1, \dots, t_{j-1}, 0, t_j, \dots, t_r)$. + +Similarly, if $\varphi$ is an $r$-cube of rank $t$ with admissible map $h : \mathbb{N}^r \to \mathbb{N}^k$ given in +equation (A.4) above, then $\bar{\eta}_j(\varphi)$ is an $(r+1)$-cube of rank $t$ whose admissible map is +---PAGE_BREAK--- + +given by + +$$ +(A.6) \qquad h''(e_i) = \begin{cases} e_{j_p} & \text{if } i < j \text{ and } i = i_p \text{ for some } p \\ e_{j_p} & \text{if } i > j \text{ and } i = i_p + 1 \text{ for some } p \\ 0 & \text{otherwise.} \end{cases} +$$ + +So $h''(t_1, \dots, t_{r+1}) = h(t_1, \dots, t_{j-1}, t_{j+1}, \dots, t_{r+1}).$ + +**Theorem A.4.** Let $\Lambda$ be a $k$-graph. 
Then $\tilde{Q}(\Lambda) = (\tilde{Q}_r(\Lambda), \bar{\epsilon}_i^\ell, \bar{\eta}_i)$ is a cubical set. + +*Proof.* This follows from Proposition A.2. $\square$ + +In [14, §2.1] the homology of a cubical set is defined as follows: Let $X = (X_r, \partial_i^\ell, f_i)$ be a cubical set, then for $r \ge 1$ we define + +$$ +\mathrm{Deg}_r(X) = \bigcup_{i=1}^{r} \mathrm{Im}(f_i : X_{r-1} \to X_r) \subseteq X_r +$$ + +and set $\mathrm{Deg}_0(X) = \emptyset$. The (normalised) chain complex $(C_*(X), \partial_*)$ is defined by + +$$ +C_r(X) = \mathbb{Z}X_r/\mathbb{Z}\operatorname{Deg}_r(X) = \mathbb{Z}\overline{X}_r \text{ where } \overline{X}_r = X_r \setminus \operatorname{Deg}_r(X) +$$ + +$$ +\partial_r(x) = \sum_{i,\ell} (-1)^{i+\ell} \partial_i^\ell x \quad \text{where } x \in \overline{X}_r. +$$ + +The homology of $X$ is then the homology of the complex $(C_*(X), \partial_*)$, so that + +$$ +H_r(X) = \ker \partial_r / \operatorname{Im} \partial_{r+1}. +$$ + +An $r$-cube $\varphi : 1_r \rightarrow \Lambda$ is called degenerate if its rank is strictly less than $r$. Otherwise it is said to be nondegenerate. We define + +$$ +\overline{Q}_r(\Lambda) = \{\varphi : 1_r \to \Lambda : \varphi \text{ is a nondegenerate } r\text{-cube}\} +$$ + +$$ +D_r(\Lambda) = \{\varphi : 1_r \to \Lambda : \varphi \text{ is a degenerate } r\text{-cube}\}, +$$ + +so $\widetilde{Q}_r(\Lambda) = \overline{Q}_r(\Lambda) \sqcup D_r(\Lambda).$ + +**Lemma A.5.** Let $\Lambda$ be a $k$-graph. 
Then + +(1) for $1 \le i \le r+1$ and $\ell = 0, 1$, $\bar{\epsilon}_i^\ell : \widetilde{Q}_{r+1}(\Lambda) \to \widetilde{Q}_r(\Lambda)$ preserves nondegenerate cubes, that is for $\varphi \in \overline{Q}_{r+1}(\Lambda)$ we have $\bar{\epsilon}_i^\ell(\varphi) \in \overline{Q}_r(\Lambda)$; + +(2) for $1 \le i \le r$ and any $\varphi \in \widetilde{Q}_{r-1}(\Lambda)$ we have $\bar{\eta}_i(\varphi) \in D_r(\Lambda)$; + +(3) for all $r \ge 1$ we have $D_r(\Lambda) = \bigcup_{i=1}^r \bar{\eta}_i(\widetilde{Q}_{r-1}(\Lambda))$. + +*Proof.* For (1), suppose that $\varphi : 1_{r+1} \to \Lambda$ has rank $r+1$. Then $\bar{\epsilon}_i^\ell(\varphi) : 1_r \to \Lambda$ has rank $r$; so $\bar{\epsilon}_i^\ell(\varphi) \in \overline{Q}_r(\Lambda)$. + +For (2), suppose that $\varphi : 1_{r-1} \to \Lambda$ has rank $t \le r-1$. Then $\bar{\eta}_i(\varphi) : 1_r \to \Lambda$ has rank $t < r$; so $\bar{\eta}_i(\varphi) \in D_r(\Lambda)$. + +For (3), suppose that $\varphi \in D_r(\Lambda)$, that is $\varphi : 1_r \to \Lambda$ has rank $t < r$. Then there is an admissible map $h : \mathbb{N}^r \to \mathbb{N}^k$ of rank $t$ such that $d_\Lambda \circ \varphi = h \circ d_{1_r}$. Let $1 \le i \le r$ be such that $h(e_i) = 0$. Since $\varphi$ does not depend on the $i^{\text{th}}$ coordinate, we have $\varphi = \bar{\eta}_i\bar{\epsilon}_i^0(\varphi)$; hence, $\varphi = \bar{\eta}_i(\varphi')$ where $\varphi' = \bar{\epsilon}_i^0(\varphi) \in \widetilde{Q}_{r-1}(\Lambda)$. $\square$ + +Grandis builds his directed homology from the complex given in the following lemma (see [14, §2.1]). +---PAGE_BREAK--- + +**Lemma A.6.** Let $\Lambda$ be a k-graph. Let + +$$ +\overline{C}_r(\Lambda) = \mathbb{Z}\overline{Q}_r(\Lambda) +$$ + +(A.7) + +$$ +\bar{\partial}_r(\lambda) = \sum_{\ell=0}^{1} \sum_{i=1}^{r} (-1)^{i+\ell} \bar{\varepsilon}_i^{\ell}(\lambda) \quad \lambda \in \overline{Q}_r(\Lambda) +$$ + +Then $(\overline{C}_*(\Lambda), \overline{\partial}_*)$ is a chain complex. 
+ +*Proof.* Theorem A.4 implies that $\tilde{Q}(\Lambda) = (\tilde{Q}_r(\Lambda), \bar{\epsilon}_i^\ell, \bar{\eta}_i)$ is a cubical set. By Lemma A.5 (1) we see that $\bar{\epsilon}_i^\ell(\overline{Q}_r(\Lambda)) \subset \overline{Q}_{r-1}(\Lambda)$ and so $\bar{\partial}_r$ is well defined. That $\bar{\partial}_r \circ \bar{\partial}_{r+1} = 0$ follows from the property (A.1) of $\bar{\epsilon}_i^\ell$. Hence, $(\overline{C}_*(\Lambda), \overline{\partial}_*)$ is a complex. $\square$ + +Our aim is to show that the homology $\overline{H}_*(\Lambda)$ defined by the complex $(\overline{C}_*(\Lambda), \overline{\partial}_*)$ is the same as the homology of the complex $(C_*(\Lambda), \partial_*)$ described in §3. We do this in Theorem A.9 by showing that the complexes are isomorphic. Recall the definition of $Q_r(\Lambda)$ given in §2: + +$$ +Q_r(\Lambda) = \{\lambda \in \Lambda : d(\lambda) \le \mathbf{1}_k, |d(\lambda)| = r\}. +$$ + +**Lemma A.7.** Let $\Lambda$ be a k-graph. For $r \ge 0$ and $\lambda \in Q_r(\Lambda)$ there is a unique $\varphi_\lambda \in \overline{Q}_r(\Lambda)$ such that $\varphi_\lambda(0, \mathbf{1}_r) = \lambda$. Conversely, given $\varphi \in \overline{Q}_r(\Lambda)$, the path $\lambda = \varphi(0, \mathbf{1}_r) \in Q_r(\Lambda)$ satisfies $\varphi_\lambda = \varphi$. The map $\lambda \mapsto \varphi_\lambda$ is a bijection from $Q_r(\Lambda)$ to $\overline{Q}_r(\Lambda)$ with inverse $\varphi \mapsto \varphi(0, \mathbf{1}_r)$. + +*Proof.* The result is trivial when $r = 0$ because $\mathbb{1}_0 = \{\emptyset\}$. + +Fix $r \ge 1$ and $\lambda \in Q_r(\Lambda)$. Let $d(\lambda) = e_{i_1} + \cdots + e_{i_r}$, and define an admissible map $h : \mathbb{N}^r \to \mathbb{N}^k$ by $h(e_j) = e_{i_j}$ for $j = 1, \dots, r$. 
Define $\varphi_\lambda : \mathbb{1}_r \to \Lambda$ by + +$$ +\varphi_{\lambda}(m, n) = \lambda(h(m), h(n)) +$$ + +Then $\varphi_{\lambda}: \mathbb{1}_r \rightarrow \Lambda$ is a nondegenerate $r$-cube with $\varphi_{\lambda}(0, \mathbb{1}_r) = \lambda$. The factorisation property ensures that there is only one nondegenerate cube with range $\lambda$. + +Now fix $\varphi \in \overline{Q}_r(\Lambda)$. Suppose that $d(\varphi(0, \mathbf{1}_r)) = e_{i_1} + \cdots + e_{i_r}$ with $1 \le i_1 < \cdots < i_r \le k$. Let $\lambda = \varphi(0, \mathbf{1}_r)$ and define $h : \mathbb{N}^r \to \mathbb{N}^k$ by $h(e_j) = e_{i_j}$. Then for $(m, n) \in \mathbb{1}_r$ we have + +$$ +\varphi_{\lambda}(m, n) = \lambda(h(m), h(n)) = \varphi(m, n); +$$ + +so $\varphi_{\lambda} = \varphi$ as required. $\square$ + +Recall from Section 2 that for $\lambda \in Q_r(\Lambda)$, if we express $d(\lambda) = e_{i_1} + \cdots + e_{i_r}$ with $1 \le i_1 < \cdots < i_r \le k$, then + +$$ +F_j^0(\lambda) = \lambda(0, d(\lambda) - e_{i_j}) \text{ and } F_j^1(\lambda) = \lambda(e_{i_j}, d(\lambda)). +$$ + +**Lemma A.8.** Let $\Lambda$ be a k-graph and $r \ge 1$. Then for $\lambda \in Q_r(\Lambda)$ we have + +$$ +(A.8) \qquad \bar{\epsilon}_j^\ell(\varphi_\lambda)(0, \mathbf{1}_{r-1}) = F_j^\ell(\lambda) \text{ in } Q_{r-1}(\Lambda). +$$ +---PAGE_BREAK--- + +*Proof.* Let $d(\lambda) = e_{i_1} + \dots + e_{i_r}$ and define $h : \mathbb{N}^r \to \mathbb{N}^k$ by $h(e_j) = e_{i_j}$ for $j \le r$. Then + +$$ +\begin{align*} +\bar{\epsilon}_j^\ell(\varphi_\lambda)(0, \mathbf{1}_{r-1}) &= \varphi_\lambda(\epsilon_j^\ell(0), \epsilon_j^\ell(\mathbf{1}_{r-1})) \\ +&= \lambda(h(\epsilon_j^\ell(0)), h(\epsilon_j^\ell(\mathbf{1}_{r-1}))) \\ +&= \begin{cases} \lambda(0, d(\lambda) - e_{i_j}) & \text{if } \ell = 0 \\ \lambda(e_{i_j}, d(\lambda)) & \text{if } \ell = 1 \end{cases} \\ +&= F_j^\ell(\lambda). 
+\end{align*} +$$ + +$\square$ + +**Theorem A.9.** Let $\Lambda$ be a $k$-graph. Then the bijection of Lemma A.7 induces an isomorphism of complexes $(C_*(\Lambda), \partial_*) \cong (\overline{C}_*(\Lambda), \overline{\partial}_*)$. Hence $\overline{H}_*(\Lambda) \cong H_*(\Lambda)$. + +*Proof.* By Lemma A.7 the map $\lambda \mapsto \varphi_\lambda$ induces an isomorphism $\theta_r : C_r(\Lambda) \to \overline{C}_r(\Lambda)$. Let $\lambda \in Q_r(\Lambda)$. By Lemma A.8 we have $\theta_{r-1}(F_i^\ell(\lambda)) = \bar{\epsilon}_i^\ell(\varphi_\lambda)$ for $i = 1, \dots, r$ and $\ell = 0, 1$. Hence, by (3.1) and (A.7) we have + +$$ +\bar{\partial}_r \theta_r(\lambda) = \theta_{r-1} \partial_r(\lambda) +$$ + +and the result follows. $\square$ + +## APPENDIX B. TOPOLOGICAL REALISATIONS + +Given a $k$-graph $\Lambda$ we show that the topological realisation $X_\Lambda$ of $\Lambda$ is homeomorphic to the topological realisation $\mathcal{R}\tilde{Q}(\Lambda)$ of the associated cubical set $\tilde{Q}(\Lambda)$ as defined in [14, §1.8]. We define the cocubical set $\mathbf{I}^* = (\mathbf{I}^r, \dot{\varepsilon}_i^\ell, \dot{\eta}_i)$ of [14] as follows (we modify Grandis' notation to align with ours from Appendix A). For $r \ge 0$ let $\mathbf{I}^r$ be the unit cube in $\mathbb{R}^r$. For $1 \le i \le r + 1$ and $\ell \in \{0, 1\}$ define the coface maps $\dot{\varepsilon}_i^\ell : \mathbf{I}^r \to \mathbf{I}^{r+1}$ and for $1 \le i \le r$ define codegeneracy maps $\dot{\eta}_i : \mathbf{I}^r \to \mathbf{I}^{r-1}$ by + +$$ +\dot{\varepsilon}_i^\ell(t)_j = \begin{cases} t_j & \text{if } j < i \\ \ell & \text{if } j = i \\ t_{j-1} & \text{if } j > i \end{cases} \quad \text{and} \quad \dot{\eta}_i(t)_j = \begin{cases} t_j & \text{if } j < i \\ t_{j+1} & \text{if } j \ge i. 
\end{cases} +$$ + +Recall from [14] that $\mathcal{R}\tilde{Q}(\Lambda)$ is a topological space endowed with maps $\hat{\varphi} : \mathbf{I}^r \to \mathcal{R}\tilde{Q}(\Lambda)$ for each $\varphi \in \tilde{Q}_r(\Lambda)$ satisfying + +$$ +(B.1) \qquad \hat{\varphi} \circ \dot{\varepsilon}_i^\ell = (\bar{\varepsilon}_i^\ell(\varphi))^\wedge \quad \text{and} \quad \hat{\varphi} \circ \dot{\eta}_i = (\bar{\eta}_i(\varphi))^\wedge, +$$ + +and is uniquely determined by the property that for any topological space $X$ and any collection of continuous maps $\{\tilde{\varphi} : \mathbf{I}^r \to X \mid 1 \le r, \varphi \in \tilde{Q}_r(\Lambda)\}$ satisfying + +$$ +(B.2) \qquad \tilde{\varphi} \circ \dot{\varepsilon}_i^\ell = (\bar{\varepsilon}_i^\ell(\varphi))^\sim \quad \text{and} \quad \tilde{\varphi} \circ \dot{\eta}_i = (\bar{\eta}_i(\varphi))^\sim, +$$ + +there is a unique continuous map $\pi : \mathcal{R}\tilde{Q}(\Lambda) \to X$ satisfying $\pi \circ \hat{\varphi} = \tilde{\varphi}$ for all $\varphi \in \tilde{Q}(\Lambda)$. + +Fix $\varphi \in \tilde{Q}_r(\Lambda)$ and let $h: \mathbb{N}^r \to \mathbb{N}^k$ be the associated admissible map. As in [21] extend $h$ to a map from $\mathbb{R}^r$ to $\mathbb{R}^k$ by setting $h(t) := \sum_{i=1}^{r} t_i h(e_i)$. We define a map $\tilde{\varphi}: \mathbf{I}_r \to X_\Lambda$ by + +$$ +(B.3) \qquad \tilde{\varphi}(t) = [\varphi(0, \mathbf{1}_r), h(t)]. +$$ + +**Lemma B.1.** Let $\Lambda$ be a k-graph. The maps $\tilde{\varphi} : I_r \to X_\Lambda$ of (B.3) are continuous, and satisfy (B.2). +---PAGE_BREAK--- + +*Proof.* Fix $\varphi \in \tilde{Q}_r(\Lambda)$ with associated admissible map $h$. Since $t \mapsto (\varphi(0, \mathbf{1}_r), h(t))$ is continuous from $\mathbf{I}^r$ to $\{\varphi(0, \mathbf{1}_r)\} \times [0, h(\mathbf{1}_r)]$, and since the quotient map from $\bigsqcup_{\lambda \in Q(\Lambda)} \{\lambda\} \times [0, d(\lambda)]$ to $X_\Lambda$ is also continuous, the map $\tilde{\varphi}$ is continuous. 
We check the identities (B.2). The calculations are routine but tedious so we only give a detailed proof of the first identity $\tilde{\varphi} \circ \dot{\varepsilon}_i^\ell = (\bar{\varepsilon}_i^\ell(\varphi))^\sim$, this being the more complicated of the two calculations. The second identity follows from similar calculations. Define $h': \mathbb{R}^{r-1} \to \mathbb{R}^k$ as in Remark A.3 by $h'(t_1, \dots, t_{r-1}) = h(t_1, \dots, t_{i-1}, 0, t_i, \dots, t_{r-1})$. For $t \in \mathbf{I}_{r-1}$ + +$$ +(B.4) \quad (\tilde{\varphi} \circ \dot{\varepsilon}_i^\ell)(t) = \tilde{\varphi}(\dot{\varepsilon}_i^\ell(t)) = [\varphi(0, \mathbf{1}_r), h(\dot{\varepsilon}_i^\ell(t))] = [\varphi(0, \mathbf{1}_r), h'(t) + \ell h(e_i)]. +$$ + +Since $h'$ is the admissible map associated to $\bar{\epsilon}_i^\ell(\varphi)$, we also have + +$$ +(B.5) \quad (\bar{\varepsilon}_i^\ell(\varphi))^\sim(t) = [\bar{\varepsilon}_i^\ell(\varphi)(0, \mathbf{1}_{r-1}), h'(t)] = [\varphi(\varepsilon_i^\ell(0, \mathbf{1}_{r-1})), h'(t)] +$$ + +Since $\ell$ is an integer, $h'(t) + \ell h(e_i) - [h'(t) + \ell h(e_i)] = h'(t) - [h'(t)]$. Moreover, by the factorisation property, we have + +$$ +\varphi(0, \mathbf{1}_r) = \varphi(0, \varepsilon_i^\ell(0))\varphi(\varepsilon_i^\ell(0, \mathbf{1}_{r-1}))\varphi(\varepsilon_i^\ell(\mathbf{1}_{r-1}), \mathbf{1}_r). +$$ + +Hence, considering separately the cases $\ell=0$ and $\ell=1$, one can verify that + +$$ +\varphi(0, \mathbf{1}_r)([h'(t) + \ell h(e_i)], [h'(t) + \ell h(e_i)]) = \varphi(\varepsilon_i^\ell(0, \mathbf{1}_{r-1}))([h'(t)], [h'(t)]). +$$ + +The definition (6.1) of the equivalence relation $\sim$ then gives + +$$ +(\varphi(0, \mathbf{1}_r), h'(t) + \ell h(e_i)) \sim (\varphi(\varepsilon_i^\ell(0, \mathbf{1}_{r-1})), h'(t)). +$$ + +Combining this with (B.4) and (B.5) establishes the first identity in (B.2). 
$\square$ + +By Lemma B.1 and the defining property of $\mathcal{R}\tilde{Q}(\Lambda)$, there is a unique continuous map $\pi : \mathcal{R}\tilde{Q}(\Lambda) \rightarrow X_{\Lambda}$ such that $\pi \circ \hat{\varphi} = \tilde{\varphi}$ for all $\varphi \in \tilde{Q}(\Lambda)$. + +**Theorem B.2.** Let $\Lambda$ be a k-graph. The map $\pi : \mathcal{R}\tilde{Q}(\Lambda) \to X_\Lambda$ is a homeomorphism. + +*Proof.* We construct a continuous inverse $\psi$ for $\pi$. Define $\psi_0 : \bigsqcup_{d(\lambda) \le 1_k} \{\lambda\} \times [0, d(\lambda)] \to \mathcal{R}\tilde{Q}(\Lambda)$ by + +$$ +\psi_0(\lambda, t) := \hat{\varphi}_\lambda(t), +$$ + +where $\varphi_\lambda : 1_{|\lambda|} \to \Lambda$ is the $k$-graph quasimorphism canonically associated to $\lambda$. The map $\psi_0$ is clearly continuous. + +If $\psi([\mu, s]) := \psi_0(\mu, s)$ determines a well-defined map $\psi : X_\Lambda \to \mathcal{R}\tilde{Q}(\Lambda)$, then it will be +continuous by definition of the topology on $X_\Lambda$, and will be an inverse for $\pi$. So suppose +that $(\mu, s) \sim (\nu, t)$ where $\mu, \nu \in Q(\Lambda)$. Let $I_{(\mu,s)} := \{j : d(\mu)_j = 1 \text{ and } s_j \in \{0, 1\}\}$, and +define $I_{(\mu,t)}$ similarly. List $I_{(\mu,s)} = \{j_1, \dots, j_p\}$ where $j_1 < \dots < j_p$. Define $F_{(\mu,s)}$ to be the +composition of face maps $F_{(\mu,s)} = F_{j_1}^{s_{j_1}} \circ \dots \circ F_{j_p}^{s_{j_p}}$ (with the convention that if $I_{(\mu,s)} = \emptyset$, +then $F_{(\mu,s)}$ is the identity map), and define $F_{(\nu,t)}$ similarly. Then + +$$ +F_{(\mu,s)}(\mu) = \mu([s], [s]) = \nu([t], [t]) = F_{(\nu,t)}(\nu) +$$ + +because $[\mu, s] = [\nu, t]$. Let $s' := s - [s]$ and $t' := t - [t]$. Then + +$$ +(\mu, s) \sim (F_{(\mu,s)}(\mu), s') = (F_{(\nu,t)}(\nu), t') \sim (\nu, t), +$$ +---PAGE_BREAK--- + +so it suffices to show that $\psi_0(\mu, s) = \psi_0(F_{(\mu,s)}(\mu), s')$. 
Let $\dot{\epsilon}_{(\mu,s)} : \mathbf{I}_{|\mu|-|I_{(\mu,s)}|} \to \mathbf{I}_{|\mu|}$ be the composition $\dot{\epsilon}_{j_p}^{s_{j_p}} \circ \dots \circ \dot{\epsilon}_{j_1}^{s_{j_1}}$. Let $\bar{\epsilon}_{(\mu,s)}$ be the composition of face maps in $\tilde{Q}(\Lambda)$ corresponding to $F_{(\mu,s)}$. It is routine to see that + +$$\varphi_{F_{(\mu,s)}(\mu)} = \bar{\epsilon}_{(\mu,s)}(\varphi_{\mu}).$$ + +Hence the identities (B.1) imply that + +$$\hat{\varphi}_{F_{(\mu,s)}(\mu)} = (\bar{\epsilon}_{(\mu,s)}(\varphi_{\mu}))^{\wedge} = \hat{\varphi}_{\mu} \circ \dot{\epsilon}_{(\mu,s)}.$$ + +In particular, if $\overline{s}$ and $\overline{s}'$ are the elements of $\mathbf{I}_{|\mu|}$ and $\mathbf{I}_{|\mu|-|I_{(\mu,s)}|}$ which map to $s$ and $s'$ under the associated admissible maps, then + +$$\psi_0(F_{(\mu,s)}(\mu), s') = \hat{\varphi}_{F_{(\mu,s)}(\mu)}(\overline{s}') = \hat{\varphi}_{\mu} \circ \dot{\epsilon}_{(\mu,s)}(\overline{s}') = \hat{\varphi}_{\mu}(\overline{s}) = \psi_0(\mu, s). \quad \square$$ + +## REFERENCES + +[1] P. F. Baum, P. M. Hajac, R. Matthes, and W. Szymański, *The K-theory of Heegaard-type quantum 3-spheres*, K-Theory **35** (2005), 159–186. + +[2] B. Blackadar, *Shape theory for C*-algebras, Math. Scand. **56** (1985), 249–275. + +[3] B. Blackadar, *K-theory for operator algebras*. MSRI Publications vol. 5, Cambridge University Press, 1998. + +[4] K. Brown, *Cohomology of groups*, Graduate Texts in Mathematics, **87**, Springer-Verlag, New York-Berlin, 1982. + +[5] R. Brown and P. J. Higgins, *The equivalence of $\omega$-groupoids and cubical T-complexes*, Cahiers Topologie Géom. Différentielle **22** (1981), 349–370. + +[6] K. R. Davidson and D. Yang, *Periodicity in rank 2 graph algebras*, Canad. J. Math. **61** (2009), 1239–1261. + +[7] K. R. Davidson and D. Yang, *Representations of higher rank graph algebras*, New York J. Math. **15** (2009), 169–198. + +[8] S. Eilenberg and N. 
Steenrod, Foundations of algebraic topology, Princeton University Press, Princeton, New Jersey, 1952, xv+328. + +[9] G. Elliott and H. Li, *Strong Morita equivalence of higher-dimensional noncommutative tori*. II, Math. Ann. **341** (2008), 825–844. + +[10] D. G. Evans, *On the K-theory of higher-rank graph C*-algebras*, New York J. Math. **14** (2008), 1–31. + +[11] C. Farthing, P. S. Muhly and T. Yeend, *Higher-rank graph C*-algebras: an inverse semigroup and groupoid approach, Semigroup Forum **71** (2005), 159–187. + +[12] C. Farthing, D. Pask and A. Sims, *Crossed products of k-graph C*-algebras by $\mathbb{Z}^l$, Houston J. Math. **35** (2009), 903–933. + +[13] L. Fajstrup, M. Raußen, E. Goubault, *Algebraic topology and concurrency*, Theoret. Comput. Sci. **357** (2006), 241–278. + +[14] M. Grandis, *Directed combinatorial homology and noncommutative tori (The breaking of symmetries in algebraic topology)*, Math. Proc. Cambridge Philosophical Soc. **138** (2005), 233–262. + +[15] M. Grandis and L. Mauri, *Cubical sets and their site*, Theory Appl. Categ. **11** (2003), 185–211. + +[16] P. M. Hajac, R. Matthes, W. Szymański, *A locally trivial quantum Hopf fibration*, Algebr. Represent. Theory **9** (2006), 121–146. + +[17] A. Hatcher, *Algebraic topology*, Cambridge University Press, Cambridge, 2002, xii+544. + +[18] R. Hazelwood, I. Raeburn, A. Sims and S. B. G. Webster, *On some fundamental results about higher-rank graphs and their C*-algebras*, in preparation. + +[19] S. B. Isaacson, *Symmetric cubical sets*, J. Pure Appl. Algebra **215** (2011), 1146–1173. + +[20] B. Itzá-Ortiz and N. C. Phillips, *Realization of a simple higher-dimensional noncommutative torus as a transformation group C*-algebra*, Bull. London Math. Soc. **40** (2008), 217–226. + +[21] S. Kaliszewski, A. Kumjian, J. Quigg and A. Sims, *Topological realisations of higher-rank graphs*, in preparation. + +[22] S. Kaliszewski, N. Patani and J. 
Quigg, *Obstructions to a general characterisation of graph correspondences*, preprint 2010 (arXiv:1010.3185v1 [math.OA]). +---PAGE_BREAK--- + +[23] A. A. Khusainov, *Homology groups of semi-cubical sets*, Sibirsk. Mat. Zh. **49** (2008), 224–237. + +[24] A. Kumjian and D. Pask, *$C^*$-algebras of directed graphs and group actions*, Ergodic Theory Dynam. Systems **19** (1999), 1503–1519. + +[25] A. Kumjian and D. Pask, *Higher rank graph $C^*$-algebras*, New York J. Math. **6** (2000), 1–20. + +[26] A. Kumjian, D. Pask and A. Sims, *$C^*$-algebras associated to coverings of $k$-graphs*, Doc. Math. **13** (2008), 161–205. + +[27] A. Kumjian, D. Pask and A. Sims, *Generalised morphisms of $k$-graphs: $k$-morphs*, Trans. Amer. Math. Soc. **363** (2011), 2599–2626. + +[28] A. Kumjian, D. Pask and A. Sims, *Cohomology of $k$-graphs and twisted $C^*$-algebras*, in preparation. + +[29] S. Mac Lane, *Homology*. Die Grundlehren der mathematischen Wissenschaften, Band 114. Springer-Verlag, Berlin-New York, 1967. + +[30] W. Massey, *A basic course in algebraic topology*, Graduate Texts in Mathematics **127** Springer-Verlag, Berlin-New York, 1991. + +[31] J. Milnor, *On axiomatic homology theory*, Pacific J. Math. **12** (1962), 337–341. + +[32] J. R. Munkres, *Elements of algebraic topology*, Addison-Wesley Publishing Company, Menlo Park, CA, 1984, ix+454. + +[33] D. Pask, J. Quigg and I. Raeburn, *Fundamental groupoids of $k$-graphs*, New York J. Math. **10** (2004), 195–207. + +[34] D. Pask, J. Quigg and I. Raeburn, *Coverings of $k$-graphs*, J. Algebra **289** (2005), 161–191. + +[35] M. Pimsner and D. Voiculescu, *Exact sequences for $K$-groups and Ext-groups of certain cross-product $C^*$-algebras*, J. Operator Theory **4** (1980), 93–118. + +[36] I. Raeburn, A. Sims and T. Yeend, *Higher-rank graphs and their $C^*$-algebras*, Proc. Edinb. Math. Soc. (2) **46** (2003), 99–115. + +[37] I. Raeburn, A. Sims and T. 
Yeend, *The $C^*$-algebras of finitely aligned higher-rank graphs*, J. Funct. Anal. **213** (2004), 206–240. + +[38] G. Robertson and T. Steger, *Affine buildings, tiling systems and higher rank Cuntz-Krieger algebras*, J. reine angew. Math. **513** (1999), 115–144. + +[39] J-P. Serre, *Trees*. Springer-Verlag, Berlin-New York, 1980. + +[40] A. Skalski and J. Zacharias, *Entropy of shifts on higher-rank graph $C^*$-algebras*, Houston J. Math. **34** (2008), 269–282. + +[41] A. Skalski and J. Zacharias, *Poisson transform for higher-rank graph algebras and its applications*, J. Operator Theory **63** (2010), 425–454. + +[42] J. Stillwell, *Classical Topology and Combinatorial Group Theory*, Graduate Texts in Mathematics **72** Springer-Verlag, Berlin-New York, 1980. + +[43] S. Yamashita, *Some results on product system $C^*$-algebras and topological higher-rank graphs*, preprint 2009 (arXiv:0911.2978v1 [math.OA]). + +ALEX KUMJIAN, DEPARTMENT OF MATHEMATICS (084), UNIVERSITY OF NEVADA, RENO NV 89557-0084, USA + +*E-mail address: alex@unr.edu* + +DAVID PASK, AIDAN SIMS, SCHOOL OF MATHEMATICS AND APPLIED STATISTICS, UNIVERSITY OF WOLLONGONG, NSW 2522, AUSTRALIA + +*E-mail address: dpask, asims@uow.edu.au* \ No newline at end of file diff --git a/samples_new/texts_merged/7334540.md b/samples_new/texts_merged/7334540.md new file mode 100644 index 0000000000000000000000000000000000000000..cdca7ec0965725f6a2754b1d85cf573b1214608b --- /dev/null +++ b/samples_new/texts_merged/7334540.md @@ -0,0 +1,56 @@ + +---PAGE_BREAK--- + +# Solution to 2018-1 Problem 1 + +VIERA ČERŇANOVÁ + +Trnava University, Faculty of Education, +Department of Mathematics and Computer Science, +Priemyselná 4, 918 43 Trnava, Slovakia +e-mail:vieracernanova@hotmail.com + +**Abstract.** We give a solution to 2018-1 Problem 1. + +**Keywords.** sangaku, square, equilateral triangle, incircle. + +Mathematics Subject Classification (2010). 51M04. 
+ +**Problem 1.** ABCD is a square (see Figure 1), $F$ and $E$ are the points on the sides $AB$ and $DA$, respectively, such that $CEF$ is an equilateral triangle, $G$ and $H$ are points on the segment $EF$ such that $AGH$ is an equilateral triangle. Prove or disprove that the diameter of the incircle of $CEF$ equals $AG$. + +FIGURE 1. + +¹This article is distributed under the terms of the Creative Commons Attribution License which permits any use, distribution, and reproduction in any medium, provided the original author(s) and the source are credited. +---PAGE_BREAK--- + +**Solution.** Denote $I$ the foot of perpendicular from $G$ to $AB$, and $\mathcal{K}$ the incircle of $CEF$. Set $\theta = \angle BCF = \angle IAG$. Notice that $IF = IG$. Then + +$$CB = AI + IF + FB$$ + +implies + +$$CF \cos \theta = AG \cos \theta + AG \sin \theta + CF \sin \theta,$$ + +and consequently + +$$\frac{AG}{CF} = \frac{\cos \theta - \sin \theta}{\cos \theta + \sin \theta} = \frac{\cos 2\theta}{1 + \sin 2\theta} = \frac{\sqrt{3}/2}{1 + 1/2} = \frac{\sqrt{3}}{3}.$$ + +Finally, if $d$ is a diameter of $\mathcal{K}$, then + +$$d = \frac{2\sqrt{3}}{3}CF = AG.$$ + +**Remark.** The equilateral triangle $AGH$ is homothetic to $CEF$ through a homothety $\mathcal{H}$ with center in the common midpoint $M$ of the segments $EF$ and $GH$, and ratio $-AG/CF$. + +Applying $\mathcal{H}, \mathcal{H}^2, \dots$ to the square $ABCD$, the triangle $CEF$ and the circle $\mathcal{K}$, we obtain a sequence of squares, equilateral triangles and their incircles alternating on both sides of $EF$ (see Figure 2). + +FIGURE 2. + +Let $O$ be the center of $\mathcal{K}$. Notice that for $n = 0, 1, 2, \dots$, $\mathcal{H}^n(O)$ coincides with $\mathcal{H}^{n+2}(C)$. To prove this, it suffices to verify $AO = AA'$, where $A' = \mathcal{H}(A) = \mathcal{H}^2(C)$. 
+ +From the squares, we obtain + +$$AA' = \frac{\sqrt{3}}{3}CA = \frac{\sqrt{3}}{3}(CM + MA) = \frac{\sqrt{3}+1}{3}CM.$$ + +Since $C, M, A, O$ are collinear and $MO$ is inradius of $\triangle CEF$, + +$$AO = AM + MO = \frac{\sqrt{3}+1}{3}CM.$$ \ No newline at end of file diff --git a/samples_new/texts_merged/7342615.md b/samples_new/texts_merged/7342615.md new file mode 100644 index 0000000000000000000000000000000000000000..15a064b087df3b972ec8d276f83508fe3bf2e0db --- /dev/null +++ b/samples_new/texts_merged/7342615.md @@ -0,0 +1,309 @@ + +---PAGE_BREAK--- + +# Analytical Tools for Point Source Interferometry + +Gregory W. Hoth, Bruno Pelle, John Kitching, and Elizabeth A. Donley + +NIST, 325 Broadway, Boulder, CO 80305 + +## ABSTRACT + +Light pulse atom interferometry can be used to realize high-performance sensors of accelerations and rotations. In order to broaden the range of applications of these sensors, it is desirable to reduce their size and complexity. Point source interferometry (PSI) is a promising technique for accomplishing both of these goals. With PSI, rotations are measured by detecting the orientation and frequency of spatial fringe patterns in the atomic state. These spatial fringes are primarily due to a correlation between an atom's initial velocity and its final position, which is created by the expansion of a cold atom cloud. However, the fringe patterns are also influenced by the structure of the initial atomic distribution. We summarize several methods that can be used to investigate the relationship between the spatial fringe pattern and the initial atomic distribution. This relationship will need to be understood in detail to realize an accurate gyroscope based on PSI. + +**Keywords:** Light pulse atom interferometer, cold atom gyroscope, point source interferometry + +## 1. 
INTRODUCTION + +Light pulse atom interferometers (LPAIs) have achieved excellent performance as sensors of acceleration,1,2 rotation,3-6 gravity,7,8 and gravity gradients.9 So far, LPAIs have mostly been realized as large, laboratory scale experiments. In order to realize the full potential of these sensors for applications such as inertial navigation and gravimetric surveys, it is desirable to reduce the size and complexity of these systems to enable them to move outside the laboratory environment.6,10,11 Towards this goal, we are investigating the Point Source Interferometry (PSI) technique introduced by Dickerson12 et al. as an approach to realizing a compact, high performance LPAI gyroscope. + +In PSI, a $\frac{\pi}{2}-\pi-\frac{\pi}{2}$ pulse sequence is applied to an expanding cloud of cold, two-level atoms, and the expanded cloud is imaged with state-selective detection.12 With this three-pulse sequence, the initial $\frac{\pi}{2}$ pulse puts each atom into a superposition of two states with different momenta.13 After the first pulse, there is a free expansion period of duration $T_R$ which allows the two parts of the superposition to separate in space. Then, the $\pi$ pulse exchanges the momentum kick between the two parts of the superposition. After a second free expansion period with duration $T_R$, the two parts of the superposition overlap again, and the final $\frac{\pi}{2}$ pulse closes the interferometer. The pulses are typically implemented with stimulated Raman transitions.13,14 With this approach, the internal state of the atoms after the pulse sequence depends on the phase shift between the two paths the atoms can take through the interferometer. 
With the three-pulse sequence, both accelerations (a) and rotations ($\Omega$) of the apparatus produce phase shifts, which are given by + +$$ \Phi_a = \vec{k}_{\text{eff}} \cdot \vec{a} T_R^2, \quad (1) $$ + +$$ \Phi_{\Omega} = 2\vec{k}_{\text{eff}} \cdot (\vec{\Omega} \times \vec{v}) T_R^2, \quad (2) $$ + +where $\vec{k}_{\text{eff}}$ is the effective wave-vector for the Raman transitions, $\vec{v}$ is the velocity of the atoms, and $T_R$ is the time between consecutive pulses. + +In order to realize a gyroscope, we must be able to isolate the phase shift due to rotations. With PSI, this is accomplished by exploiting the correlation between an atom's initial velocity and its final position created by + +E-mail: gregory.hoth@nist.gov +---PAGE_BREAK--- + +the expansion of the cloud. In particular, we can make the approximation $\vec{r} \approx \vec{v} T_{\text{ex}}$, where $\vec{r}$ is an atom's final position and $T_{\text{ex}}$ is the total expansion time. With this approximation, Eq. 2 becomes + +$$ \Phi_{\Omega} = \left( \frac{2T_{\text{R}}^2 (\vec{k}_{\text{eff}} \times \vec{\Omega})}{T_{\text{ex}}} \right) \cdot \vec{r} = \vec{k}_{\Omega} \cdot \vec{r}. \quad (3) $$ + +Through the cloud expansion, the velocity-dependent rotation phase shift becomes a spatial gradient in the interferometer phase described by $\vec{k}_{\Omega}$. This phase gradient will give rise to a spatial fringe pattern which can be detected by imaging the cloud. By measuring the frequency and orientation of the spatial fringes, we can infer two components of $\vec{\Omega}$. + +Point source interferometry has several features which suggest it is a promising candidate for a compact LPAI gyroscope. The spatial fringe pattern makes it possible to isolate the rotation phase shift with only one atomic source. 
In other three-pulse LPAI gyroscopes, two counter-propagating sources are required to distinguish the effects of rotations and accelerations.3,6,15,16 With PSI, the cold atom cloud does not have to be launched, which further simplifies the experimental sequence compared to other cold-atom LPAI gyroscopes. The rotational dynamic range can also be increased by the use of spatially resolved detection. Finally, PSI could enable the characterization of the wave-front aberrations of the beam used to drive the interferometer pulse sequence.12 These aberrations are an important limitation to the long-term stability of state of the art LPAI sensors.15,17 + +However, the benefits offered by PSI come with a cost. In a real system, the correlation between the atoms' initial velocities and their final positions is not completely determined by the expansion time. It also depends on the detailed structure of the initial distribution. In previous work,18 we have shown that the structure of the initial distribution can cause shifts in the gyroscope scale factor. Navigation grade gyroscopes are expected19 to have a scale factor stability of a few parts-per-million (ppm), and so the initial distribution will need to be carefully controlled to realize a high-performance PSI gyroscope. If the initial distribution has a Gaussian density profile and a velocity distribution characterized by a temperature $T$, then the bias introduced by the initial distribution can be described as a scale factor shift. Here, we show that this is not the case for most initial distributions. Therefore, it is important to investigate other biases on the rotation measurement that can be introduced by structure in the initial distribution. + +In this work, we describe several analytical tools that can be used to investigate the relationship between the initial distribution and the spatial fringe patterns. Section 2 derives an expression for the PSI signal in the point-source limit. 
Section 3 develops a model for the PSI fringes in the case of an initial cloud with an extended spatial distribution, $n_0(r)$, and a temperature, $T$, by treating the initial cloud as a collection of many point sources. The case of a cloud with a Gaussian initial density profile is considered in detail. In this case, it is possible to obtain an analytical solution for the PSI fringes that reveals several new features. Section 4 considers the spatial fringes in the Fourier domain. This picture reveals that essentially any structure in the initial distribution is expected to bias the frequency of the spatial fringes away from the point-source limit. Section 5 develops a model of the PSI fringes in phase space. This phase-space picture both provides an intuitive explanation of the effects of a finite initial cloud size and makes it possible to consider initial distributions where the velocity distribution is not described by a temperature. Finally, Section 6 summarizes the conclusions from these models. + +## 2. THE POINT-SOURCE LIMIT + +It is useful to consider the case where the atoms are initially concentrated in an infinitesimally small point source. This case is analytically tractable, and it reveals the essential physics of the PSI measurement. In this section, we will derive an expression for the density distribution of the population in one of the interferometer states after the $\frac{\pi}{2} - \pi - \frac{\pi}{2}$ pulse sequence in the point-source limit. In the following sections, we will leverage this simple case to model a cloud with a spatially extended initial distribution. To begin, consider a point source with $N$ two-level atoms at temperature $T$. The atoms have a velocity distribution given by + +$$ p(\vec{v}) = \frac{N}{(2\pi)^{\frac{3}{2}} \sigma_v^3} \exp(-\vec{v}^2 / 2\sigma_v^2), \quad (4) $$ +---PAGE_BREAK--- + +where the width of the velocity distribution is $\sigma_v = \sqrt{k_B T/m}$. At $t=0$, the cloud begins to expand. 
Since the initial cloud is a point source, the position of each atom after an expansion time $T_{\text{ex}}$ is given by $\vec{r} = \vec{v} T_{\text{ex}}$. The cloud expansion essentially maps the velocity distribution into a spatial distribution. Applying this principle to Eq. 4 gives + +$$n(\vec{r}, T_{\text{ex}}) = \frac{N}{(2\pi)^{\frac{3}{2}} \sigma_{\text{ps}}^3} \exp(-\vec{r}^2/2\sigma_{\text{ps}}^2), \quad (5)$$ + +where $\sigma_{\text{ps}} = \sigma_v T_{\text{ex}}$ characterizes the width of the expanded point source. + +As the cloud expands, a $\frac{\pi}{2} - \pi - \frac{\pi}{2}$ pulse sequence is applied. The effect of the interferometer pulse sequence is to change the internal state of the atoms. The probability for an atom to change its internal state can be described by + +$$p = (1 + c \cos(\Phi)) / 2, \quad (6)$$ + +where c is the contrast and $\Phi$ is the interferometer phase shift. In the point-source limit, the rotation phase shift can be expressed as a phase gradient given by Eq. 3. + +By combining Eqs. 3, 5, and 6, we obtain an expression for the final density profile of one of the interferometer states in the point-source limit. The result is + +$$n_{\text{ps}}(\vec{r}, t) = \frac{N \exp(-\vec{r}^2/2\sigma_{\text{ps}}^2) \left(1 + c \cos(\vec{k}_{\Omega} \cdot \vec{r} + \phi_0)\right)}{(2\pi)^{\frac{3}{2}} \sigma_{\text{ps}}^3} \quad (7)$$ + +where $\phi_0$ is a phase offset due to other sources of interferometer phase shifts. The density distribution for the other output state has the same form with the sign of the contrast reversed. + +In this derivation, we have neglected the effects of gravity and the Raman momentum kick on the motion of the atoms because neither of these effects alter the wave-vector of the spatial fringes. The effects of gravity and the Raman momentum kick during the $\frac{\pi}{2} - \pi - \frac{\pi}{2}$ pulse sequence are accounted for in the derivation of the interferometer phase shifts. 
If $\vec{g}$ is not parallel to $\vec{k}_{\text{eff}}$ and the cloud is allowed to fall before the beginning of the interferometer pulse sequence, then the change in the atoms' velocity due to gravity will produce a phase shift via Eq. 2. However, this phase shift will be the same for all the atoms in the cloud so it will not affect the phase gradient in Eq. 3. The Raman momentum kick and gravity do influence the final position of the atoms during detection. It is straight forward to include these effects in Eq. 7, but these details would only muddy the waters. + +### 3. MANY POINT SOURCES—A GAUSSIAN INITIAL CLOUD + +The case of an initial cloud with a density distribution $n_0(\vec{r})$ and a temperature $T$ can be described as a collection of many point sources. The evolution of each point source during the interferometer sequence can be described by Eq. 7, and the final distribution is given by the sum of all the expanded point sources. + +It is useful to begin by considering a small volume $dV$ of the initial cloud located at a position $\vec{R}$. The number of atoms contained in this region is given by $N_i = n_0(R) dV$. After a time $T_{\text{ex}}$, the atoms from this region will have expanded into a density distribution described by $(n_0(R) dV) n_{\text{ps}}(\vec{r}-\vec{R}, T_{\text{ex}})$ where $n_{\text{ps}}(\vec{s}, T_{\text{ex}})$ describes the profile of an expanded point source centered on $\vec{s}=0$. The final density distribution can be obtained by integrating over all possible values of $\vec{R}$, which gives + +$$n(\vec{r}, T_{\text{ex}}) = \int d^3R \, n_0(R) n_{\text{ps}}(\vec{r} - \vec{R}, T_{\text{ex}}) = n_0(\vec{r}) * n_{\text{ps}}(\vec{r}, T_{\text{ex}}), \quad (8)$$ + +where the $*$ operator represents convolution. + +In order to assess the implications of a finite initial size, it is useful to consider a Gaussian initial density distribution. In this case, it is possible to evaluate Eq. 8 analytically. 
Formally, the initial density distribution can be described by Eq. 5 with a characteristic width $\sigma_0$, and the point-source solution is described by Eq. 7. After evaluating the integral, we find that the final density distribution of each of the interferometer states is described by a Gaussian modulated by a spatial fringe as was the case for the point-source solution in Eq. 7. +---PAGE_BREAK--- + +However, the cloud size, the fringe frequency, and the fringe contrast are all modified. The final cloud size becomes $\sigma_f = \sqrt{\sigma_0^2 + \sigma_{\text{ps}}^2}$, which is the familiar result for an expanding cloud with a Gaussian initial density profile. The spatial fringe wave-vector becomes + +$$ \vec{k}_{\Omega,g} = \vec{k}_{\Omega,\text{ps}} (1 - \sigma_0^2 / \sigma_f^2), \quad (9) $$ + +where $\vec{k}_{\Omega,\text{ps}}$ is the expected phase gradient in the point-source limit (Eq. 3). The fringe contrast becomes + +$$ c(\Omega) = c_0 \exp(-k_{\Omega,\text{ps}}^2 \sigma_0^2 (1 - \sigma_0^2 / \sigma_f^2) / 2), \quad (10) $$ + +where $c_0$ is the interferometer contrast with $\Omega = 0$. + +Looking at Eq. 9 and Eq. 10, we can see that a finite initial cloud size has two effects. It causes the spatial fringe contrast to decrease as a function of $\Omega$, and it leads to a shift in the spatial fringe frequency. The loss of contrast can be understood by noting that when the cloud has a finite initial size, atoms with different initial velocities will end up at the same final position. Since our detection method is only sensitive to the final position of the atoms, we must average over this distribution of phase shifts, which will wash out the spatial fringes. + +The velocity spread at a point in the expanded cloud can be estimated by modeling the initial cloud as a uniform ball with diameter $d$. 
The maximum velocity spread will be the difference in velocities between atoms that start on opposite sides of the initial cloud, which is given by $\delta v = d/T_{\text{ex}}$. Via Eq. 2, this velocity spread corresponds to a range of phase shifts $\delta\phi = 2k_{\text{eff}}T_R^2\Omega\delta v$. The maximum observable rotation rate roughly corresponds to $\delta\phi = 2\pi$, which leads to + +$$ \Omega_{\max} = \frac{\pi}{k_{\text{eff}}d} \frac{T_{\text{ex}}}{T_R^2}. \quad (11) $$ + +The prediction of this simple model of the fringe contrast loss can be compared to the result obtained from Eq. 8 by calculating a rotation rate that characterizes the contrast loss. A convenient choice is the rotation rate where the contrast has fallen to 50 % of its initial value. For the case of a Gaussian initial cloud, Eq. 10 leads to + +$$ \Omega_{50\%} = \frac{\sqrt{\ln 4} T_{\text{ex}}}{k_{\text{eff}} \sigma_0 2 T_R^2} \left(1 - \left(\sigma_0 / \sigma_f\right)^2\right)^{-1/2}. \quad (12) $$ + +Looking at Eqs. 11 and 12, we can see that these two models make similar predictions for the rotation rate that characterizes the contrast loss. In the limit $\sigma_f \gg \sigma_0$, the two rotation rates differ by only a constant, which indicates that the simple picture of the contrast loss captures the essential physics. + +Unlike the contrast loss, it is difficult to precisely identify the source of the spatial frequency shift with this picture. By evaluating the integral in Eq. 8, we added up a large number of truncated sinusoids with central frequency $k_\Omega$ and obtained a truncated sinusoid with a different central frequency $k'_\Omega$. If we were working with the more familiar case of infinite sinusoids, this would not be possible. In the case of truncated sinusoids, it is possible for interference effects to shift the dominant frequency because each sinusoid actually contains a range of frequencies. 
+ +This interference effect can be visualized by comparing the spatial fringes from different parts of the initial cloud as shown in Fig. 1. In the left panel, each expanded point-source has a width that is equal to the initial cloud width ($\sigma_{\text{ps}} = \sigma_0$). In this small expansion regime, the spatial fringes from different regions of the initial cloud are out of phase with each other. As a result, they interfere and produce a final cloud with a spatial frequency that is significantly shifted from the frequency of the individual point sources. In the right panel, the point sources have expanded to be significantly larger than the initial cloud ($\sigma_{\text{ps}} = 3\sigma_0$), and so the spatial fringes from the individual point sources are more in phase with each other. As a result, the individual point sources combine to produce a cloud with a spatial frequency that is much closer to the point-source limit. + +This spatial interference effect provides an explanation for the frequency shift caused by the structure of the initial distribution, but it is difficult to generalize. We can gain more insight into the connection between the spatial fringe frequency and the structure of the initial distribution by studying the spatial fringe patterns in the Fourier domain. +---PAGE_BREAK--- + +Figure 1. A visualization of the interference effect which leads to the spatial fringe frequency shift. Seven point sources (colored lines) are used to approximate the spatial fringes produced with a Gaussian initial density distribution. The number of atoms in each point source is determined by the initial cloud shape. The sum of the seven point sources (solid black line) can be compared to a single point source (dashed line). In order to emphasize the spatial fringe, we plot the difference of the density distribution for the two interferometer states, which eliminates the offset in Eq. 7. 
(a) The width of each point source, $\sigma_{\text{ps}}$, is equal to the initial cloud size, $\sigma_0$, which corresponds to a final cloud size $\sigma_f = \sqrt{2}\sigma_0$. In this case, the fringes from the individual point sources are out of phase with each other. When all the individual point sources are added together, the frequency of the resulting spatial fringe is clearly different than the frequency of the individual point sources. (b) $\sigma_{\text{ps}} = 3\sigma_0$, which corresponds to a final cloud size $\sigma_f = \sqrt{10}\sigma_0$. In this case, the fringes from the individual point sources are more in phase and the frequency shift is much smaller. + +## 4. PSI FRINGES IN THE FOURIER DOMAIN + +So far, we have seen that the structure of the initial distribution can bias the frequency of the detected spatial fringes away from the prediction of the point-source limit. For a Gaussian initial density distribution, this bias takes the form of a shift in the scale factor connecting the spatial fringe to the detected fringe frequency, but it is not clear if this shift is somehow unique to the case of a Gaussian cloud. In this section, we will see that frequency shifts are expected to occur with essentially any initial distribution. We will also see that it is challenging to identify a general procedure that can be used to exactly determine the rotation rate from a measurement of the spatial fringe pattern. This indicates that detailed knowledge of the initial distribution will be necessary to realize an accurate PSI gyroscope. As a first step, we will take a closer look at the fringe patterns produced by a single point source. + +### 4.1 One Point Source + +Consider the density distribution for a single point source (Eq. 7) again. For simplicity, we'll work in one dimension with perfect fringe contrast ($c=1$) and set the total atom number $N=1$. 
With these simplifications, a point source centered at position $x_c$ expands into a density distribution given by + +$$n_{\text{ps}}(x) = \frac{1}{\sqrt{2\pi}\sigma_{\text{ps}}} \exp\left(-\frac{(x-x_c)^2}{2\sigma_{\text{ps}}^2}\right) \frac{(1+\cos(k_{\Omega}(x-x_c)+\phi))}{2} \quad (13)$$ + +The Fourier transform of the point-source solution is given by + +$$\hat{n}_{\text{ps}}(k) = \frac{e^{-ikx_c}}{\sqrt{8\pi}} \left( \exp\left(-\frac{k^2\sigma_{\text{ps}}^2}{2}\right) + \frac{e^{i\phi}}{2} \exp\left(-\frac{(k+k_{\Omega})^2\sigma_{\text{ps}}^2}{2}\right) + \frac{e^{-i\phi}}{2} \exp\left(-\frac{(k-k_{\Omega})^2\sigma_{\text{ps}}^2}{2}\right) \right). \quad (14)$$ + +The Fourier transform of the expanded point source, $\hat{n}_{\text{ps}}(k)$, has an intuitively appealing structure. It is a sum of three Gaussian peaks centered at $k = \pm k_{\Omega}$ and $k = 0$. Each peak has a width $\sigma_k = 1/\sigma_{\text{ps}}$, determined by the spatial width of the expanded point source. This structure is illustrated in Fig. 2a. +---PAGE_BREAK--- + +Figure 2. An illustration of the Fourier spectrum of the spatial fringes from a single point source in various cases. Panel a) illustrates a case where the spatial fringes are well resolved. The three peak structure is intuitive, but it only occurs if the spatial frequency $k_Ω$ is sufficiently large ($k_Ω \gg 1/σ_{ps}$). Panels b) and c) show that when $k_Ω \approx 1/σ_{ps}$, the Fourier spectrum can have only one peak or even peaks at frequencies other than $k_Ω$, depending on the interferometer phase $φ$. Panel d) illustrates the real-space density distribution for the three cases shown panels in a) to c). + +It is important to consider how we can determine the rotation rate from a measurement of the density distribution in practice. One approach is to estimate the dominant frequency in the Fourier spectrum of the imaged density distribution and convert it to a rotation rate with Eq. 3. 
This approach will work well as long as the point-source spatial fringe frequency satisfies $k_Ω \gg 1/σ_{ps}$. Physically, this corresponds to the case where one can observe several periods of the fringe pattern across the expanded cloud. In this case, the dominant frequency in the Fourier spectrum coincides with $k_Ω$ as we intuitively expect. However, when $k_Ω \sim 1/σ_{ps}$, the Fourier peaks are not resolved. In this case, the Fourier spectrum can depend strongly on the interferometer phase $φ$ and peaks can appear at frequencies other than $k_{Ω,ps}$ as shown in Fig. 2b,c. This makes it difficult to determine the rotation rate from the Fourier spectrum for small $Ω$. + +In the point-source limit, we can accurately estimate the rotation rate by combining several measurements of the density distribution with different overall phases. Each point in the cloud can be described by a fringe of the form $n(x) = y_0 + A \cos(\phi_0 + \varphi(x))$, where $y_0$ is the fringe offset, $A$ is the fringe amplitude, $\varphi(x)$ is the spatial phase shift, and $\phi_0$ is an overall phase-shift which can be controlled experimentally. The three fringe parameters, $A$, $y_0$, and $\varphi(x)$, can be determined from three separate images of the density distribution that correspond to different overall phases. A convenient choice is the set $\phi_0 = 0, \frac{\pi}{2},$ and $\pi$. In this case, the spatial phase is given +---PAGE_BREAK--- + +by + +$$ +\varphi(r) = \arctan \left( \frac{I_0 - I_{\pi}}{2I_{\pi/2} - (I_0 + I_{\pi})/2} \right), \quad (15) +$$ + +where $I_\phi$ refers to the image with the corresponding value of $\phi_0$. For a single point source, $\varphi(x) = k_\Omega x$ with $k_\Omega$ +given by Eq. 3 in all cases. However, things get more complicated if we allow even two point sources. + +4.2 Two Point Sources + +Consider two point sources with the same number of atoms separated by a distance x₀. 
Using the Fourier shift theorem, $\mathcal{F}(n(x - x_0)) = \exp(-ikx_0)\,\hat{n}(k)$, the two point-source Fourier spectrum can be expressed as
In the limit $\sigma_{\text{ps}} \to \infty$, we are working with the familiar case of +infinite sinusoids. By adding many infinite sinusoids together, we can change the amplitude and the overall phase +of the sinusoids, but the frequency of the sinusoids remains constant. Based on this limit, we can expect that as +$\sigma_{\text{ps}}$ increases, the dominant frequency in the Fourier spectrum and the spatially resolved phase should converge +to the values that we expect for a single point source, possibly with a reduced amplitude or an overall phase shift. +Figure 4 illustrates this principle for the case of $k_0 x_0 = 3.5$. In the case of $k_{\Omega} x_0 = \pi$, the oscillations completely +vanish in the limit $\sigma_{\text{ps}} \to \infty$, which is just what we expect for two sinusoids with an equal amplitude and a +$\pi$ phase shift. In the case $k_{\Omega} x_0 = 3.5$, we see that the peak in the Fourier spectrum converges to $k_{\Omega}$, and the +difference between the spatially resolved phase and the expected phase gradient becomes a constant phase shift. + +This two point-source model offers several new insights. First, we can see how two truncated sine waves with central frequency *k* can interfere to produce oscillations with a different dominant frequency, *k'*. The reason is that the truncated sinusoids each contain a range of frequencies and each frequency will have a different phase shift (represented by the exp(−ikx₀) factor in Eq. 16). Since each frequency component has a different phase shift, it is possible to suppress the oscillation at *k* while enhancing the oscillation at *k'*. Second, detailed knowledge of the source distribution is needed to accurately estimate Ω from a measurement of either the spatial phase or the dominant spatial frequency. We can see this by considering the variety of structure in the spatial phase and Fourier spectrum in just the examples considered so far. 
In the point-source limit, the spatial phase is a pure gradient, and the magnitude of the gradient is linearly related to the rotation rate. With only two point sources, both of these features disappear. If the spatial phase is approximated as a gradient, it will be important to ensure that any frequency biases introduced by non-linearities in the spatial phase are accounted for and acceptably small. One approach to minimizing these frequency biases is to ensure that the cloud expansion is sufficiently large. In the long expansion limit ($\sigma_{\text{ps}} \to \infty$), we must recover the dominant frequency and spatial phase gradient predicted by the point source model. +---PAGE_BREAK--- + +Figure 3. Examples of interference effects with two point sources. The model parameters are $\sigma_{\text{ps}} = 3$ and $x_0 = 1$. (a) Examples of the two point-source model where the dominant spatial frequency is not $k_{\Omega}$. If $k_{\Omega}x_0 = \pi$ (blue), there is no oscillation at $k_{\Omega}$ at all. If $k_{\Omega}x_0 = 3.5$ (purple), the dominant frequency is shifted to a higher frequency. The dashed lines indicate $k_{\Omega}$ for these two cases. (b) Real-space density distribution for these two cases of the two point-source model. (c) Spatial phase for these two cases of the two point-source model (solid lines) compared to the phase gradient we would expect for a single point source (dashed lines). If $k_{\Omega}x_0 = \pi$, the spatial phase is a gradient with slope $k_{\Omega}$, except for a $\pi$ phase jump at $x = 0.5$. For $k_{\Omega}x_0 = 3.5$, the spatially resolved phase is well approximated by a gradient with a slightly higher slope than $k_{\Omega}$, but a closer look reveals that the phase is no longer a pure gradient as shown in panel d). (d) $\varphi(x) - k_{\Omega}x$ for the case $k_{\Omega}x_0 = 3.5$. By examining this difference, we can see that in this case the spatial phase is no longer a pure gradient. This is also illustrated in Fig. 4. 
+ +## 4.3 Other Initial Density Distributions + +Now we will revisit the model for an arbitrary initial density distribution. Since Eq. 8 is a convolution, its Fourier transform has the form + +$$ \hat{n}(k,t) = \sqrt{2\pi} \hat{n}_0(k) \hat{n}_{\text{ps}}(k,t), \quad (17) $$ + +where $\hat{n}_0(k)$ is the Fourier transform of the initial distribution and $\hat{n}_{\text{ps}}$ is the Fourier transform of the point-source solution. Based on Eq. 17, we can expect that for any localized initial density distribution, there will be a decay of the spatial fringe contrast at large rotation rates and a shift in the frequency of the spatial fringes compared to the point-source limit. This is because any localized density distribution will have a Fourier transform that rolls off at large $k$ so we can think of $\hat{n}_0(k)$ as slowly decaying envelope. The decreasing amplitude of $\hat{n}_0(k)$ will lead to a decay in the contrast, and the slope of the envelope will cause a shift in the central frequency of the spatial fringes. The details of the contrast decay and the spatial fringe frequency shift will depend on the initial density profile. + +These general features can be illustrated by comparing two initial density profiles: a Gaussian and a box, both with full width characterized by $2\sigma_0$. The initial density distributions and their Fourier transforms are +---PAGE_BREAK--- + +Figure 4. Effect of increasing $\sigma_{\text{ps}}$ in the two point-source model with $k_{\Omega} = 3.5$ and $x_0 = 1$. (a) An illustration of the evolution of the spectrum as the point sources expand. The peak in the Fourier spectrum reduces in amplitude and converges to the expected frequency. (b) Difference between the spatial phase and the point-source phase gradient. As $\sigma_{\text{ps}}$ increases, this difference smooths out to a constant so that the spatial phase is equal to the point-source phase gradient with an overall offset. + +shown in Fig. 5a)-b). 
By looking at the Fourier transform of the initial density distributions, we can conclude +that the contrast will decay more slowly for the box-like density distribution than the Gaussian one. We can +also predict that the shifts in the central frequency of the spatial fringes will be smaller for the uniform box than +for the Gaussian because the roll-off is slower. + +These conclusions can be verified by examining the spatial fringe patterns produced with these initial density +distributions. Two cases are illustrated in Fig. 5c)-d). In panel c), the cloud has roughly tripled in size ($\sigma_{\text{ps}} = 3\sigma_0$), and one can clearly see shifts in the dominant frequency for both initial cloud shapes. As predicted, the initially Gaussian cloud leads to a larger frequency shift and a smaller fringe contrast. In panel d), $\sigma_{\text{ps}} = 15\sigma_0$ and the dominant frequency in the spectrum cannot be distinguished from $k_{\Omega}$ by eye for either initial distribution. + +**5. PSI IN PHASE SPACE** + +In the previous two sections, we have modeled the PSI fringes produced by a cloud with a finite initial size by +breaking the initial distribution down into many point sources. With this approach, we've identified two main +effects of the initial distribution. First, the spatial fringe contrast will decrease as a function of Ω. Second, +the dominant frequency of the spatial fringes for a given Ω will be shifted from the frequency calculated in the +point-source limit (Eq. 3). So far, we have explained this frequency shift as a consequence of the interference +of truncated sinusoids. This picture has allowed us to build some useful mathematical formalism, but it is also +rather abstract. It is important to note that all of the models presented so far assume the velocity distribution +of the atoms can be characterized by a uniform temperature. 
+ +We can gain more insight into the origins of the frequency shift by tracking the distribution of the atoms in +both position and velocity. This distribution is often called the phase-space density $\rho(x, v)$. In this section, we +will develop an alternative perspective on the PSI fringes by studying how the phase-space density evolves as +the cloud expands. The first step is to derive an evolution equation for $\rho$. When the cloud is freely expanding, +the velocity of each individual atom is constant. In a time $dt$, each atom moves a distance $dx = vdt$. Thus, we +have $\rho(x + vdt, v, t + dt) = \rho(x, v, t)$. After expanding $\rho$ to first order, we find that + +$$ \frac{\partial \rho}{\partial t} = -v \frac{\partial \rho}{\partial x}, \qquad (18) $$ + +which has solutions of the form $\rho(x, v, t) = f(x - vt)$. In particular, if $\rho_0(x, v)$ describes the phase-space density at $t=0$, then at future times + +$$ \rho(x, v, t) = \rho_0(x - vt, v). \tag{19} $$ +---PAGE_BREAK--- + +Figure 5. A comparison of the spatial fringes produced with a box-shaped initial cloud and a Gaussian initial cloud with parameters $\sigma_{ps} = 2$ and $k_\Omega = 0.8$. Panel a) shows the initial density distributions. Both are normalized to have area 1. The rectangle has a diameter of $d = 2\sigma_0$. Panel b) shows the Fourier spectrum of these initial distributions. According to Eq. 17, the Fourier spectrum of the initial density distribution can be thought of as a filter acting on the point source spectrum. Panels c) and d) show examples of the Fourier spectrum of the expanded cloud for these two initial density distributions. In panel c), $\sigma_{ps} = 3\sigma_0$ so the cloud has roughly tripled in size. For both initial cloud profiles, the dominant frequency is clearly shifted from $k_\Omega$, but the frequency shift is smaller for the box-like distribution. 
In panel d), $\sigma_{ps} = 15\sigma_0$, and the dominant frequency cannot be distinguished from $k_\Omega$ by eye. + +The phase space density for one of the interferometer states can be found by multiplying $\rho$ by the probability for an atom to occupy that state. This leads to + +$$ \rho_e(x, v, t) = \rho_0(x - vt, v) \frac{1 + c \cos (2k_{\text{eff}}v\Omega T_R^2 + \phi_0)}{2}, \quad (20) $$ + +where $\rho_e$ is the phase-space density for atoms in state $|e\rangle$. The density distribution at time $t$ can be found by integrating over all velocities + +$$ n(x,t) = \int dv \rho(x,v,t). \quad (21) $$ + +So far, this is more mathematical abstraction. The real power of this approach comes from visualizing the geometry of phase space. This is typically done by plotting position on the horizontal axis and velocity on the vertical axis. With this approach, the expansion of the cloud can be visualized by noting that atoms in the upper half of the plane move to the right, and the atoms in the lower half of the plane move to the left. At $t=0$, a point source is represented by a vertical line. At future times, the point-source phase-space density remains a straight line described by $v = x/T_{\text{ex}}$, which corresponds to a perfect correlation between the atoms' position and their velocity. The cloud expansion causes the line to rotate in the $x-v$ plane. +---PAGE_BREAK--- + +When the cloud has a finite initial size, its initial phase-space density can be visualized as a blob that is roughly symmetric around the origin. The cloud expansion stretches the initial phase-space blob horizontally. Since phase-space volume is conserved, this stretching also causes the phase-space density to thin out vertically so that it tends to become like a long thin cigar. This thinning out of the phase-space distribution corresponds to the build up of the correlation between the atoms' final position and their initial velocity. 
The expansion of the cloud in phase-space is illustrated for both a point source and a cloud with a Gaussian initial density distribution and a temperature T in Fig. 6. The lower half of the figure illustrates the distribution of interferometer phase shifts and the detected fringes. In these phase-space pictures, the interferometer phase shift is constant along horizontal lines because the phase shift depends only on the atoms' velocity and not their position. + +By comparing the phase-space distribution for the Gaussian cloud to the distribution for a point-source, we can see two effects of a finite initial size. First, the Gaussian cloud has a range of velocities at every point. As we saw in Sec. 3, this blurring of the correlation between the atoms' position and their velocity tends to reduce the contrast of the spatial fringes. Second, the expanded Gaussian cloud is tilted at a different angle than the point-source phase-space distribution. This tilt indicates that the average velocity is lower for the Gaussian initial cloud then we would expect based on the point-source limit. With the phase-space formalism, it is straightforward to calculate the average velocity at a given position for the Gaussian cloud case. The result is + +$$v_{\text{avg}}(x) = \left(1 - \frac{\sigma_0^2}{\sigma_f^2}\right) v_{\text{ps}} \quad (22)$$ + +where $v_{\text{ps}} = x/T_{\text{ex}}$ is the velocity the atoms would have if the cloud were a point source. For the Gaussian cloud, this reduction in the average velocity corresponds exactly to the shift in the spatial fringe frequency we first calculated in Eq. 9. With this perspective, we can see that the spatial frequency shift is fundamentally due to the imperfect correlation between the atoms' initial velocity and their final position. + +## 6. CONCLUSION + +We have described three pictures that can be used to quantify the relationship between the initial atomic distribution and the detected spatial fringe patterns. 
In the first picture, the final density distribution is calculated as the convolution of the initial density distribution with the point-source solution. This space-domain approach can yield exact solutions for a few initial distributions, and it is a useful tool for studying the implications of particular density profiles, but it is difficult to draw general conclusions about the relationship between the initial distribution and the fringe pattern with this approach. In the second picture, we consider the detected density distribution in the Fourier domain. With this picture, the Fourier transform of the initial density distribution can be thought of as a transfer function which filters the point-source solution. This picture revealed that frequency shifts are expected for essentially any localized initial distribution. The case of a Gaussian initial cloud is somewhat special because the frequency shift takes the form of a scale factor shift, and the spatial phase is expected to be a pure gradient. This will not be the case for most initial distributions. In the third picture, we consider the phase space density. By tracking the distribution of the atoms in both position and velocity, we can see that the spatial fringe frequency shift comes about because the atom's final position is an imperfect proxy for their initial velocity. + +In order to realize a high performance, PSI gyroscope, these finite size effects will have to be understood and controlled with excellent precision and stability. One approach would be to use an optical trap to control the initial atomic distribution.²⁰ Despite the challenges posed by these finite size effects, we believe the advantages offered by the PSI technique indicate that this route to a compact, cold-atom LPAI gyroscope is worth pursuing. + +## ACKNOWLEDGMENTS + +This work was funded by NIST. NIST is a US government agency and this work is not subject to copyright. +---PAGE_BREAK--- + +Figure 6. 
An illustration of the phase space picture of the PSI fringes. (Top left) The initial phase-space density for a point source (dashed line) and a Gaussian cloud. (Top right) Phase-space density for the point source and the Gaussian cloud after a time $T_{\text{ex}}$. The point-source phase-space density simply rotates, but the phase-space density for the Gaussian cloud elongates and thins out. (Bottom left) Phase-space distribution colored with the interferometer phase. Detection corresponds to averaging over the vertical velocity axis. (Bottom right) Detected spatial fringes in the point source and Gaussian cloud cases. +---PAGE_BREAK--- + +REFERENCES + +[1] McGuinness, H. J., Rakholia, A. V., and Biedermann, G. W., "High data-rate atom interferometer for measuring acceleration," *Applied Physics Letters* **100**, 011106 (Jan. 2012). + +[2] Lautier, J., Volodimer, L., Hardin, T., Merlet, S., Lours, M., Pereira Dos Santos, F., and Landragin, A., "Hybridizing matter-wave and classical accelerometers," *Applied Physics Letters* **105**, 144102 (Oct. 2014). + +[3] Gustavson, T. L., Landragin, A., and Kasevich, M. A., "Rotation sensing with a dual atom-interferometer Sagnac gyroscope," *Classical and Quantum Gravity* **17**, 2385-2398 (June 2000). + +[4] Barrett, B., Geiger, R., Dutta, I., Meunier, M., Canuel, B., Gauguet, A., Bouyer, P., and Landragin, A., "The Sagnac effect: 20 years of development in matter-wave interferometry," *Comptes Rendus Physique* **15**, 875-883 (Dec. 2014). + +[5] Berg, P., Abend, S., Tackmann, G., Schubert, C., Giese, E., Schleich, W., Narducci, F., Ertmer, W., and Rasel, E., "Composite-Light-Pulse Technique for High-Precision Atom Interferometry," *Physical Review Letters* **114**, 063002 (Feb. 2015). + +[6] Rakholia, A. V., McGuinness, H. J., and Biedermann, G. W., "Dual-Axis High-Data-Rate Atom Interferometer via Cold Ensemble Exchange," *Physical Review Applied* **2**, 054012 (Nov. 2014). 
+ +[7] Merlet, S., Bodart, Q., Malossi, N., Landragin, A., Santos, F. P. D., Gitlein, O., and Timmen, L., "Comparison between two mobile absolute gravimeters: optical versus atomic interferometers," *Metrologia* **47**(4), L9 (2010). + +[8] Hu, Z.-K., Sun, B.-L., Duan, X.-C., Zhou, M.-K., Chen, L.-L., Zhan, S., Zhang, Q.-Z., and Luo, J., "Demonstration of an ultrahigh-sensitivity atom-interferometry absolute gravimeter," *Physical Review A* **88**, 043610 (Oct. 2013). + +[9] Biedermann, G. W., Wu, X., Deslauriers, L., Roy, S., Mahadeswaraswamy, C., and Kasevich, M. A., "Testing gravity with cold-atom interferometers," *Physical Review A* **91**, 033629 (Mar. 2015). + +[10] Hauth, M., Freier, C., Schkolnik, V., Senger, A., Schmidt, M., and Peters, A., "First gravity measurements using the mobile atom interferometer GAIN," *Applied Physics B* **113**, 49-55 (Apr. 2013). + +[11] Battelier, B., Barrett, B., Fouché, L., Chichet, L., Antoni-Micollier, L., Porte, H., Napolitano, F., Lautier, J., Landragin, A., and Bouyer, P., "Development of compact cold-atom sensors for inertial navigation," *Proceedings of SPIE, Quantum Optics* **9900**, 990004 (Apr. 2016). + +[12] Dickerson, S. M., Hogan, J. M., Sugarbaker, A., Johnson, D. M. S., and Kasevich, M. A., "Multiaxis Inertial Sensing with Long-Time Point Source Atom Interferometry," *Physical Review Letters* **111**, 083001 (Aug. 2013). + +[13] Kasevich, M. and Chu, S., "Atomic interferometry using stimulated Raman transitions," *Physical Review Letters* **67**, 181-184 (July 1991). + +[14] Bordé, C. J., "Atomic interferometry with internal state labelling," *Physics Letters A* **140**, 10-12 (Sept. 1989). + +[15] Gauguet, A., Canuel, B., Lévèque, T., Chaibi, W., and Landragin, A., "Characterization and limits of a cold-atom Sagnac interferometer," *Physical Review A* **80**, 063604 (Dec. 2009). + +[16] Tackmann, G., Berg, P., Abend, S., Schubert, C., Ertmer, W., and Rasel, E. 
M., "Large-area Sagnac atom interferometer with robust phase read out," *Comptes Rendus Physique* **15**, 884-897 (Dec. 2014). + +[17] Schkolnik, V., Leykauf, B., Hauth, M., Freier, C., and Peters, A., "The effect of wavefront aberrations in atom interferometry," *Applied Physics B* **120**, 311-316 (June 2015). + +[18] Hoth, G. W., Pelle, B., Riedl, S., Kitching, J., and Donley, E. A., "Point source atom interferometry with a cloud of finite size," *Applied Physics Letters* **109**, 071113 (Aug. 2016). + +[19] Durfee, D. S., Shaham, Y. K., and Kasevich, M. A., "Long-Term Stability of an Area-Reversible Atom-Interferometer Sagnac Gyroscope," *Physical Review Letters* **97**, 240801 (Dec. 2006). + +[20] Grimm, R., Weidemüller, M., and Ovchinnikov, Y. B., "Optical Dipole Traps for Neutral Atoms," in [*Advances In Atomic, Molecular, and Optical Physics*], Walther, B. B. and Walther, H., eds., **42**, 95-170, Academic Press (2000). \ No newline at end of file diff --git a/samples_new/texts_merged/7563909.md b/samples_new/texts_merged/7563909.md new file mode 100644 index 0000000000000000000000000000000000000000..7adad07cd9f7551ebed0cd10161403bb6bd5d1fc --- /dev/null +++ b/samples_new/texts_merged/7563909.md @@ -0,0 +1,1334 @@ + +---PAGE_BREAK--- + +# From Cantor to Semi-hyperbolic Parameters along External Rays + +Yi-Chiuan Chen and Tomoki Kawahira + +March 2, 2019 + +**Abstract** + +For the quadratic family $f_c(z) = z^2 + c$ with $c$ in the exterior of the Mandelbrot set, it is known that every point in the Julia set moves holomorphically. Let $\hat{c}$ be a semi-hyperbolic parameter in the boundary of the Mandelbrot set. In this paper we prove that for each $z = z(c)$ in the Julia set, the derivative $dz(c)/dc$ is uniformly $O(1/\sqrt{|c-\hat{c}|})$ when $c$ belongs to a parameter ray that lands on $\hat{c}$. We also characterize the degeneration of the dynamics along the parameter ray. 
+ +## 1 Introduction and main results + +Let $\mathbb{M}$ be the *Mandelbrot set*, the connectedness locus of the quadratic family + +$$ \{f_c : z \mapsto z^2 + c\}_{c \in \mathbb{C}} $$ + +That is, the Julia set $J(f_c)$ is connected if and only if $c \in \mathbb{M}$. For $c \notin \mathbb{M}$, it is well-known that the Julia set $J(f_c)$ is a Cantor set, and the critical point $z=0$ does not belong to the Julia set. Moreover, $f_c$ with $c \notin \mathbb{M}$ is *hyperbolic*: i.e., there exist positive numbers $\alpha_c$ and $\beta_c$ such that $|Df_c^n(z)| \ge \alpha_c(1+\beta_c)^n$ for any $n \ge 0$ and $z \in J(f_c)$. + +**Holomorphic motion of the Cantor Julia sets.** For $c \notin \mathbb{M}$, because of hyperbolicity, every point in $z \in J(f_c)$ moves holomorphically with $c$. In other words, we have a *holomorphic motion* ([BR, L, Mc, MSS]) of the Cantor Julia sets over any simply connected domain in $\mathbb{C}-\mathbb{M}$. In this paper, we obtain some results regarding limiting behavior of this holomorphic motion when $c$ approaches the boundary of $\mathbb{M}$. + +Let us describe it more precisely: For a technical reason, we consider the holomorphic motion of a Cantor Julia set over the topological disk $\mathbb{X} = \mathbb{C}-\mathbb{M}\cup\mathbb{R}_+$, where $\mathbb{R}_+$ denotes the set of positive real numbers. For any base point $c_0 \in \mathbb{X}$, there exists a unique map $H: \mathbb{X} \times J(f_{c_0}) \to \mathbb{C}$ such that + +(1) $H(c_0, z) = z$ for any $z \in J(f_{c_0})$; + +(2) For any $c \in \mathbb{X}$, the map $z \mapsto H(c, z)$ is injective on $J(f_{c_0})$ and it extends to a quasiconformal map on $\bar{\mathbb{C}}$. + +(3) For any $z_0 \in J(f_{c_0})$, the map $c \mapsto H(c, z_0)$ is holomorphic on $\mathbb{X}$. + +(4) For any $c \in \mathbb{X}$, the map $h_c(z) := H(c, z)$ satisfies $h_c(J(f_{c_0})) = J(f_c)$ and $f_c \circ h_c = h_c \circ f_{c_0}$ on $J(f_{c_0})$. + +See [Mc, §4] for more details. 
+ +2010 Mathematics Subject Classification. Primary 37F45; Secondary 37F99. +---PAGE_BREAK--- + +**Parameter rays.** Let $\mathbb{D}$ denote the open disk of radius one centered at the origin. There is a unique biholomorphic function $\Phi_M$ from $\bar{\mathbb{C}} - \mathbb{M}$ to $\bar{\mathbb{C}} - \bar{\mathbb{D}}$ satisfying $\Phi_M(c)/c \to 1$ ($c \to \infty$) with which the set + +$$ \mathcal{R}_M(\theta) := \{\Phi_M^{-1}(re^{i2\pi\theta}) : 1 < r < \infty\} $$ + +is defined and called the *parameter ray* of angle $\theta \in T = \mathbb{R}/\mathbb{Z}$ of the Mandelbrot set $M$. (This is a hyperbolic geodesic of the simply connected domain $\bar{\mathbb{C}} - \mathbb{M}$ starting at infinity.) See Figure 1. Given $\theta$, if the limit $\hat{c} = \lim_{r \to 1^+} \Phi_M^{-1}(re^{i2\pi\theta})$ exists, then $\hat{c} \in \partial M$ is called the *landing point* of the parameter ray $\mathcal{R}_M(\theta)$. We also say that $\theta$ is an *external angle* of the parameter $\hat{c}$. + +Figure 1: The Mandelbrot set and the parameter rays of angles 9/56, 1/6, 11/56, 15/56, 5/12, and 1/2. + +**Example (Real Cantor Julia sets).** When $c \notin \mathbb{M}$ approaches $\hat{c} = -2$ along the real axis (equivalently, along the parameter ray of angle $1/2$), $J(f_c)$ is contained in the real axis and its motion is depicted in Figure 2. + +**Semi-hyperbolic parameters and Misiurewicz points.** We are concerned with boundary behavior of the holomorphic motion given by the map $H$ above, along the parameter rays that land on a fairly large subset of $\partial \mathbb{M}$. + +We say a parameter $\hat{c}$ in $\partial \mathbb{M}$ is *semi-hyperbolic* if the critical point is non-recurrent and belongs to the Julia set. For each semi-hyperbolic parameter $\hat{c} \in \partial \mathbb{M}$, there exists at least one parameter ray $\mathcal{R}_M(\theta)$ landing at $\hat{c}$. (See [D2, Theorem 2]. Indeed, there are at most finite number of parameter rays landing at $\hat{c}$. 
See Remark 7.2.) Note that for the quadratic polynomial $z^2 + c$ (more generally, unicritical polynomials of the form $z^d + c$), $\hat{c} \in \partial \mathbb{M}$ being semi-hyperbolic implies it is a Collet-Eckmann parameter. (See [PRLS, Main Theorem & +---PAGE_BREAK--- + +Figure 2: Each horizontal slice of the black part is the Julia set of parameter $c \in [-2.733, -2)$. The gray part is the real slice of $J(f_c)$ for $c \in [-2, -1.875]$. Note that $J(f_{-2}) = [-2, 2]$. + +p.51] also [RL, p.291 & 299].) Shishikura [Shi] showed that for any open set $U$ intersecting with $\partial M$, the semi-hyperbolic parameters in $U$ form a dense subset of Hausdorff dimension 2 of $U \cap \partial M$. By a result of Douady [D2], the parameter ray $\mathcal{R}_M(\theta)$ lands on a semi-hyperbolic parameter if and only if $\theta \in T$ is non-recurrent under the angle-doubling $t \mapsto 2t$ (mod 1). Hence every interval of $T$ contains uncountably many angles for which the parameter rays land on semi-hyperbolic parameters. The geometric and dynamical properties of the Julia sets of semi-hyperbolic parameters are deeply investigated in a work of Carleson-Jones-Yoccoz [CJY]. For example, if $\hat{c} \in \partial M$ is semi-hyperbolic, then $J(f_{\hat{c}})$ is a locally connected dendrite such that $\bar{C} - J(f_{\hat{c}})$ is a John domain. + +A typical example of semi-hyperbolic parameter is a Misiurewicz point: We say a parameter $\hat{c}$ is Misiurewicz if the critical point of $f_{\hat{c}}$ is a pre-periodic point. (By a pre-periodic point $z$ we mean $f_{\hat{c}}^l(z) = f_{\hat{c}}^{l+p}(z)$ for some integers $l$ and $p$ but $f_{\hat{c}}^n(z) \neq z$ for all $n \ge 1$.) It is known that such a Misiurewicz point $\hat{c}$ eventually lands on a repelling periodic cycle in the dynamics of $f_{\hat{c}}$, and that the Misiurewicz points are contained in the boundary of the Mandelbrot set. 
It is also known that the parameter $\hat{c}$ is Misiurewicz if and only if $\hat{c}$ is the landing point of $\mathcal{R}_M(\theta)$ for some rational $\theta$ of even denominator. (See [DH1, Exposé VIII] and [CG, VIII, 6] for example.) Holomorphic motions of the Julia sets along such rays are depicted in Figure 3. + +**Main results.** Let $z_0$ be any point in $J(f_{c_0})$. Then the map $c \mapsto z(c) := H(c, z_0)$ is holomorphic over $X = \mathbb{C} - M \cup \mathbb{R}_+$. If we choose a semi-hyperbolic parameter $\hat{c} \in \partial M$, there exists a parameter ray $\mathcal{R}_M(\theta) \subset X$ of angle $\theta \in T - \{0\}$ that lands on $\hat{c}$. As $c$ moves along the parameter ray $\mathcal{R}_M(\theta)$ toward $\hat{c}$, $z(c) = H(c, z_0)$ moves along an analytic curve in the plane. + +Our main theorem states that the speed of such a motion is uniformly bounded by a function of $|c - \hat{c}|$: + +**Theorem 1.1 (Main Theorem).** Let $\hat{c} \in \partial M$ be a semi-hyperbolic parameter that is the landing point of $\mathcal{R}_M(\theta)$. Then there exists a constant $K > 0$ that depends only on $\hat{c}$ such that for any $c \in \mathcal{R}_M(\theta)$ sufficiently close to $\hat{c}$ and any $z = z(c) \in J(f_c)$, the point $z(c)$ moves holomorphically with + +$$ \left| \frac{dz(c)}{dc} \right| \le \frac{K}{\sqrt{|c-\hat{c}|}}. $$ + +The proof is given in Section 5. By this theorem we obtain one-sided Hölder continuity of the holomorphic motion along the parameter ray: +---PAGE_BREAK--- + +**Theorem 1.2** (Holomorphic Motion Lands). Let $\hat{c} \in \partial\mathbb{M}$ be a semi-hyperbolic parameter that is the landing point of $\mathcal{R}_{\mathbb{M}}(\theta)$, and let $c = c(r) := \Phi_{\mathbb{M}}^{-1}(re^{i2\pi\theta})$ with parameter $r \in (1, 2]$. 
Then for any $z(c(2))$ in $J(f_c(2))$, the improper integral + +$$z(\hat{c}) := z(c(2)) + \lim_{\delta \to +0} \int_2^{1+\delta} \frac{dz(c)}{dc} \frac{dc(r)}{dr} dr$$ + +exists in the Julia set $J(\hat{c})$. In particular, $z(c)$ is uniformly one-sided Hölder continuous of exponent $1/2$ at $c = \hat{c}$ along $\mathcal{R}_{\mathbb{M}}(\theta)$. More precisely, there exists a constant $K'$ depending only on $\hat{c}$ such that + +$$|z(c) - z(\hat{c})| \le K' \sqrt{|c - \hat{c}|}$$ + +for any $c = c(r) \in \mathcal{R}_{\mathbb{M}}(\theta)$ with $1 < r \le 2$. + +This theorem implies: + +**Theorem 1.3** (From Cantor to Semi-hyperbolic). For any semi-hyperbolic parameter $\hat{c} \in \partial\mathbb{M}$ and any parameter ray $\mathcal{R}_{\mathbb{M}}(\theta)$ landing at $\hat{c}$, the conjugacy $H(c, \cdot) = h_c : J(f_{c_0}) \to J(f_c)$ converges uniformly to a semiconjugacy $h_{\hat{c}} : J(f_{c_0}) \to J(\hat{c})$ from $f_{c_0}$ to $\hat{f}_{\hat{c}}$ as $c \to \hat{c}$ along $\mathcal{R}_{\mathbb{M}}(\theta)$. + +The proofs of these theorems are given Section 8. In Theorem 1.6 below, we will specify where the semiconjugacy $h_{\hat{c}}: J(f_{c_0}) \to J(\hat{c})$ fails to be injective. Indeed, the semiconjugacy is injective except on a countable subset. + +By Theorems 1.2 and 1.3, we have a semiconjugacy $h_{\hat{c}} \circ h_c^{-1} : J(f_c) \to J(\hat{c})$ with $|h_{\hat{c}} \circ h_c^{-1}(z) - z| = O(\sqrt{|c-\hat{c}|})$ as $c \to \hat{c}$ along $\mathcal{R}_{\mathbb{M}}(\theta)$. Thus we obtain: + +**Corollary 1.4** (Hausdorff Convergence). The Hausdorff distance between $J(f_c)$ and $J(\hat{c})$ is $O(\sqrt{|c-\hat{c}|})$ as $c \to \hat{c}$ along $\mathcal{R}_{\mathbb{M}}(\theta)$. + +This result is compatible with a result by Rivera-Letelier [RL]. See Remark 1.7. 
+ +**Symbolic dynamics.** Let + +$$\Sigma_3 := \{\mathbf{s} = \{s_0, s_1, s_2, \dots\} : s_n = *, 0 \text{ or } 1 \text{ for all } n \ge 0\}$$ + +be the space consisting of sequences of *'s, 0's and 1's with the product topology, and $\sigma$ be the left shift in $\Sigma_3$, $\sigma(\mathbf{s}) = \mathbf{s}' = (s'_0, s'_1, s'_2, \dots)$ with $s'_i = s_{i+1}$. Let + +$$\Sigma_2 := \{\mathbf{s} = \{s_0, s_1, s_2, \dots\} : s_n = 0 \text{ or } 1 \text{ for all } n \ge 0\} \subset \Sigma_3$$ + +be a closed subspace of $\Sigma_3$. A point $\mathbf{e} \in \Sigma_2$ is said to be *aperiodic* if $\sigma^n(\mathbf{e}) \neq \mathbf{e}$ for any $n \ge 0$. Two points **a** and **s** in $\Sigma_2$ are said to be *equivalent* with respect to aperiodic $\mathbf{e} \in \Sigma_2$, denoted by **a** ~ ~e~ **s**, if there is $k \ge 0$ such that $a_n = s_n$ for all $n \ne k$ and $\sigma^{k+1}(\mathbf{a}) = \sigma^{k+1}(\mathbf{s}) = \mathbf{e}$. It is plain to verify that the relation ~e~ is indeed an equivalence relation, and is the smallest equivalence relation that identifies 0e with 1e. + +Note that for $c \notin \mathbb{M}$ the dynamics of $f_c$ on the Julia set is conjugate to that of $\sigma$ on $\Sigma_2$. We will use an aperiodic **e** to represent the (itinerary of the) non-recurrent critical orbit of the semi-hyperbolic $f_\hat{c}$. Then **a** and **s** in $\Sigma_2$ are equivalent with respect to this **e** if and only if the points in $J(f_c)$ corresponding to **a** and **s** will degenerate to a point that eventually lands on the critical value $\hat{c}$ in $J(\hat{f}_\hat{c})$ as $c$ moves along the parameter ray landing on $\hat{c}$. +---PAGE_BREAK--- + +Figure 3: Holomorphic motion along the parameter rays of angles 1/6, 5/12, 9/56, 11/56, and 15/56. +---PAGE_BREAK--- + +Let $\mathcal{T}: \mathbb{T} \to \mathbb{T}$, $t \mapsto 2t$ (mod 1) be the angle-doubling map. 
Fix $\theta \in \mathbb{T} - \{0\}$, the two points $\theta/2$ and $(\theta+1)/2$ divide $\mathbb{T}$ into two open semi-circles $\mathbb{T}_0^\theta$ and $\mathbb{T}_1^\theta$ with $\theta \in \mathbb{T}_0^\theta$. Define the *itinerary* of a point $t$ under $\mathcal{T}$ with respect to $\theta$ as $\mathcal{E}^\theta(t) = \{\mathcal{E}^\theta(t)_n\}_{n \ge 0}$ with + +$$ +\mathcal{E}^{\theta}(t)_n = \begin{cases} 0 & \text{for } \mathcal{T}^n(t) \in \mathbb{T}_0^{\theta} \\ 1 & \text{for } \mathcal{T}^n(t) \in \mathbb{T}_1^{\theta} \\ * & \text{for } \mathcal{T}^n(t) \in \left\{\frac{\theta}{2}, \frac{\theta+1}{2}\right\}. \end{cases} +$$ + +The itinerary of $\theta$ itself, $\mathcal{E}^\theta(\theta)$, is called the kneading sequence of $\theta$. + +Another consequence of Theorem 1.2 is as follows. + +**Theorem 1.5** (Symbolic Dynamics at Semi-hyperbolic Parameter). Let $\hat{c}$ be a semi-hyperbolic parameter with an external angle $\theta$ and **e** = $\mathcal{E}^{\theta}(\theta)$ be the kneading sequence of $\theta$. Then ($J(f_{\hat{c}})$, $f_{\hat{c}}$) is topologically conjugate to $(\Sigma_2/\sim_{\mathbf{e}}, \tilde{\sigma})$, where $\tilde{\sigma}$ is induced by the shift transformation $\sigma$ of $\Sigma_3$. + +Theorem 1.5 also implies that the semiconjugacy in Theorem 1.3 is one-to-one except at countable points where it is two-to-one. + +**Theorem 1.6** (Almost Injectivity). Let $h_\hat{c} : J(f_{c_0}) \to J(f_{\hat{c}})$ be the semiconjugacy given in Theorem 1.3. For any $w \in J(f_{\hat{c}})$, the preimage $h_{\hat{c}}^{-1}\{w\}$ has at most two points, and it consists of two distinct points if and only if $f_{\hat{c}}^n(w) = 0$ for some $n \ge 0$. + +We prove these two theorems above in Section 17. More precise properties of the semi- +conjugacy can be found in Corollary 16.2. + +**Structure of the paper.** The structure of this paper is a little complicated, but we belive +this presentation requires less memory of the readers. 
In Section 2 we briefly summarize the +notation and properties of the dynamics of $f_c(z) = z^2 + c$ with semi-hyperbolic parameters. +Section 3 is devoted for “the derivative formula”, which is a key tool for our estimate. In +Section 4 we introduce the notion of “Z-cycle” to describe the behavior of the orbits. We +also present Lemmas A, B, and C about Z-cycles, whose proofs are given later. In Section +5 we prove the Main Theorem by assuming these lemmas. In Section 6 we introduce the +notion of “S-cycle” and “the S-cycle decompositions” of Z-cycles. We also present Lemmas +A’, B’, and C’, whose proofs are given later as well. Section 7 is devoted for Proposition S +about stability of landing dynamic rays, and some lemmas that come from the assumption +that the parameter $c$ moves along the parameter ray. In Section 8 we prove Theorems 1.2 +and 1.3. Then by assuming Lemmas A’, B’, and C’, we prove Lemmas A and B in Sections +9 and 10 respectively. Section 11 is devoted for some lemmas on hyperbolic metrics, and by +using them, we prove Lemmas B’, A’, C’, and C in Sections 12, 13, 14, and 15 respectively. +In Section 16 we work with symbolic dynamics, and finally in Section 17 we give proofs of +Theorems 1.5 and 1.6. + +*Remark* **1.7.** + +• The estimate in the Main Theorem is optimal. For example, if $\hat{c} = -2$ (that is the Misiurewicz parameter with $f_{\hat{c}}^2(0) = f_{\hat{c}}^3(0) = 2$), then for $c = -2 - \epsilon$ with $\epsilon > 0$ the repelling fixed point on the positive real axis is given by $(1 + \sqrt{9 + 4\epsilon})/2 = 2 + \epsilon/3 + o(\epsilon)$. Hence its preimages near the critical point are $z = \pm\sqrt{2\epsilon/3}(1 + o(\epsilon))$, whose derivatives are $dz/d\epsilon = \pm(1/\sqrt{6\epsilon})(1 + o(\epsilon))$. This implies that $|dz/dc|$ is compatible with $1/\sqrt{|c-\hat{c}|}$. See Figure 2. 
+---PAGE_BREAK--- + +* The results and the proofs in this paper are easily generalized to the unicritical family $\{z \mapsto z^d + c : c \in \mathbb{C}\}$, simply by replacing the square root ("$\sqrt{|c-\hat{c}|}$") by the $d$th root ("$|c-\hat{c}|^{1/d}$") in the Main Theorem. + +* In [CK] the authors give a simple proof of the Main Theorem for $\hat{c} = -2$. + +* In [D1], Douady showed that the Julia set $J(f_c)$ continuously depends on $c$ at any semi-hyperbolic parameter $\hat{c}$ in the sense of Hausdorff topology. Moreover, in [RL], Rivera-Letelier showed that the Hausdorff distance between $J(f_c)$ and $J(\hat{f}_c)$ is $O(|c-\hat{c}|^{1/2})$ for $c$ close enough to $\hat{c}$, and that the Hausdorff dimension of the Julia set $J(f_c)$ converges to that of $\hat{c}$ if $c$ tends to $\hat{c}$ along the parameter ray. Our results, in addition, give the convergence of the dynamics. + +* It is known that any parameter ray of odd denominator has a landing point $\hat{c}$ on $\partial\mathbb{M}$ such that $f_{\hat{c}}$ has a parabolic periodic point. However, when $c$ moves along such a parameter ray, $J(f_c)$ does not converge in the Hausdorff topology. The discontinuity comes from the "parabolic implosion", which is also described in Douady's article [D1]. + +* Suppose $\hat{c} \in \partial\mathbb{M}$ and $\hat{c} \in J(\hat{f}_c)$, and suppose $\hat{c}$ has an external angle $\theta$. There have been several results concerning the quotient dynamics for $\hat{f}_c$ by kneading sequences. If the kneading sequence $\mathcal{E}^\theta(\theta)$ is aperiodic, then the same statement as Theorem 1.5 that $(J(\hat{f}_c), \hat{f}_c)$ is topologically conjugate to $(\Sigma_2/\sim_{\mathcal{E}^\theta(\theta)}, \tilde{\sigma})$ has been known by Bandt and Keller [BK]. 
Let $\approx_\theta$ be the smallest equivalence relation on $\mathbb{T}$ such that $t$ is equivalent to $t'$ whenever, for every $n$, either $\mathcal{E}^\theta(t)_n = \mathcal{E}^\theta(t')_n$ or $\mathcal{E}^\theta(t)_n = *$ or $\mathcal{E}^\theta(t')_n = *$.
+ +* For non-negative variables $X$ and $Y$, by $X \asymp Y$ we mean there exists an implicit constant $C > 1$ independent of $X$ and $Y$ such that $X/C \le Y \le CX$. +---PAGE_BREAK--- + +* When we say "for any $X \ll 1$" it means that "for any sufficiently small $X > 0$". + +* Let $c$ be a parameter for the quadratic family $\{f_c(z) = z^2 + c : c \in \mathbb{C}\}$. By $c \approx \hat{c}$ we mean there exists an implicit constant $\delta > 0$ independent of $c \neq \hat{c}$ such that $|c - \hat{c}| < \delta$. When we say "the constant $C$ independent of $c \approx \hat{c}$" it means that $C$ does not depend on $c \neq \hat{c}$ but it may depend on $\hat{c}$. + +**Misiurewicz and semi-hyperbolic parameters.** Let $\hat{c} \in \partial\mathbb{M}$ be a Misiurewicz point with $f_{\hat{c}}^{l}(0) = f_{\hat{c}}^{l+p}(0)$ where we choose the minimal $l$ and $p$ in $\mathbb{N}$. Then it is known that $f_{\hat{c}}^{l}(0)$ is actually a repelling periodic point. + +More generally, suppose that $\hat{c} \in \partial\mathbb{M}$ is semi-hyperbolic, and set $\hat{b}_n := f_{\hat{c}}^n(0)$ for each $n \ge 0$. Let $\Omega(\hat{c})$ denote the set of accumulation points of the set $\{\hat{b}_n\}_{n \ge 0}$, i.e., the $\omega$-limit set of $0$. Moreover, by a result of Carleson, Jones, and Yoccoz [CJY], $\Omega(\hat{c})$ is a hyperbolic set in the sense of [Shi]: i.e., $\Omega(\hat{c})$ is compact; $f_{\hat{c}}(\Omega(\hat{c})) \subset \Omega(\hat{c})$ (indeed, we have $f_{\hat{c}}(\Omega(\hat{c})) = \Omega(\hat{c})$); and there exist constants $\alpha, \beta > 0$ such that $|Df_{\hat{c}}^n(z)| \ge \alpha(1 + \beta)^n$ for any $z \in \Omega(\hat{c})$ and $n \ge 0$. For example, if $\hat{c}$ is Misiurewicz, the set $\Omega(\hat{c})$ is the repelling cycle on which the orbit of $0$ lands. 
+ +For $\hat{c} \in \partial\mathbb{M}$ a semi-hyperbolic parameter, it is proved in [CJY] that there are constants $\epsilon > 0$, $C > 0$, and $0 < \eta < 1$ such that for all $z \in J(f_{\hat{c}})$, $n \ge 0$, and any connected component $B_n(z, \epsilon)$ of $f_{\hat{c}}^{-n}(\mathbb{D}(z, \epsilon))$, we have + +$$ \mathrm{diam}\, B_n(z, \epsilon) < C \eta^n. \quad (1) $$ + +In what follows we fix a $p \in \mathbb{N}$ such that $|Df_{\hat{c}}^p(z)| \ge 3$ for any $z \in \Omega(\hat{c})$.¹ We first check: + +**Proposition 2.1 (Critical Orbit Lands).** The critical orbit $\hat{b}_n = f_{\hat{c}}^n(0)$ ($n \in \mathbb{N}_0$) eventually lands on $\Omega(\hat{c})$. That is, there exists a minimal integer $l$ such that $\hat{b}_l = f_{\hat{c}}^l(0) \in \Omega(\hat{c})$. + +**Proof.** Suppose that $\hat{b}_n \notin \Omega(\hat{c})$ for every $n \in \mathbb{N}$. Since $|Df_{\hat{c}}^p(x)| \ge 3$ for any $x \in \Omega(\hat{c})$ we apply the Koebe distortion theorem (see [Du]) to find a $\delta > 0$ such that if $\hat{b}_n \in N(\Omega(\hat{c}), \delta) - \Omega(\hat{c})$, we have + +$$ \mathrm{dist}(\hat{b}_{n+p}, \Omega(\hat{c})) \ge 2\mathrm{dist}(\hat{b}_n, \Omega(\hat{c})). $$ + +(We also used compactness and invariance of $\Omega(\hat{c})$. See also Remark 2.3.) Hence there exists an accumulation point of the critical orbit in $\bar{\mathbb{C}} - N(\Omega(\hat{c}), \delta)$. However, it contradicts to the definition of $\Omega(\hat{c})$. 
■ + +Another remarkable fact is that the hyperbolic set $\Omega(\hat{c})$ moves holomorphically and preserves the dynamics (see [Shi, §1]): + +**Proposition 2.2 (Holomorphic Motion of $\Omega(\hat{c})$).** There exist a neighborhood $\Delta$ of $\hat{c}$ in the parameter plane $\bar{\mathbb{C}}$ and a map $\chi: \Delta \times \Omega(\hat{c}) \to \bar{\mathbb{C}}$ with the following properties: + +(1) $\chi(\hat{c}, z) = z$ for any $z \in \Omega(\hat{c})$; + +(2) For any $c \in \Delta$, the map $z \mapsto \chi(c, z)$ is injective on $\Omega(\hat{c})$ and it extends to a quasiconformal map on $\bar{\mathbb{C}}$. + +(3) For any $z_0 \in \Omega(\hat{c})$, the map $c \mapsto \chi(c, z_0)$ is holomorphic on $\Delta$. + +(4) For any $c \in \Delta$, the map $\chi_c(z) := \chi(c, z)$ satisfies $f_c \circ \chi_c = \chi_c \circ f_{\hat{c}}$ on $\Omega(\hat{c})$.¹ + +¹Of course "3" does not have a particular meaning; any constant bigger than one will do. +---PAGE_BREAK--- + +**Definition of $V_j$'s.** Now we give a fundamental setting for the proofs of our results that will be assumed in what follows. + +• Set $\Omega(c) := \chi_c(\Omega(\hat{c}))$ for each $c \in \Delta$ given in Proposition 2.2. Then $\Omega(c)$ is a hyperbolic subset of the Julia set $J(f_c)$. Since $J(f_c)$ is a Cantor set when $c \notin \mathbb{M}$, $\Omega(c)$ is a totally disconnected set for any $c \in \Delta$. + +• Set $U_l := N(\Omega(\hat{c}), R_l)$ for a sufficiently small $R_l > 0$, such that + +– there is a constant $\mu \ge 2.5$ such that for any $c \approx \hat{c}$ and $z \in U_l$ we have + +$$|Df_c^p(z)| \ge \mu; \text{ and}$$ + +– for any $c \approx \hat{c}$, $U_l \Subset f_c^p(U_l)$. + +Such an $R_l$ exists because $|Df_c^p(z)| \ge 3$ on $\Omega(\hat{c})$ and the function $(c,z) \mapsto |Df_c^p(z)|$ is continuous. + +• We set $b_j(c) := \chi_c(\hat{b}_j) \in \Omega(c)$ for each $j \ge l$ and $c \in \Delta$. 
By taking a smaller $\Delta$ if necessary, we can also find an analytic family of pre-landing points $b_0(c)$, $b_1(c)$, ..., $b_{l-1}(c)$ over $\Delta$ such that $b_{j+1}(c) = f_c(b_j(c))$ and $\hat{b}_j = b_j(\hat{c})$ for each $j = 0, 1, \dots, l-1$. (For $j=0$, $b_0(c)$ is defined as a branch of $f_c^{-1}(b_1(c))$.) + +• Choose disjoint topological disks $V_j$ for $j = 0, 1, \dots, l-1$ such that + +– $V_0 := \mathcal{D}(0, \nu)$ for some $\nu \ll 1$. We will add more conditions for $\nu$ later. + +– For each $j = 1, \dots, l-1$, the topological disk $V_j$ contains $\hat{b}_j$ and satisfies $\operatorname{diam} V_j \asymp \nu^2$. More precisely, there exists a constant $C_0 > 1$ independent of $j$ such that $\nu^2/C_0 \le \operatorname{diam} V_j \le C_0\nu^2$. + +– For any $c \approx \hat{c}$ and each $j = 0, 1, \dots, l-2$, we have $f_c(V_j) \Subset V_{j+1}$. + +We also take a constant $C'_0 > 1$ such that for any $c \approx \hat{c}$, + +– the set $V_l := N(\Omega(\hat{c}), C'_0\nu^2)$ contains the topological disk $f_c(V_{l-1})$; and + +– at least for each $j = 0, 1, \dots, p-1$, $f_c^j(V_l) \Subset U_l$. + +We assume that $\nu$ is sufficiently small such that $V_j \cap V_l = \emptyset$ for each $j = 0, 1, \dots, l-1$. Let $\mathcal{V}$ denote the union $V_1 \cup V_2 \cup \dots \cup V_{l-1} \cup V_l$. See Figure 4. + +• Let $\xi$ be the distance from 0 to the closure of the set + +$$\{\hat{b}_1, \hat{b}_2, \dots, \hat{b}_{l-1}\} \cup U_l.$$ + +Since 0 is not recurrent (i.e., $0 \notin \Omega(\hat{c})$), we have $\xi > 0$ if we take $R_l$ small enough. We may assume in addition that $0 < \xi \le 1$ if we reset $\xi := 1$ when $\xi > 1$. If necessary, we replace $\nu$ so that $R_l$ and $C_0\nu^2$ are smaller than $\xi/2$. Then we have $|Df_c(z)| = 2|z| \ge \xi$ for any $z \in \mathcal{V} \cup U_l$ and $c \approx \hat{c}$. + +*Remark 2.3.* The backward dynamics of $f^p$ near $\Omega(\hat{c})$ is uniformly shrinking with respect to the Euclidean metric. 
For example, one can find an $R > 0$ depending only on $\hat{c}$ such that for any $x \in \Omega(\hat{c})$ there exists a univalent branch $g$ of $f_{\hat{c}}^{-p}$ on $\mathcal{D}(f_{\hat{c}}^p(x), R)$ satisfying $g(f_{\hat{c}}^p(x)) = x$ and $g(\mathcal{D}(f_{\hat{c}}^p(x), R)) \subset \mathcal{D}(x, R/2)$. Indeed, we first take an $R_0 > 0$ such that $f_{\hat{c}}^p$ is univalent on any $\mathcal{D}(x, R_0)$ with $x \in \Omega(\hat{c})$. By the Koebe 1/4 theorem, $f_{\hat{c}}^p(\mathcal{D}(x, R_0))$ +---PAGE_BREAK--- + +Figure 4: $V_0, V_1, \dots, V_l$ and $U_l$. + +contains $\mathcal{D}(f_{\hat{c}}^p(x), R_0|Df_{\hat{c}}^p(x)|/4)$. Since $|Df_{\hat{c}}^p(x)| \ge 3$ on $\Omega(\hat{c})$, there is a univalent branch $g$ of $f_{\hat{c}}^{-p}$ on $\mathcal{D}(f_{\hat{c}}^p(x), 3R_0/4)$ with $g(f_{\hat{c}}^p(x)) = x$ and $|Dg(f_{\hat{c}}^p(x))| \le 1/3$. The Koebe distortion theorem implies that $g$ maps the disk $\mathcal{D}(f_{\hat{c}}^p(x), R)$ into $\mathcal{D}(x, R/2)$ by taking a sufficiently small $R < 3R_0/4$. + +We assume that the $R_l$ in the definition of $U_l$ is relatively smaller than this $R$, and we will implicitly apply this type of argument to the backward dynamics of $f_c$ near $U_l$ for $c \approx \hat{c}$. + +### 3 The derivative formula + +Recall that the map $H : X \times J(f_{c_0}) \to \mathbb{C}$ in Section 1 gives a holomorphic motion of the Julia set $J(f_{c_0})$ over the simply connected domain $X = \mathbb{C} - (\mathbb{M} \cup \mathbb{R}_+)$ with the base point $c_0 \in X$. For a given point $z_0 \in J(f_{c_0})$, we want to have some estimates for the derivative of the holomorphic function $z(c) = H(c, z_0)$ at $c \in X$. + +In fact, such a holomorphic motion always exists for any simply connected domain $Y$ in $\mathbb{C} - \mathbb{M}$ with any base point $c_0 \in Y$. For a given $c \in \mathbb{C} - \mathbb{M}$, the derivative of such a motion at $c$ is independent of the choice of the domain $Y$ containing $c$ and the basepoint $c_0$. 
For example, it is convenient to consider the motion over the simply connected domain $Y := \mathbb{C} - (\mathbb{M} \cup \mathbb{R}_-)$ (where $\mathbb{R}_-$ is the set of negative real numbers) and assume that $X$ and $Y$ share the base point $c_0 \in Y \cap X = \mathbb{C} - (\mathbb{M} \cup \mathbb{R})$. + +Now we prove: + +**Proposition 3.1.** For any $c \notin \mathbb{M}$ and $z = z(c) \in J(f_c)$, we have + +$$ \left|\frac{d}{dc}z(c)\right| \leq \frac{1 + \sqrt{1 + 6|c|}}{\text{dist}(c, \partial \mathbb{M})}. $$ + +In particular, $|dz/dc| = O(1/\sqrt{|c|})$ as $c \to \infty$. +---PAGE_BREAK--- + +**Proof.** Let $\delta_c := \text{dist}(c, \partial \mathbb{M})$ and $d_c := (1 + \sqrt{1+4|c|})/2$ for $c \in \mathbb{C}$. Let $s(z) := \sup_{n \ge 0} |f_c^n(z)|$ for $z \in J(f_c)$. Since $f_c^{n+1}(z) = (f_c^n(z))^2 + c$, we have $s(z) \ge s(z)^2 - |c|$ and this implies $s(z) \le d_c$. Hence the Julia set $J(f_c)$ is contained in $\overline{\mathbb{D}(d_c)}$ for any $c \in \mathbb{C}$. + +Now assume that $c \notin \mathbb{M}$. Then the disk $\mathbb{D}(c, \delta_c)$ is contained in either $X = \mathbb{C} - (\mathbb{M} \cup \mathbb{R}_+)$ or $Y = \mathbb{C} - (\mathbb{M} \cup \mathbb{R}_-)$, and the motion of $J(f_{c_0})$ restricted to this disk is well-defined. Let us consider a parameter $\zeta \in \mathbb{D}(c, \delta_c)$ such that $|\zeta - c| = \delta_c/2$. Since $\delta_c \le |c|$, we have $|\zeta| \le 3|c|/2$ and thus the Julia set $J(f_\zeta)$ is contained in $\overline{\mathbb{D}(d_{3|c|/2})}$. By applying the Cauchy integral formula, we obtain + +$$\left|\frac{d}{dc}z(c)\right| = \left|\frac{1}{2\pi i}\int_{|\zeta-c|=\delta_c/2} \frac{z(\zeta)}{(\zeta-c)^2} d\zeta\right| \le \frac{2d_{3|c|/2}}{\delta_c} = \frac{1+\sqrt{1+6|c|}}{\text{dist}(c, \partial \mathbb{M})}.$$ + +Since $\mathbb{M}$ is contained in $\overline{\mathbb{D}(2)}$, we have $|c| - 2 \le \delta_c \le |c|$. 
This implies $|dz/dc| = O(1/\sqrt{|c|})$ +as $c \to \infty$ + +**The derivative formula.** Our main theorem is based on the following formula (see also [CKLY]): + +**Proposition 3.2 (The Derivative Formula).** For any $c \notin M$ and $z = z(c) \in J(f_c)$, we have + +$$\frac{d}{dc}z(c) = -\sum_{n=1}^{\infty} \frac{1}{D f_c^n(z(c))}.$$ + +**Proof.** Set $f := f_c$ and $z_n = z_n(c) := f^n(z(c))$. Then the relation $z_{n+1} = z_n^2 + c$ implies + +$$\frac{dz_{n+1}}{dc} = 2z_n \cdot \frac{dz_n}{dc} + 1 \iff \frac{dz_n}{dc} = -\frac{1}{Df(z_n)} + \frac{1}{Df(z_n)} \frac{dz_{n+1}}{dc}.$$ + +Hence we have + +$$ +\begin{align*} +\frac{d}{dc}z(c) &= \frac{dz_0}{dc} = -\frac{1}{Df(z_0)} + \frac{1}{Df(z_0)}\frac{dz_1}{dc} \\ +&= -\frac{1}{Df(z_0)} + \frac{1}{Df(z_0)}\left(-\frac{1}{Df(z_1)} + \frac{1}{Df(z_1)}\frac{dz_2}{dc}\right) \\ +&= -\frac{1}{Df(z_0)} - \frac{1}{Df^2(z_0)} + \frac{1}{Df^2(z_0)}\frac{dz_2}{dc} \\ +&= -\sum_{n=1}^{N} \frac{1}{Df^n(z(c))} + \frac{1}{Df^N(z_0)}\frac{dz_N}{dc}. +\end{align*} +$$ + +By letting $N \to \infty$ we formally have the desired formula. The series actually converges since +$|dz_N/dc|$ is uniformly bounded by a constant depending only on $c$ (by Proposition 3.1) and +$|Df^N(z_0)|$ grows exponentially by hyperbolicity of $f = f_c$. +$\blacksquare$ + +*Remark 3.3.* + +• The estimate in Proposition 3.1 is valid for any $c \in C - \partial M$. Moreover, the derivative formula is also valid for any hyperbolic parameter in M. +---PAGE_BREAK--- + +• Proposition 3.1 implies an estimate + +$$ +\left| \frac{dz}{dc}(c) \right| = O\left( |c - \hat{c}|^{-1-\beta} \right) +$$ + +if $c$ approaches $\hat{c} \in \partial\mathbb{M}$ in such a way that + +$$ +\operatorname{dist}(c, \partial\mathbb{M}) \geq C|c - \hat{c}|^{1+\beta} +$$ + +for some constant $C > 0$. The smallest possible value that $\beta$ can take is zero, for example, when $c \to \hat{c} = -2$ along the negative real axis. 
Typically $\beta$ is positive, for example, $\beta = 1/2$ in the main theorem of [RL]. + +In general, when $c$ approaches semi-hyperbolic $\hat{c} \in \partial\mathbb{M}$ along a parameter ray landing at $\hat{c}$, it satisfies $\mathrm{dist}(c, \partial\mathbb{M}) \ge C|c - \hat{c}|$ for some $C > 0$, and thus $\beta = 0$. (This is a combination of two facts: the John property of the complement of the Julia set $J(f_{\hat{c}})$ by [CJY] and the asymptotic similarity between $J(f_{\hat{c}})$ and $\mathbb{M}$ at $\hat{c}$ by [RL].) This observation implies that our main theorem is stronger and it does not come from the geometry of the Mandelbrot set. We need the dynamics (the derivative formula) to prove it. + +4 Z-cycles + +For $c \approx \hat{c}$, choose any $z = z_0 \in J(f_c)$. The orbit $z_n := f_c^n(z_0) (n \in \mathbb{N}_0)$ may land on $V_0$ (or more precisely, on $V_0 \cap J(f_c)$), and go out, then it may come back again. To describe the behavior of such an orbit, we introduce the notion of “Z-cycle” for the orbit of $z$, where “Z” indicates that the orbit comes close to “zero”. + +We set $f := f_c$ for brevity. + +**Definition (Z-cycle).** A finite Z-cycle of the orbit $z_n = f^n(z_0)$ ($n \in \mathbb{N}_0$) is a finite subset of $\mathbb{N}_0$ of the form + +$$ +Z = \{ n \in \mathbb{N}_0 : N \le n < N' \} = [N, N') \cap \mathbb{N}_0, +$$ + +such that $z_N, z_{N'} \in V_0$ but $z_n \notin V_0$ if $N < n < N'$. An infinite Z-cycle is an infinite subset of $\mathbb{N}_0$ of the form + +$$ +Z = \{ n \in \mathbb{N}_0 : N \le n < \infty \} = [N, \infty) \cap \mathbb{N}_0, +$$ + +such that $z_N \in V_0$ but $z_n \notin V_0$ for all $n > N$. By a *Z-cycle* we mean a finite or infinite *Z-cycle*. In both cases, we denote them $\mathbb{Z} = [N, N')$ or $\mathbb{Z} = [N, \infty)$ for brevity. 
+ +**Decomposition of the orbit by Z-cycles.** For a given orbit $z_n = f^n(z_0)$ ($n \in \mathbb{N}_0$) of $z_0 \in J(f_c)$, the set $\mathbb{N}_0$ of indices is uniquely decomposed by using finite or infinite Z-cycles in one of the following three types: + +• The first type is of the form + +$$ +\mathbb{N}_0 = [0, N_1) \sqcup Z_1 \sqcup Z_2 \sqcup \dots, \tag{2} +$$ + +where $z_n \notin V_0$ for $n \in [0, N_1)$ and $Z_k := [N_k, N_{k+1})$ is a finite Z-cycle for each $k \ge 1$. + +• The second type is of the form + +$$ +\mathbb{N}_0 = [0, N_1) \sqcup Z_1 \sqcup Z_2 \sqcup \dots \sqcup Z_{k_0}, \quad (3) +$$ + +where $k_0 \ge 1$ such that $z_n \notin V_0$ for $n \in [0, N_1)$; $Z_k := [N_k, N_{k+1})$ is a finite Z-cycle for each $1 \le k \le k_0 - 1$; and $Z_{k_0} = [N_{k_0}, \infty)$ is an infinite Z-cycle. +---PAGE_BREAK--- + +• The third type is just $\mathbb{N}_0 = [0, N_1)$ with $N_1 = \infty$, where $z_n \notin V_0$ for all $n \in \mathbb{N}_0$. + +In the first and second types it is possible that $N_1 = 0$ and $[0, N_1)$ is empty. For the second and third types, we set $Z_k := \emptyset$ for any $k \ge 1$ for which $Z_k$ is not defined yet. Hence we always assume that $\mathbb{N}_0$ formally has an infinite decomposition of the form (2) associated with the orbit of $z_0 \in J(f_c)$. + +**The three lemmas.** In what follows we assume the following “parameter ray condition” without (or with) mentioning: + +“Parameter ray condition”. The parameter *c* is always on the parameter ray $\mathcal{R}_M(\theta)$ that lands on $\hat{c}$. + +Now we present three principal lemmas about Z-cycles. (The proofs will be given later.) 
+ +**Lemma A.** There exists a constant $K_A > 0$ such that for any $c \approx \hat{c}$, any $z = z_0 \in J(f_c)$, and for any Z-cycle $Z = [N, N')$ of the orbit $z_n = f_c^n(z)$ ($n \in \mathbb{N}_0$), we have + +$$ +\sum_{i=1}^{N'-N} \frac{1}{|Df_c^i(z_N)|} \le \frac{K_A}{\sqrt{|c-\hat{c}|}}, \quad (4) +$$ + +where we set $N' - N := \infty$ if $N' = \infty$. + +**Lemma B.** There exists a constant $K_B > 0$ such that for any $c \approx \hat{c}$ and any $N \le \infty$, if $z = z_0 \in J(f_c)$ satisfies $z_n \notin V_0$ for any $n \in [0, N)$, then we have + +$$ +\sum_{i=1}^{N} \frac{1}{|Df_c^i(z_0)|} \le K_B. \tag{5} +$$ + +In fact, $K_B$ depends only on the choices of $\hat{c}$ and $\nu$. Hence we have: + +**Corollary 4.1.** For any $c \approx \hat{c}$ and any $z = z_0 \in J(f_c)$, if the orbit of $z$ never lands on $V_0 = D(\nu)$, then the derivative satisfies + +$$ +\left|\frac{dz}{dc}\right| \leq \sum_{n=1}^{\infty} \frac{1}{|Df_c^n(z_0)|} \leq K_B. \quad (6) +$$ + +**Lemma C (Z-cycles Expand Uniformly).** There exists a constant $\Lambda > 1$ such that for any $c \approx \hat{c}$, any $z = z_0 \in J(f_c)$, and for any finite Z-cycle $Z = [N, N')$ of the orbit $z_n = f_c^n(z)$ ($n \in \mathbb{N}_0$), we have + +$$ +|D f_c^{N'-N}(z_N)| \geq \Lambda. \tag{7} +$$ + +This $\Lambda$ also depends only on the choice of $\nu$. Indeed, $\Lambda$ is bounded by a constant compatible with $\nu^{-1}$. +---PAGE_BREAK--- + +**5 Proof of the main theorem assuming Lemmas A, B, and C** + +We will use the derivative formula (Proposition 3.2) and Lemmas A, B, and C to show the inequality. + +For a given $c \approx \hat{c}$ and $z = z_0 \in J(f_c)$, we consider the decomposition $\mathbb{N}_0 = [0, N_1) \sqcup Z_1 \sqcup Z_2 \sqcup \dots$ as in (2). Set $f := f_c$. 
Then we have + +$$ +\begin{align*} +\left|\frac{dz}{dc}\right| &\le \sum_{n=1}^{\infty} \frac{1}{|Df^n(z_0)|} = \sum_{n=1}^{N_1} \frac{1}{|Df^n(z_0)|} + \sum_{k \ge 1} \sum_{n \in Z_k} \frac{1}{|Df^{n+1}(z_0)|} \\ +&= \sum_{n=1}^{N_1} \frac{1}{|Df^n(z_0)|} + \sum_{k \ge 1, Z_k \ne \emptyset} \sum_{i=1}^{N_{k+1}-N_k} \frac{1}{|Df^{N_k}(z_0)| |Df^i(z_{N_k})|} +\end{align*} +$$ + +By Lemma B, we obviously have $1/|Df^{N_1}(z_0)| \le K_B$. By Lemma C, we have + +$$ +|Df^{N_k}(z_0)| = |Df^{N_k-N_{k-1}}(z_{N_{k-1}})| \cdots |Df^{N_2-N_1}(z_{N_1})| |Df^{N_1}(z_0)| \ge \Lambda^{k-1}/K_B +$$ + +as long as $Z_k \neq \emptyset$. Hence by Lemma A, we have + +$$ +\sum_{n=1}^{\infty} \frac{1}{|Df^n(z_0)|} \le K_B + \sum_{k \ge 1} \frac{K_B}{\Lambda^{k-1}} \cdot \frac{K_A}{\sqrt{|c-\hat{c}|}} = K_B + \frac{K_B \Lambda}{\Lambda-1} \cdot \frac{K_A}{\sqrt{|c-\hat{c}|}} +$$ + +We may assume that $|c - \hat{c}| \le 1$ such that $K_B \le K_B / \sqrt{|c - \hat{c}|}$. Hence by setting $K := K_B + \frac{K_B K_A \Lambda}{\Lambda - 1}$, we have + +$$ +\left|\frac{dz}{dc}\right| \le \frac{K}{\sqrt{|c - \hat{c}|}} \quad \text{for any } c \approx \hat{c}. +$$ + +6 S-cycles + +To show Lemmas A, B, and C, we introduce the notion of “S-cycle”. + +For $c \approx \hat{c}$, set $f := f_c$ and choose any $z = z_0 \in J(f_c)$. The orbit $z_n := f^n(z_0)$ ($n \in \mathbb{N}_0$) may land on $\mathcal{V}$. Unless it lands exactly on the hyperbolic set $\Omega(c)$, it will follow some orbit of $\Omega(c)$ for a while, and be repelled out of $U_l$ eventually. Then it may come back to $\mathcal{V}$, or land on $V_0$. We define such a process as an “S-cycle”, where “S” indicates that orbit stays near the “singularity” of the hyperbolic metric $\gamma$ to be defined in Section 11, or the cycle is relatively “short” compared to Z-cycle. 
+ +**Definition (S-cycle).** A finite *S-cycle* $\mathcal{S} = [M, M')$ of the orbit $z_n = f^n(z_0)$ ($n \in \mathbb{N}_0$) is a finite subset of $\mathbb{N}_0$ with the following properties: + +(S1) $z_M \in V_j \subset \mathcal{V}$ for some $j = 1, 2, \dots, l$. If $M > 0$ then $z_{M-1} \notin \mathcal{V}$. + +(S2) There exists a minimal $m \ge 1$ such that for $n = M + (l-j) + mp$, $z_{n-p} \in U_l$ but $z_n \notin U_l$. + +(S3) $M' = M + (l-j) + mp + L$ for some $L \in [1, \infty)$ such that $z_n \notin V_0 \cup \mathcal{V}$ for $n = M + (l-j) + mp + i$ ($0 \le i < L$) and $z_{M'} \in V_0 \cup \mathcal{V}$. + +Note that in (S1), $z_{M-1}$ may be contained in $V_0$. Note also that in (S2), some of $z_{n-p+1}, \dots, z_{n-1}$ may not be contained in $U_l$. + +An infinite S-cycle $\mathcal{S} = [M, \infty)$ of the orbit $z_n = f^n(z_0)$ ($n \in \mathbb{N}_0$) is an infinite subset of $\mathbb{N}_0$ satisfying either +---PAGE_BREAK--- + +* Type (I): (S1), (S2), and + +(S3)' $z_n \notin V_0 \cup \mathcal{V}$ for all $n \ge M + (l-j) + mp$; + +or + +* Type (II): (S1) and + +(S2)' either $z_M = b_j(c)$ for $j < l$ or $z_M \in \Omega(c)$ for $j = l$. Equivalently, $z_n \in U_l$ for every $n = M + (l-j) + kp$ with $k \in \mathbb{N}$. + +**Decomposition of Z-cycles by S-cycles.** Every Z-cycle $Z = [N, N')$ ($N \le \infty$) of the orbit $z_n = f^n(z_0)$ ($n \in \mathbb{N}_0$) has a unique decomposition by finite or infinite S-cycles. +For a finite Z-cycle $Z = [N, N')$, there exists a finite decomposition + +$$Z = \{N\} \sqcup S_1 \sqcup S_2 \sqcup \dots \sqcup S_{k_0},$$ + +where $S_k := [M_k, M_{k+1})$ is a finite S-cycle for each $k = 1, \dots, k_0$ satisfying $N+1 = M_1$ and +$N' = M_{k_0+1}$. 
+ +For an infinite Z-cycle $Z = [N, \infty)$, there exists either a finite decomposition + +$$Z = \{N\} \sqcup S_1 \sqcup S_2 \sqcup \dots \sqcup S_{k_0},$$ + +where $S_k := [M_k, M_{k+1})$ is finite for $k = 1, \dots, k_0 - 1$ but infinite for $k = k_0$; or an infinite +decomposition + +$$Z = \{N\} \sqcup S_1 \sqcup S_2 \sqcup \dots$$ + +where $S_k := [M_k, M_{k+1})$ is finite for any $k \ge 1$. + +When we have a finite decomposition $Z = \{N\} \sqcup S_1 \sqcup S_2 \sqcup \dots \sqcup S_{k_0}$, we set $S_k := \emptyset$ for $k > k_0$ and we assume that any Z-cycle formally has an infinite decomposition of the form $Z = \{N\} \sqcup S_1 \sqcup S_2 \sqcup \dots$. We call this *the S-cycle decomposition* of Z. + +**The three lemmas for S-cycles.** Now we present three lemmas for S-cycles, that are parallel to Lemmas A, B, and C for Z-cycles: + +**Lemma A'.** There exists a constant $\kappa_A > 0$ such that for any $c \approx \hat{c}$, any $z = z_0 \in J(f_c)$, +and for any S-cycle $S = [M, M')$ of the orbit $z_n = f_c^n(z)$ ($n \in \mathbb{N}_0$), we have + +$$\sum_{i=1}^{M'-M} \frac{1}{|Df_c^i(z_M)|} \leq \kappa_A, \quad (8)$$ + +where we set $M' - M := \infty$ if $M' = \infty$. + +**Lemma B'.** There exists a constant $\kappa_B > 0$ such that for any $c \approx \hat{c}$ and any $M \le \infty$, if +$z = z_0 \in J(f_c)$ satisfies $z_n \notin V_0 \cup \mathcal{V}$ for $n \in [0, M)$, then + +$$\sum_{i=1}^{M} \frac{1}{|Df_c^i(z_0)|} \leq \kappa_B. \quad (9)$$ +---PAGE_BREAK--- + +**Lemma C' (S-cycles Expand Uniformly).** By choosing a sufficiently small $\nu$, there exists a constant $\lambda > 1$ such that for any $c \approx \hat{c}$, any $z = z_0 \in J(f_c)$, and for any finite S-cycle $S = [M, M')$ of the orbit $z_n = f_c^n(z)$ ($n \in \mathbb{N}_0$), we have + +$$|D f_c^{M'-M}(z_M)| \geq \lambda. \quad (10)$$ + +The proofs of these lemmas will be given later. 
+ +# 7 Some lemmas concerning the parameter ray condition + +This section is devoted for some lemmas related to the condition that $c$ is always on the parameter ray $\mathcal{R}_M(\theta)$ landing at $\hat{c}$ (the “parameter ray condition”). + +**Dynamic rays for Cantor Julia sets.** (See [CG, VIII, 3], [M, Appendix A].) For any parameter $c \in \mathbb{C}$, the *Böttcher coordinate* at infinity is a unique conformal map $\Phi_c$ defined near $\infty$ such that $\Phi_c(f_c(z)) = \Phi_c(z)^2$ and $\Phi_c(z)/z \to 1$ as $z \to \infty$. Let $K(f_c)$ be the set of $z$ whose orbit is never captured in the domain of $\Phi_c$. Then the boundary of $K(f_c)$ coincides with the Julia set $J(f_c)$. + +When $c \in \mathbb{M}$, the set $K(f_c)$ is connected and the Böttcher coordinate extends to a conformal isomorphism $\Phi_c : \mathbb{C} - K(f_c) \to \mathbb{C} - \bar{\mathbb{D}}$. The *dynamic ray* of angle $t \in T = \mathbb{R}/\mathbb{Z}$ is the analytic curve + +$$\mathcal{R}_c(t) := \{\Phi_c^{-1}(re^{2\pi it}) : r > 1\}.$$ + +We say that $\mathcal{R}_c(t)$ lands at $z \in K(f_c)$ if $\Phi_c^{-1}(re^{2\pi it})$ tends to $z$ as $r \searrow 1$. + +When $c \notin \mathbb{M}$, the set $K(f_c)$ coincides with $J(f_c)$ which is a Cantor set. There exists a minimal $r_c > 1$ such that the inverse $\Phi_c^{-1}$ extends to a conformal embedding of $\mathbb{C} - \bar{\mathbb{D}}(r_c)$ into $\mathbb{C}$ whose image contains the critical value $c = f_c(0)$. (The Douady-Hubbard uniformization $\Phi_M : \mathbb{C} - \mathbb{M} \to \mathbb{C} - \bar{\mathbb{D}}$ is given by setting $\Phi_M(c) := \Phi_c(c)$.) The dynamic ray of angle $t \in T$ is partially defined in $\Phi_c^{-1}(\mathbb{C} - \bar{\mathbb{D}}(r_c))$, and it extends to an analytic curve $\mathcal{R}_c(t)$ landing at a point in $K(f_c)$ unless $2^n t = t_c$ for some $n \ge 1$, where $t_c := (2\pi)^{-1} \arg \Phi_c(c)$. 
+ +**Our setting and notation.** Let us go back to our setting with semi-hyperbolic $\hat{c} \in \partial \mathbb{M}$ where $\mathcal{R}_M(\theta)$ lands. We will use the following facts and notations: + +• There is no interior point in $K(f_{\hat{c}})$ and thus $K(f_{\hat{c}}) = J(f_{\hat{c}})$. Moreover, $J(f_{\hat{c}})$ is connected and locally connected ([CJY]). By Carathéodory's theorem, $\Phi_{\hat{c}}^{-1}$ extends continuously to $\mathbb{C} - \bar{\mathbb{D}}$ and the dynamic ray $\mathcal{R}_{\hat{c}}(t)$ of any angle $t$ lands. + +• The angle $\theta$ is not recurrent under the angle doubling $t \mapsto 2t$ ([D2, Thm.2]). Set + +$$\Theta := \{2^{n+l-1}\theta \in T : n \ge 0\}$$ + +and let $\hat{\Theta}$ denote its closure in $T$, where $l$ is the minimal $l$ with $f_{\hat{c}}^{l-1}(\hat{c}) \in \Omega(\hat{c})$. For $t \in \hat{\Theta}$ the dynamic ray $\mathcal{R}_{\hat{c}}(t)$ lands on a point in the hyperbolic set $\Omega(\hat{c})$. (See Step 1 of Proposition S below.) In particular, $\mathcal{R}_{\hat{c}}(2^{n+l-1}\theta)$ lands on $\hat{b}_{n+l} \in \Omega(\hat{c})$ for each $n \ge 0$. + +• Let us fix an $r_0 > 1$ and consider the compact set + +$$E_0 := \{re^{2\pi it} : t \in \hat{\Theta}, r \in [r_0^{1/2^p}, r_0]\} \subset \mathbb{C} - \bar{\mathbb{D}}.$$ + +By choosing $r_0$ close enough to 1, the set $E(\hat{c}) := \Phi_{\hat{c}}^{-1}(E_0)$ is contained in $U_l$. +---PAGE_BREAK--- + +• The parameter ray condition $c \in \mathcal{R}_M(\theta)$ is equivalent to $c \in \mathcal{R}_c(\theta)$, or to $2\pi t_c = \arg \Phi_c(c) = 2\pi\theta$. Non-recurrence of $\theta$ assures that the dynamic rays $\mathcal{R}_c(t)$ with $t \in \hat{\Theta}$ are always defined and land on the Julia set. + +• Since the Böttcher coordinate $\Phi_c(z)$ is holomorphic in both $c$ and $z$ as long as it is defined, $E(c) := \Phi_c^{-1}(E_0)$ is well-defined for each $c \approx \hat{c}$ and also contained in $U_l$. 
More precisely, we choose the disk $\Delta$ in Proposition 2.2 small enough and assume that both $E(c)$ and $\Omega(c)$ moves holomorphically in $U_l$ for any $c \in \Delta$. + +Let us check the following proposition, that is interesting in its own right: + +**Proposition S (Stability of Landing Rays).** For any $c \in \Delta$ (without assuming the parameter ray condition) and any $t \in \hat{\Theta}$, the dynamic ray $\mathcal{R}_c(t)$ lands on a point in the hyperbolic set $\Omega(c)$ and $\mathcal{R}_c(t) \cap U_l$ has uniformly bounded length. In particular, $\mathcal{R}_c(2^{n+l-1}\theta)$ lands on $b_{n+l}(c) \in \Omega(c)$ for each $n \ge 0$. Moreover, the set + +$$ \hat{\mathcal{R}}(c) := \bigcup_{t \in \hat{\Theta}} \mathcal{R}_c(t) \subset \bar{\mathcal{C}} $$ + +moves continuously in the Hausdorff topology on the Riemann sphere as $c \to \hat{c}$. + +**Proof.** The proof breaks into three steps. + +**Step 1.** We first consider the case of $c = \hat{c}$. We claim: *For any angle $t \in \hat{\Theta}$, the dynamic ray $\mathcal{R}_{\hat{c}}(t)$ lands on $\Omega(\hat{c})$ and $\mathcal{R}_{\hat{c}}(t) \cap U_l$ has uniformly bounded length.* + +Let $x = x(t)$ denote the landing point of $\mathcal{R}_{\hat{c}}(t)$. By the Carathéodory theorem, $x(t)$ depends continuously on the angle $t$. Since $x(2^{l-1}\theta) = \hat{b}_l \in \Omega(\hat{c})$ and any angle $t \in \hat{\Theta}$ is an accumulation point of the orbit of $2^{l-1}\theta$ by the angle doubling, we obtain $x(t) \in \Omega(\hat{c})$. (Note that $\Omega(\hat{c})$ is forward invariant and compact.) 
+ +Let us set $\mathcal{R} := \mathcal{R}_{\hat{c}}(t)$ and + +$$ \mathcal{R}(n) := \{ z \in \mathcal{R} : |\Phi_{\hat{c}}(z)|^{2^{np}} \in [r_0^{1/2^p}, r_0] \} $$ + +for $n \ge 0$ such that $f_{\hat{c}}^{np}(\mathcal{R}(n)) = f_{\hat{c}}^{np}(\mathcal{R}) \cap E(\hat{c})$ and the union + +$$ \mathcal{R}(0) \cup \mathcal{R}(1) \cup \mathcal{R}(2) \cup \dots $$ + +coincides with the bounded arc $\mathcal{R} - \Phi_{\hat{c}}^{-1}(\{w \in \mathbb{C} : |w| > r_0\})$. Note that the arc $f_{\hat{c}}^{np}(\mathcal{R}(n)) \subset E(\hat{c}) \subset U_l$ has uniformly bounded length. By the Koebe distortion theorem and the condition $|Df_{\hat{c}}^p(z)| \ge \mu$ in $U_l$, we have + +$$ \text{length}(\mathcal{R}(n)) = O(\mu^{-n}), $$ + +where the implicit constant is independent of the angle $t$. Hence the dynamic ray $\mathcal{R}$ has uniformly bounded length in $U_l$. + +**Step 2.** Next we claim: *For any $c \approx \hat{c}$ and angle $t \in \hat{\Theta}$, the dynamic ray $\mathcal{R}_c(t)$ lands on $\chi_c(x(t)) \in \Omega(c)$ and $\mathcal{R}_c(t) \cap U_l$ has uniformly bounded length.* + +Set $\mathcal{R}' := \mathcal{R}_c(t)$ and + +$$ \mathcal{R}'(n) := \left\{ z \in \mathcal{R}' : |\Phi_c(z)|^{2^{np}} \in [r_0^{1/2^p}, r_0] \right\} $$ 
Since there exists a univalent branch $g_c$ of $f_c^{-np}$ defined on $D$ such that it sends $f_c^{np}(x')$ to $x'$ and $f_c^{np}(\mathcal{R}'(n))$ to $\mathcal{R}'(n)$, and since $|Df_c^p(z)| \ge \mu$ in $U_l$, we have + +$$ \mathrm{dist}(x', \mathcal{R}'(n)) = O(\mu^{-n}). $$ + +It follows that $\mathcal{R}' = \mathcal{R}_c(t)$ lands at $x' = \chi_c(x)$ and $\mathcal{R}' \cap U_l$ has uniformly bounded length independent of $c \approx \hat{c}$ and $t \in \hat{\Theta}$. + +**Step 3.** Finally we show the continuity of the set $\hat{\mathcal{R}}(c)$. It is enough to show: For any $c \approx \hat{c}$ there exists a homeomorphism $\phi_c : \hat{\mathcal{R}}(\hat{c}) \to \hat{\mathcal{R}}(c)$ such that $\phi_c \to \mathrm{id}$ uniformly as $c \to \hat{c}$ in the spherical metric. + +By Step 2, the homeomorphism $\phi_c$ is naturally defined by $\phi_c(\infty) = \infty$, $\phi_c := \chi_c$ on $\Omega(\hat{c})$, and $\phi_c := \Phi_c^{-1} \circ \Phi_{\hat{c}}$ on each ray $\mathcal{R}_{\hat{c}}(t)$ with $t \in \hat{\Theta}$. + +Now suppose that there exists an $\epsilon > 0$ such that for any $k \in \mathbb{N}$, we can find a pair of $c_k$ and $z_k$ such that $|c_k - \hat{c}| \le 1/k$, $z_k \in \hat{\mathcal{R}}(\hat{c})$, and the spherical distance between $\phi_{c_k}(z_k)$ and $z_k$ exceeds $\epsilon$. By taking a subsequence, we may assume that $z_k$ has a limit $\zeta = \lim_{k\to\infty} z_k$ in $\hat{\mathcal{R}}(\hat{c})$. + +Since the map $\Phi_c^{-1}(w)$ is continuous in both $c$ and $w$, the map $\phi_c$ converges to identity as $c \to \hat{c}$ locally uniformly near each point of $\hat{\mathcal{R}}(\hat{c}) - \Omega(\hat{c}) \cup \{\infty\}$. The convergence of $\phi_c$ near $\infty$ is uniform as well in the spherical metric because $\Phi_c$ is tangent to identity near $\infty$. Hence the limit $\zeta$ above belongs to $\Omega(\hat{c})$. 
+ +Let $W(n)$ denote the bounded subset of $\hat{\mathcal{R}}(\hat{c})$ given by + +$$ W(n) := \Omega(\hat{c}) \cup \bigcup_{t \in \hat{\Theta}} \left\{ \Phi_{\hat{c}}^{-1}(r e^{2\pi i t}) : r \le r_0^{1/2^{np}} \right\}. $$ + +For any $n$, there exists a $k_n \in \mathbb{N}$ such that $z_k \in W(n)$ for any $k \ge k_n$. Now we define a point $x_k$ in $\Omega(\hat{c})$ as follow: let $x_k := z_k$ if $z_k \in \Omega(\hat{c})$. Otherwise $z_k$ belongs to a dynamic ray $\mathcal{R}_{\hat{c}}(t_k)$ for some $t_k \in \hat{\Theta}$, and we let $x_k = x(t_k)$ be its landing point. Then we obtain + +$$ |\phi_{c_k}(z_k) - z_k| \le |\phi_{c_k}(z_k) - \phi_{c_k}(x_k)| + |\phi_{c_k}(x_k) - x_k| + |x_k - z_k|, $$ + +where both $|\phi_{c_k}(z_k) - \phi_{c_k}(x_k)|$ and $|x_k - z_k|$ are $O(\mu^{-n})$ by Steps 1 and 2, and $|\phi_{c_k}(x_k) - x_k| = |\chi_{c_k}(x_k) - x_k| = O(|c_k - \hat{c}|) = O(1/k)$. (See [BR, Corollary 2].) Hence $|\phi_{c_k}(z_k) - z_k|$ is bounded by $\epsilon/2$ by taking sufficiently large $n$ and $k$. This is a contradiction. ■ + +The next lemma will be used in the proof of Lemma A: + +**Lemma T.** Let $\hat{c} \in \partial\mathbb{M}$ be a semi-hyperbolic parameter. There exists a positive constant $C_T = C_T(\hat{c})$ such that $\mathrm{dist}(0, J(f_c)) \ge C_T\sqrt{|c-\hat{c}|}$ for any $c \approx \hat{c}$ on the parameter ray $\mathcal{R}_M(\theta)$ that lands at $\hat{c}$. + +**Proof.** Since $f_c(z) - f_c(0) = (z - 0)^2$, it is equivalent to show + +$$ \mathrm{dist}(c, J(f_c)) \ge C_T' |c - \hat{c}| $$ + +for some constant $C_T' = C_T^2 > 0$ independent of $c \approx \hat{c}$ with $c \in \mathcal{R}_M(\theta)$. +---PAGE_BREAK--- + +Set $a(c) := f_c^l(0)$ and $b(c) := b_l(c)$ for $c \approx \hat{c}$. Since $f_c^{l-1}$ is univalent near $c$, we have + +$$\mathrm{dist}(c, J(f_c)) \asymp \mathrm{dist}(a(c), J(f_c))$$ + +by the Koebe distortion theorem. 
By a result of Rivera-Letelier [RL, Appendix 2] and van Strien [vS, Theorem.1.1] (see also Douady and Hubbard [DH2, p.333, Lemma 1] for Misiurewicz case), there exists a constant $B_0 \neq 0$ such that + +$$a(c) - b(c) = B_0(c - \hat{c}) + O((c - \hat{c})^2)$$ + +for $c \approx \hat{c}$. Hence it is enough to show that there exists a constant $C_T'' > 0$ such that + +$$\mathrm{dist}(a(c), J(f_c)) \geq C_T'' |a(c) - b(c)| \quad (11)$$ + +for $c \approx \hat{c}$ with $c \in \mathcal{R}_M(\theta)$. + +For each $z \in E(c) = \Phi_c^{-1}(E_0)$ defined in the proof of Proposition S, there exists an angle $t \in \hat{\Theta}$ such that $\arg \Phi_c(z) = 2\pi t$. By Proposition S, the external ray $R_c(t)$ lands on a point $L_c(z)$ in $\Omega(c)$. Now we define a constant $\Gamma(c)$ for each $c \approx \hat{c}$ by + +$$\Gamma(c) := \inf \left\{ \frac{\mathrm{dist}(z, J(f_c))}{|z - L_c(z)|} \in (0, 1] : z \in E(c) \right\}$$ + +and claim that its infimum + +$$\Gamma := \inf \{\Gamma(c) : c \in \Delta\}$$ + +is a positive constant if we choose sufficiently small disk $\Delta$ centered at $\hat{c}$. Indeed, if there exists a sequence $c_k \to \hat{c}$ in $\Delta$ such that $\Gamma(c_k) \to 0$, then we have $\mathrm{dist}(z_k, J(f_{c_k})) \to 0$ for some $z_k \in E(c_k)$. (Note that $|z - L_c(z)|$ is always bounded because $E(c)$ and $J(c)$ are uniformly bounded for $c \in \Delta$.) However, it is impossible because $E(c)$ and $J(f_c)$ move continuously at $c = \hat{c}$ and $E(\hat{c})$ has a definite distance from $J(f_{\hat{c}})$. Hence we obtain + +$$\mathrm{dist}(z, J(f_c)) \geq \Gamma |z - L_c(z)|$$ + +for each $z \in E(c)$ and $c \in \Delta$. + +Suppose that $c \in R_M(\theta) \cap \Delta$ and $f_c^{np}(a(c)) \in E(c)$ for some $n \in \mathbb{N}_0$. 
Since $L_c(f_c^{np}(a(c))) = f_c^{np}(b(c))$, we have + +$$\mathrm{dist}(f_c^{np}(a(c)), J(f_c)) \geq \Gamma |f_c^{np}(a(c)) - f_c^{np}(b(c))|.$$ + +By Proposition S, if we choose sufficiently small $r_0$, then the length of the arc in the dynamic ray joining any $z \in E(c)$ and $L_c(z) \in \Omega(c)$ is uniformly and arbitrarily small. Thus there exists a univalent branch of $f_c^{-np}$ on the disk $\mathbb{D}(f_c^{np}(b(c)), 2R_l)$ that sends both $f_c^{np}(a(c))$ and $f_c^{np}(b(c))$ to $a(c)$ and $b(c)$ respectively. By the Koebe distortion theorem, we have (11). ■ + +*Remark 7.1.* This proof is based on the argument to show that the basin at infinity of $f_{\hat{c}}$ is a John domain. See [CJY, §3] and [CG, p.118]. + +The next lemma will be used in the proof of Lemma C: + +**Lemma U.** There exists a constant $C_U > 0$ with the following property: for any $c \approx \hat{c}$ with $c \in R_M(\theta)$ and any $z_0 \in V_0 \cap J(f_c)$ such that $z_{n-p} \in U_l$ and $z_n \notin U_l$, we have $|Df_c^n(z_0)| \ge C_U/|z_0|$. + + +---PAGE_BREAK--- + +**Proof.** By Lemma T (and its proof), we have $|z_0| \ge \text{dist}(0, J(f_c)) \ge C_T \sqrt{|c-\hat{c}|}$ and +$|b_0(c)| \asymp \sqrt{|b_l(c) - f_c^{l-1}(c)|} \asymp \sqrt{|c-\hat{c}|}$. Hence we have $|z_0| \ge C_1 |b_0(c)|$ for some constant +$C_1 > 0$ and it follows that + +$$ +|z_1 - b_1(c)| = |z_0^2 - b_0(c)^2| \le C_2 |z_0|^2 +$$ + +where $C_2 := 1 + C_1^2$. + +Now $z_n \notin U_l$ means that $|z_n - \hat{b}_n| \ge \text{dist}(z_n, \Omega(\hat{c})) \ge R_l$. Since $z_{n-p} \in U_l$, $z_n$ is still close to $\Omega(\hat{c})$ and by taking a smaller $R_l$ if necessary, we may assume that there exists an $R > R_l$ independent of $c \approx \hat{c}$ and $z_0 \in V_0 \cap J(f_c)$ such that $z_n \in D(\hat{b}_n, R)$. 
Since we may assume that $|\hat{b}_n - b_n(c)| = |\hat{b}_n - \chi_c(\hat{b}_n)| \le R_l/2$ for any $c \approx \hat{c}$, we have + +$$ +|z_n - b_n(c)| \geq |z_n - \hat{b}_n| - |\hat{b}_n - b_n(c)| \geq R_l/2. +$$ + +Let $G$ be a univalent branch of $f_c^{-(n-1)}$ defined on $D(\hat{b}_n, 2R)$ (by taking smaller $R$ and $R_l$ if necessary) that maps $b_n(c)$ to $b_1(c)$ and $z_n$ to $z_1$. By the Koebe distortion theorem, we have + +$$ +|DG(z_n)| \asymp |DG(b_n(c))| +$$ + +and + +$$ +|z_1 - b_1(c)| = |G(z_n) - G(b_n(c))| \asymp |DG(b_n(c))| |z_n - b_n(c)|. +$$ + +Since $|z_1 - b_1(c)| \le C_2 |z_0|^2$ and $|z_n - b_n(c)| \ge R_l/2$, we have $|Df_c^{n-1}(z_1)| = |DG(z_n)|^{-1} \ge C_3/|z_0|^2$, where $C_3$ is a constant independent of $c \approx \hat{c}$. Hence we have + +$$ +|Df_c^n(z_0)| = |Df_c^{n-1}(z_1)| |Df_c(z_0)| \geq \frac{C_3}{|z_0|^2} \cdot (2|z_0|) = \frac{2C_3}{|z_0|}. +$$ + +Set $C_U := 2C_3$. $\blacksquare$ + +**Geometry of the parameter ray.** The following lemma will be used in the proof of +Theorem 1.2: + +**Lemma V.** Let $\hat{c} \in \partial\mathbb{M}$ be a semi-hyperbolic parameter and $\mathcal{R}_{\mathbb{M}}(\theta)$ a parameter ray landing on $\hat{c}$. Then the sequence $\{c_n\}_{n \ge 0}$ in $\mathcal{R}_{\mathbb{M}}(\theta)$ defined by + +$$ +c_n := \Phi_M^{-1} \left( r_0^{1/2^{np}} e^{2\pi i \theta} \right) +$$ + +satisfies the following properties: + +(1) $|c_{n+k} - \hat{c}| = O(\mu^{-k})|c_n - \hat{c}|$ for any $n$ and $k \ge 0$. + +(2) Let $\mathcal{R}_{\mathbb{M}}(n)$ be the subarc of $\mathcal{R}_{\mathbb{M}}(\theta)$ bounded by $c_n$ and $c_{n+1}$. Then + +$$ +|c_{n+1} - c_n| \asymp \text{length}(\mathcal{R}_{\mathbb{M}}(n)) = O(\mu^{-n}). +$$ + +In particular, $\mathcal{R}_{\mathbb{M}}(\theta)$ has finite length in a neighborhood of $\hat{c}$. 
+---PAGE_BREAK--- + +**Proof.** By a result by Rivera-Letelier [RL], there exists a constant $\hat{\lambda} \neq 0$ such that $\Psi := \Phi_M^{-1} \circ \Phi_{\hat{c}} : \mathbb{C} - J(f_{\hat{c}}) \to \mathbb{C} - \mathbb{M}$ is of the form + +$$ \Psi(z) = \hat{c} + \hat{\lambda}(z - \hat{c}) + O(|z - \hat{c}|^{3/2}) $$ + +when $z \in \mathbb{C} - J(f_{\hat{c}})$ and $z \approx \hat{c}$. In particular, $\Psi$ maps the dynamic ray $\mathcal{R}_{\hat{c}}(\theta)$ to the parameter ray $\mathcal{R}_{\mathbb{M}}(\theta)$ conformally near the landing point $\hat{c}$. Hence it is enough to check that *the points* + +$$ z_n := \Psi^{-1}(c_n) = \Phi_{\hat{c}}^{-1} \left( r_0^{1/2^{np}} e^{2\pi i \theta} \right) $$ + +satisfies + +(1') $|z_{n+k} - \hat{c}| = O(\mu^{-k})|z_n - \hat{c}|$ for $k \ge 0$; and + +(2') the length of the subarc of $\mathcal{R}_{\hat{c}}(\theta)$ bounded by $z_n$ and $z_{n+1}$ is compatible with $|z_{n+1} - z_n|$ and is $O(\mu^{-n})$ + +for sufficiently large $n$. + +For each $t \in \hat{\Theta}$ and $n \ge 0$, set $z_n(t) := \Phi_{\hat{c}}^{-1}(r_0^{1/2^{np}} e^{2\pi i t})$ such that the sequence $\{z_n(t)\}_{n \ge 0}$ converges along the external ray $\mathcal{R}_{\hat{c}}(t)$ to the landing point $x(t)$. Note that $z_0(t)$ and $z_1(t)$ bound the arc $\mathcal{R}_{\hat{c}}(t) \cap E(\hat{c})$. Since $E(\hat{c})$ and $\hat{\Theta}$ are compact, we have + +(a) $|z_0(t) - x(t)| \asymp 1$; and + +(b) $|z_0(t) - z_1(t)| \asymp \text{length}(\mathcal{R}_{\hat{c}}(t) \cap E(\hat{c}))$, + +where the implicit constants are independent of $t \in \hat{\Theta}$. + +Now suppose that $n$ is large enough such that $np \ge l-1$ and thus $t_n := 2^{np}\theta \in \hat{\Theta}$. Then we can find a univalent branch of $f_{\hat{c}}^{-np}$ defined on a disk centered at $x(t_n)(= \hat{b}_{np+1})$ with a definite radius independent of $n$ that maps $z_0(t_n)$, $z_k(t_n)$ and $x(t_n)$ univalently to $z_n, z_{n+k}$ and $\hat{c}$ respectively. 
By the Koebe distortion theorem and (a) we have + +$$ \frac{|z_{n+k} - \hat{c}|}{|z_n - \hat{c}|} \asymp \frac{|z_k(t_n) - x(t_n)|}{|z_0(t_n) - x(t_n)|} \asymp |z_k(t_n) - x(t_n)|. $$ + +We can find a univalent inverse branch $G_k$ of $f_{\hat{c}}^{kp}$ defined on a disk centered at $x(t_{n+k}) (= \hat{b}_{(n+k)p+1})$ with a definite radius independent of $n$ and $k$ that maps $z_0(t_{n+k})$ and $x(t_{n+k})$ univalently to $z_k(t_n)$ and $x(t_n)$. Hence by Koebe again we have + +$$ |z_k(t_n) - x(t_n)| \asymp |DG_k(x(t_{n+k}))| |z_0(t_{n+k}) - x(t_{n+k})| = O(\mu^{-k}). $$ + +It follows that $|z_{n+k} - \hat{c}| = O(\mu^{-k})|z_n - \hat{c}|$ and we obtain (1'). + +By (b) and the same argument as above, the length of the subarc of $\mathcal{R}_{\hat{c}}(\theta)$ bounded by $z_n$ and $z_{n+1}$ is uniformly compatible with $|z_{n+1} - z_n|$ for any $n \ge 0$. As a corollary of Step 1 of Proposition S, we conclude that the length is $O(\mu^{-n})$. Thus we obtain (2'). $\blacksquare$ + +*Remark 7.2.* Since there exist at most finitely many dynamic rays of the Julia set $J(f_{\hat{c}})$ landing at $\hat{c}$ (see Thurston [Th, Theorem II.5.2] or Kiwi [K1, Theorem 1.1]), the asymptotic similarity between $J(f_{\hat{c}})$ and $\mathbb{M}$ at $\hat{c}$ by Rivera-Letelier [RL] implies that $\mathbb{M}$ has the same finite number of parameter rays landing at $\hat{c}$. (cf. [CG, VIII, 6]. See also [Mc, Chapter 6].) +---PAGE_BREAK--- + +# 8 Proofs of Theorems 1.2 and 1.3 + +**Proof of Theorem 1.2.** We combine the Main Theorem and Lemma V. 
It is enough to show the existence of the improper integral + +$$z(c(r_0)) + \lim_{\delta \to 0+} \int_{r_0}^{1+\delta} \frac{dz(c)}{dc} \frac{dc(r)}{dr} dr = z(c(r_0)) + \sum_{n \ge 0} \int_{\mathbb{R}_{\mathbb{M}}(n)} \frac{dz(c)}{dc} dc,$$ + +where $r_0 > 1$ is a constant given in the definition of the set $E_0$ in the previous section, and $\mathbb{R}_{\mathbb{M}}(n)$ is the subarc of $\mathbb{R}_{\mathbb{M}}(\theta)$ bounded by $c_n$ and $c_{n+1}$ defined in Lemma V. Note that by Lemma V, we obtain + +$$\text{length}\mathbb{R}_{\mathbb{M}}(n) \asymp |c_{n+1} - c_n| \le |c_{n+1} - \hat{c}| + |c_n - \hat{c}| = O(|c_n - \hat{c}|)$$ + +and + +$$|c_n - \hat{c}| \le \sum_{m \ge n} \text{length}\mathbb{R}_{\mathbb{M}}(m) = O(\mu^{-n}).$$ + +Note also that + +$$|c_n - \hat{c}| \asymp |c - \hat{c}| \quad (12)$$ + +for any $c \in \mathbb{R}_{\mathbb{M}}(n)$, where the implicit constant is independent of $n$ by the Koebe distortion theorem, applied in the same way as the proof of Lemma V. + +By the Main Theorem we obtain + +$$ +\begin{align*} +\sum_{n \ge 0} \int_{\mathbb{R}_{\mathbb{M}}(n)} \left|\frac{dz(c)}{dc}\right| |dc| &\le \sum_{n \ge 0} \int_{\mathbb{R}_{\mathbb{M}}(n)} \frac{K}{\sqrt{|c-\hat{c}|}} |dc| \\ +&\asymp \sum_{n \ge 0} \frac{K}{\sqrt{|c_n-\hat{c}|}} \text{length}\mathbb{R}_{\mathbb{M}}(n) \\ +&= \sum_{n \ge 0} O\left(\frac{1}{\sqrt{|c_n-\hat{c}|}} |c_n-\hat{c}|\right) \\ +&= \sum_{n \ge 0} O(\mu^{-n/2}) < \infty. +\end{align*} +$$ + +Hence the improper integral above converges absolutely to some $z(\hat{c})$. + +To show the one-sided Hölder continuity, it is enough to check $|z(c_n)-z(\hat{c})| = O(\sqrt{|c_n-\hat{c}|})$ for each $c_n$ by (12). 
The same argument as above yields + +$$|z(c_n) - z(\hat{c})| \le \sum_{k \ge 0} \int_{\mathbb{R}_{\mathbb{M}}(n+k)} \left|\frac{dz(c)}{dc}\right| |dc| \le \sum_{k \ge 0} O(\sqrt{|c_{n+k}-\hat{c}|}).$$ + +By (1) of Lemma V, we have $|c_{n+k}-\hat{c}| = O(\mu^{-k})|c_n-\hat{c}|$ for each $k \ge 0$ and thus $|z(c_n)-z(\hat{c})| = \sum_{k \ge 0} O(\mu^{-k/2})\sqrt{|c_n-\hat{c}|} = O(\sqrt{|c_n-\hat{c}|})$. + +Since it is clear that $z(\hat{c})$ is confined in a bounded region, to show $z(\hat{c}) \in J(f_{\hat{c}})$, we only need to show $\lim_{c \to \hat{c}} (z(c))^2 + c = (\lim_{c \to \hat{c}} z(c))^2 + \lim_{c \to \hat{c}} c$, but this follows from the continuity of the quadratic map. ■ +---PAGE_BREAK--- + +**Proof of Theorem 1.3** For each $z_0 \in J(f_{c_0})$ and its motion $z(c) = h_c(z_0) = H(c, z_0)$ along the parameter ray $\mathcal{R}_M(\theta)$, we define $h_{\hat{c}}(z_0)$ by the limit $z(\hat{c})$ given in Theorem 1.2. Since $h_c$ is continuous and the convergence of $h_c$ to $h_{\hat{c}}$ as $c \to \hat{c}$ along the parameter ray $\mathcal{R}_M(\theta)$ is uniform, $h_{\hat{c}}$ is continuous as well. Hence $f_{\hat{c}} \circ h_{\hat{c}} = h_{\hat{c}} \circ f_{c_0}$ is obvious and it is enough to show the surjectivity of $h_{\hat{c}} : J(f_{c_0}) \to J(f_{\hat{c}})$. First we take any repelling periodic point $x \in J(f_{\hat{c}})$. Since there is a holomorphic family $x(c)$ of repelling periodic points for $c$ sufficiently close to $\hat{c}$ such that $x = x(\hat{c})$, we have some $z_0 \in J(f_{c_0})$ with $h_c(z_0) = x(c)$ for any $c \approx \hat{c}$ with $c \in \mathcal{R}_M(\theta)$. In particular, we have $h_{\hat{c}}(z_0) = x$. Next we take any $w \in J(f_{\hat{c}})$ and a sequence of repelling periodic points $x_n$ of $f_{\hat{c}}$ that converges to $w$ as $n \to \infty$. (Such a sequence exists since repelling periodic points are dense in the Julia set.) Let $z_n \in J(f_{c_0})$ be the repelling periodic point with $h_{\hat{c}}(z_n) = x_n$. 
Then any accumulation point $y$ of the sequence $z_n$ satisfies $h_{\hat{c}}(y) = w$ by continuity. ■ + +# 9 Proof of Lemma A assuming Lemmas A' and C' + +Without loss of generality we may assume that $N = 0$, i.e., $z = z_0 \in V_0 \cap J(f_c)$. We set $f := f_c$. Now consider the S-cycle decomposition $\mathbb{Z} = \{0\} \cup S_1 \cup S_2 \cup \dots$ of $\mathbb{Z} = [0, N')$ where $S_k = [M_k, M_{k+1})$ if $S_k \neq \emptyset$, and $M_1 = 1$. Then we have + +$$ +\begin{aligned} +\sum_{i=1}^{N'} \frac{1}{|Df^i(z)|} &= \frac{1}{|Df(z)|} + \sum_{k \ge 1} \sum_{n \in S_k} \frac{1}{|Df^{n+1}(z)|} \\ +&= \frac{1}{2|z|} + \sum_{k \ge 1, S_k \ne \emptyset} \sum_{i=1}^{M_{k+1}-M_k} \frac{1}{|Df^i(z_{M_k})| |Df^{M_k}(z)|} \\ +&\le \frac{1}{2|z|} + \sum_{k \ge 1, S_k \ne \emptyset} \frac{\kappa_A}{|Df^{M_k}(z)|} +\end{aligned} +$$ + +by Lemma A'. If $S_k \neq \emptyset$, then by Lemma C', + + + +$$ |Df^{M_k}(z)| = |Df^{M_k - M_{k-1}}(z_{M_{k-1}})| \cdots |Df^{M_2 - M_1}(z_{M_1})| |Df(z)| \geq \lambda^{k-1} \cdot 2|z|, $$ + +where $M_1 = 1$. Hence we have $|Df^{M_k}(z)|^{-1} \le 1/(\lambda^{k-1} \cdot 2|z|)$ for any $k$. Moreover, by Lemma T, we have $\text{dist}(0, J(f_c)) \ge C_T \sqrt{|c-\hat{c}|}$ for $c \approx \hat{c}$ on the parameter ray, and thus + +$$ +\begin{aligned} +\sum_{i=1}^{N'} \frac{1}{|Df^i(z)|} &\le \frac{1}{2|z|} + \sum_{k=1}^{\infty} \frac{\kappa_A}{\lambda^{k-1} (2|z|)} \\ +&\le \frac{1}{2 \cdot \text{dist}(0, J(f_c))} \left\{ 1 + \sum_{k=1}^{\infty} \frac{\kappa_A}{\lambda^{k-1}} \right\} \\ +&\le \frac{1}{2C_T \sqrt{|c-\hat{c}|}} \left\{ 1 + \kappa_A \frac{\lambda}{\lambda-1} \right\}. +\end{aligned} +$$ + +Hence by setting $K_A := (2C_T)^{-1}\{1 + \kappa_A\lambda/(\lambda - 1)\}$, we have the claim. 
■ + +# 10 Proof of Lemma B assuming Lemmas A', B' and C' + +Just like the S-cycle decompositions of Z-cycles, we have a finite or infinite decomposition of the form + +$$ [0, N) = [0, M_1) \cup S_1 \cup S_2 \cup \dots $$ +---PAGE_BREAK--- + +where we have the following three cases: + +1. $N = M_1 \le \infty$ and $z_n \notin V_0 \cup \mathcal{V}$ for any $0 \le n < M_1$. Hence $S_k = \emptyset$ for all $k \in \mathbb{N}$. + +2. $z_n \notin V_0 \cup \mathcal{V}$ for $0 \le n < M_1$, and there exists a $k_0 \in \mathbb{N}$ such that $S_k := [M_k, M_{k+1})$ is an S-cycle for each $k \le k_0$ and $S_k = \emptyset$ for all $k > k_0$. + +3. $z_n \notin V_0 \cup \mathcal{V}$ for $0 \le n < M_1$, and $S_k := [M_k, M_{k+1})$ is a finite S-cycle for any $k \in \mathbb{N}$. + +Set $f = f_c$. For all cases, we have + +$$ +\begin{aligned} +\sum_{n=1}^{N} \frac{1}{|Df^n(z)|} &= \sum_{n=1}^{M_1} \frac{1}{|Df^n(z)|} + \sum_{k \ge 1, S_k \ne \emptyset} \sum_{i=1}^{M_{k+1}-M_k} \frac{1}{|Df^i(z_{M_k})| |Df^{M_k}(z)|} \\ +&\le \kappa_B + \sum_{k \ge 1, S_k \ne \emptyset} \frac{\kappa_A}{|Df^{M_k}(z)|} +\end{aligned} +$$ + +by Lemmas A' and B'. By Lemma B' again, we obviously have $|Df^{M_1}(z)|^{-1} < \kappa_B$. Hence by Lemma C', we have + +$$ |Df^{M_k}(z)| = |Df^{M_k - M_{k-1}}(z_{M_{k-1}})| \cdots |Df^{M_2 - M_1}(z_{M_1})| |Df^{M_1}(z)| \geq \lambda^{k-1}/\kappa_B. $$ + +Hence we have + +$$ \sum_{n=1}^{N} \frac{1}{|Df^n(z)|} \leq \kappa_B + \sum_{k \geq 1} \frac{\kappa_A \kappa_B}{\lambda^{k-1}} < \kappa_B + \kappa_A \kappa_B \frac{\lambda}{\lambda-1} =: K_B. $$ + +■ + +## 11 Hyperbolic metrics + +For the proofs of Lemmas A', B', C' and C, we will use the hyperbolic metrics and the expansion of $f_c$ with respect to these metrics. 
+ +For a domain $\Omega$ in $\mathbb{C}$ with $\#(\mathbb{C} - \Omega) \ge 2$, there exists a hyperbolic metric $\rho(z)|dz|$ on $\Omega$ of constant curvature -4 induced by the metric $|dz|/(1 - |z|^2)$ on the universal covering $\mathbb{D} = \tilde{\Omega}$. + +We first recall the following standard fact: + +**Lemma W.** Let $\Omega_0$ be a domain in $\mathbb{C}$ with $\#(\mathbb{C} - \Omega_0) \ge 2$ and $\rho_0(z)|dz|$ be its hyperbolic metric. Then for any domain $\Omega \subset \Omega_0$, the hyperbolic metric $\rho(z)|dz|$ of $\Omega$ satisfies + +$$ \rho_0(z) \leq \rho(z) \leq \frac{1}{\operatorname{dist}(z, \partial\Omega)}, $$ + +where $\operatorname{dist}(z, \partial\Omega)$ is the Euclidean distance between $z$ and $\partial\Omega$. + +See [Ah, Theorems 1.10 & 1.11] for more details. +---PAGE_BREAK--- + +**Postcritical sets.** The *postcritical set* $P(f_c)$ of the polynomial $f_c(z) = z^2 + c$ is defined by + +$$P(f_c) := \overline{\{f_c(0), f_c^2(0), f_c^3(0), \dots\}}.$$ + +For example, we have + +$$P(f_{\hat{c}}) = \{\hat{b}_1, \hat{b}_2, \dots, \hat{b}_{l-1}\} \cup \Omega(\hat{c})$$ + +when $c = \hat{c}$ and this set is finite if $\hat{c}$ is a Misiurewicz point. Moreover, for any $c \approx \hat{c}$, we have $\# P(f_c) \ge 2$ and the universal covering of (each component of) $\mathbb{C} - P(f_c)$ is the unit disk $^2$. + +Let $\gamma = \gamma(z)|dz|$ denote the hyperbolic metric of $\mathbb{C}-P(f_c)$, which is induced by the metric +$|dz|/(1-|z|^2)$ on the unit disk $\mathbb{D}$. The metric $\gamma = \gamma(z)|dz|$ has the following properties: + +(i) $\gamma: \mathbb{C} - P(f_c) \to \mathbb{R}_+$ is real analytic and diverges on $P(f_c) \cup \{\infty\}$. 
+ +(ii) if both $z$ and $f_c(z)$ are in $\mathbb{C} - P(f_c)$, we have + +$$\frac{\gamma(f_c(z))}{\gamma(z)} |Df_c(z)| > 1.$$ + +**Lemma X.** If the constant $\nu$ is sufficiently small, there exists a constant $C_X \asymp \nu^2$ with the following property: For any $c \approx \hat{c}$, we have + +$$\frac{\gamma(z)}{\gamma(\zeta)} \geq C_X$$ + +if either + +(1) $z, \zeta \in J(f_c) - V$; or + +(2) $z \in J(f_c) - V_0 \cup V$ and $\zeta \in V_1 - f_c(V_0).$ + +**Proof.** We may assume that there exists an $R_0 > 0$ such that $J(f_c) \subset \bar{\mathbb{D}}(R_0)$ for any $c \approx \hat{c}$. +Since $\gamma$ diverges only at the postcritical set $P(f_c)$ in $\bar{\mathbb{D}}(R_0)$, there exists a constant $C_4 > 0$ +such that $\gamma(w) \ge C_4$ for any $w \in \bar{\mathbb{D}}(2R_0) - P(f_c)$. In particular, we have $\gamma(z) \ge C_4$ in both +cases (1) and (2). Moreover, for these cases, we can find a constant $C_5$ independent of $\nu \ll 1$ +and $c \approx \hat{c}$ such that + +$$\mathrm{dist}(\zeta, P(f_c)) \geq C_5\nu^2.$$ + +Hence if $\nu$ is sufficiently small, then Lemma W implies that $\gamma(\zeta) \leq 1/(C_5\nu^2)$. Now we +have $\gamma(z)/\gamma(\zeta) \geq C_4C_5\nu^2 =: C_X$. ■ + +**Lemma Y.** There exists a constant $A > 1$ such that for $c \approx \hat{c}$, if $z, f_c(z), \dots, f_c^n(z)$ are all contained in $J(f_c) - V$, we have + +$$|D f_c^n(z)| \geq C_X A^n.$$ + +This estimate also holds if $z, f_c(z), \dots, f_c^{n-1}(z)$ are all contained in $J(f_c) - V_0 \cup V$ and $f_c^n(z) \in V_1 - f_c(V_0).$ + +$^2$Without the parameter ray condition, $f_c$ may have Siegel disks and the set $\mathbb{C} - P(f_c)$ may contain the disks. 
+ +---PAGE_BREAK--- + +**Proof.** Since the Julia set is uniformly bounded when $c \approx \hat{c}$, we may assume that there exists a constant $A > 1$ such that for any $c \approx \hat{c}$, + +$$ \frac{\gamma(f_c(w))}{\gamma(w)} |Df_c(w)| \geq A $$ + +if either $w, f_c(w) \in J(f_c) - V$; or $w \in J(f_c) - V \cup V_0$ and $f_c(w) \in V_1 - f_c(V_0)$. + +By the chain rule, we have + +$$ |Df_c^n(z)| = \prod_{i=0}^{n-1} |Df_c(f_c^i(z))| \geq \prod_{i=0}^{n-1} \frac{\gamma(f_c^i(z))}{\gamma(f_c^{i+1}(z))} A \geq \frac{\gamma(z)}{\gamma(f_c^n(z))} A^n. $$ + +By applying Lemma X with $\zeta := f_c^n(z)$, we obtain the desired inequality. ■ + +## 12 Proof of Lemma B' + +Set $f = f_c$. Suppose that $M < \infty$. Since we have $z_i \notin V_0 \cup V$ for all $i \le M-1$, we can apply Lemma Y and we have + +$$ |Df^i(z_0)| \geq \frac{\gamma(z_0)}{\gamma(z_i)} \cdot A^i \geq C_X A^i. $$ + +If $z_M \notin V_0 \cup V$ or $z_M \in V_1 - f_c(V_0)$, then we can apply Lemma Y again and we have
$|Df^M(z_0)| \ge C_X A^M \ge C_X$. Otherwise $z_M \in V_j$ for some $j \ne 1$. Since $z_{M-1} \notin V_0 \cup V$,
we may assume that $|z_{M-1}| \ge \xi_0$ for some constant $0 < \xi_0 \le 1/2$ depending only on $\hat{c}$ and
independent of $\nu \ll 1$, $c \approx \hat{c}$, and $z_0 \in J(f_c)$. Hence we have + +$$ |Df^M(z_0)| = |Df^{M-1}(z_0)| |Df(z_{M-1})| \ge C_X A^{M-1} \cdot 2\xi_0 \ge 2\xi_0 C_X. $$ + +Thus + +$$ \sum_{i=1}^{M} \frac{1}{|Df^i(z_0)|} \le \sum_{i=1}^{M-1} \frac{1}{C_X A^i} + \frac{1}{2\xi_0 C_X} < \frac{1}{C_X} \left( \frac{1}{A-1} + \frac{1}{2\xi_0} \right) =: \kappa_B. $$ + +If $M = \infty$, then the same estimate as above yields + +$$ \sum_{i=1}^{\infty} \frac{1}{|Df^i(z_0)|} \le \sum_{i=1}^{\infty} \frac{1}{C_X A^i} < \frac{1}{C_X(A-1)} < \kappa_B. $$ + +■ + +## 13 Proof of Lemma A' + +Set $f = f_c$. For a given S-cycle $S = [M, M')$, we may assume that $M = 0$ without loss of generality. We divide the proof into two cases. 
+ +**Case 1.** Suppose that $S$ is either a finite S-cycle or an infinite S-cycle of type (I). Then there exist $j \in \{1, 2, \dots, l\}$, $m \in \mathbb{N}$, and $L \in \mathbb{N} \cup \{\infty\}$ such that + +* $z = z_0 \in V_j$; + +* $z_{n-p} \in U_l$ when $n = (l-j) + mp$, but $z_n \notin U_l$; + +* $z_{n+i} \notin V_0 \cup V$ if $0 \le i < L$. +---PAGE_BREAK--- + +• $M' < \infty$ iff $L < \infty$ and $M' = (l-j) + mp + L$. + +Hence we have the following estimates of $|Df^n(z)|$: + +• When $n = 1, \dots, l-j-1$, we have $z_n \in V_{j+n}$ and + +$$|Df^n(z)| \geq \xi^n \geq \xi^{l-1}.$$ + +• When $n = (l-j) + kp + i$ with $0 \le k < m$ and $0 \le i < p$, + +$$ +\begin{align*} +|Df^n(z)| &= |Df^{l-j}(z)| |Df^{kp}(z_{l-j})| |Df^i(z_{(l-j)+kp})| \\ +&\geq \xi^{l-j} \cdot \mu^k \cdot \xi^i \\ +&\geq \xi^{(l-1)+(p-1)} \mu^k. +\end{align*} +$$ + +• When $n = (l - j) + mp + i$ with $0 \le i < L \le \infty$, + +$$ +\begin{align*} +|Df^n(z)| &= |Df^{(l-j)+mp}(z)| |Df^i(z_{(l-j)+mp})| \\ +&\geq \xi^{l-j} \cdot \mu^m \cdot \frac{\gamma(z_{(l-j)+mp})}{\gamma(z_n)} \cdot A^i \\ +&\geq \xi^{l-1} C_X A^i. +\end{align*} +$$ + +Here the constant $A$ above is the same as that of Lemma Y. + +• When $L < \infty$ and $n = M' = (l-j) + mp + L$, the point $z_{M'}$ satisfies either $z_{M'} \in V_1 - f_c(V_0)$; or $z_{M'} \in V_j$ for some $j \ne 1$. 
By the same argument as in the proof of Lemma B', there exists a constant $0 < \xi_0 \le 1/2$ depending only on $\hat{c}$ such that + +$$
\begin{aligned}
|Df^n(z)| = |Df^{M'}(z)| &= |Df^{(l-j)+mp}(z)| |Df^L(z_{(l-j)+mp})| \\
&\geq \xi^{l-j} \cdot \mu^m \cdot \min\{C_X A^L, C_X A^{L-1} \cdot 2\xi_0\} \\
&\geq 2\xi^{l-1} \xi_0 C_X. \quad (13)
\end{aligned}
$$ + +By these estimates, when $M' < \infty$, we have: + +$$
\begin{align*}
& \sum_{i=1}^{M'} \frac{1}{|Df^i(z)|} \\
&= \sum_{i=1}^{l-j-1} \frac{1}{|Df^i(z)|} + \sum_{k=0}^{m-1} \sum_{i=0}^{p-1} \frac{1}{|Df^{l-j}(z)| |Df^{kp+i}(z_{l-j})|} \\
&\quad + \sum_{i=0}^{L-1} \frac{1}{|Df^{(l-j)+mp}(z)| |Df^i(z_{(l-j)+mp})|} + \frac{1}{|Df^{M'}(z)|} \\
&\leq \frac{l-2}{\xi^{l-1}} + \sum_{k=0}^{m-1} \frac{p}{\xi^{(l-1)+(p-1)} \cdot \mu^k} + \sum_{i=0}^{L-1} \frac{1}{\xi^{l-1} C_X A^i} + \frac{1}{2\xi^{l-1} \xi_0 C_X} \\
&\leq \frac{l-2}{\xi^{l-1}} + \frac{p}{\xi^{(l-1)+(p-1)}} \cdot \frac{\mu}{\mu-1} + \frac{1}{\xi^{l-1} C_X} \cdot \frac{A}{A-1} + \frac{1}{2\xi^{l-1} \xi_0 C_X} \\
&=: \kappa_A.
\end{align*}
$$ + +Note that $\kappa_A$ does not depend on $j$, $m$, and $L$. + +If $M' = \infty$, then $L = \infty$ and one can easily check + +$$
\sum_{i=1}^{\infty} \frac{1}{|Df^i(z)|} \leq \frac{l-2}{\xi^{l-1}} + \frac{p}{\xi^{(l-1)+(p-1)}} \cdot \frac{\mu}{\mu-1} + \frac{1}{\xi^{l-1} C_X} \cdot \frac{A}{A-1} < \kappa_A.
$$
---PAGE_BREAK--- + +**Case 2.** Suppose that $S = [0, \infty)$ is an infinite S-cycle of type (II). Then there exists a $j \in \{1, 2, \dots, l\}$ such that $z = z_0 \in V_j$ and $z = b_j(c)$ if $j < l$ and $z \in \Omega(c)$ if $j = l$. Hence for any $k \in \mathbb{N}$ we have $z_{(l-j)+kp} \in U_l$. 
By the same estimates as in Case 1, we have + +$$ +\begin{aligned} +\sum_{i=1}^{\infty} \frac{1}{|Df^i(z)|} &= \sum_{i=1}^{l-j-1} \frac{1}{|Df^i(z)|} + \sum_{k=0}^{\infty} \sum_{i=0}^{p-1} \frac{1}{|Df^{l-j}(z)| |Df^{kp+i}(z_{l-j})|} \\ +&\leq \frac{l-2}{\xi^{l-1}} + \sum_{k=0}^{\infty} \frac{p}{\xi^{(l-1)+(p-1)} \cdot \mu^k} \\ +&= \frac{l-2}{\xi^{l-1}} + \frac{p}{\xi^{(l-1)+(p-1)}} \cdot \frac{\mu}{\mu-1} < \kappa_A. +\end{aligned} +$$ + +## 14 Proof of Lemma C' + +Set $f = f_c$. We will show that $|Df^{M'-M}(z_M)| \geq \kappa_C/\nu$ for some constant $\kappa_C$ that depends only on $\hat{c}$. By choosing $\nu$ sufficiently small, we have $\lambda := \kappa_C/\nu > 1$. + +As in the proof of Lemma A', we assume that $M = 0$ and set $M' := (l-j) + mp + L$ where $z_0 \in V_j$ for some $1 \le j \le l$. We also set $n := (l-j) + mp$, then by the chain rule we have + +$$ |Df^{M'}(z_0)| = |Df^n(z_0)| \cdot |Df^L(z_n)|. \quad (15) $$ + +First let us give an estimate of $|Df^n(z_0)|$. We can find an $\tilde{R}_l > 0$ such that + +$$ f^p(\mathbb{D}(\hat{w}, R_l)) \subset \mathbb{D}(\hat{w}_p, \tilde{R}_l/2) \subset \mathbb{D}(\hat{w}_p, \tilde{R}_l) $$ + +for any $\hat{w} \in \Omega(\hat{c})$ if we choose $R_l$ small enough, where $\hat{w}_p = f_c^p(\hat{w})$. Let $\hat{x} := \hat{b}_j$ if $z_0 \in V_j$ and $j \ne l$, or $\hat{x} := \hat{w}$ if $z_0 \in \mathbb{D}(\hat{w}, C_0'\nu^2) \subset V_l$ for some $\hat{w} \in \Omega(\hat{c})$. (The choice of $\hat{w}$ is not unique.) Let $x_0(c) = b_j(c)$ if $j < l$, or $x_0(c) = \chi_c(\hat{x})$ if $j = l$. Note that for any $c \approx \hat{c}$, we have $b_j(c) \in V_j$, $\chi_c(\hat{w}) \in \mathbb{D}(\hat{w}, C_0'\nu^2)$, and $\chi_c(\hat{w}_p) \in \mathbb{D}(\hat{w}_p, C_0'\nu^2)$. In particular, we may assume that $|z_0 - x_0(c)| \le \max(C_0, 2C_0')\nu^2$ and $|\hat{x}_n - x_n(c)| \le R_l/2$, where $\hat{x}_n = f_c^n(\hat{x})$ and $x_n(c) = \chi_c(\hat{x}_n) = f_c^n(x_0(c))$. 
Thus, $|z_n - x_n(c)| \ge |z_n - \hat{x}_n| - |\hat{x}_n - x_n(c)| \ge R_l/2$. + +Now we take the inverse branch $G$ of $f^n$ defined on $\mathbb{D}(\hat{x}_n, \tilde{R}_l)$ that maps $x_n(c)$ to $x_0(c)$, and $z_n$ to $z_0$. By the Koebe distortion theorem, we have + +$$ |DG(z_n)| \asymp |DG(x_n(c))| $$ + +and + +$$ |z_0 - x_0(c)| = |G(z_n) - G(x_n(c))| \asymp |DG(x_n(c))||z_n - x_n(c)|. $$ + +Since $|z_0 - x_0(c)| \le \max(C_0\nu^2, 2C_0'\nu^2)$ and $|z_n - x_n(c)| \ge R_l/2$, we have $|DG(z_n)| \le C_6\nu^2/R_l$, where $C_6$ is a constant independent of $c \approx \hat{c}$, $\nu \ll 1$, and $z_0 \in J(f_c)$. Hence $|Df^n(z_0)| \ge R_l/(C_6\nu^2)$. + +Next we give an estimate of the form $|Df^L(z_n)| \ge C_7\nu$, where $C_7$ is a constant independent of $c \approx \hat{c}$, $\nu \ll 1$, and $z_0 \in J(f_c)$. (Then by (15) the proof is done.) The estimate relies on the geometry of (and dynamics on) the postcritical set $P(f_c)$: Take any $i \in [0, L)$, then by Lemmas W and Y we obtain + +$$ +\begin{align*} +|Df^L(z_n)| &= |Df^{L-i}(z_n)||Df^i(z_{M'-i})| \\ +&\geq \frac{\gamma(z_n)}{\gamma(z_{M'-i})} A^{L-i} |Df^i(z_{M'-i})| \\ +&\geq \gamma(z_n) \cdot \operatorname{dist}(z_{M'-i}, P(f_c)) |Df^i(z_{M'-i})|. +\end{align*} +$$ +---PAGE_BREAK--- + +By taking a small enough $R_l$, we may assume that $f_c^p(U_l)$ is disjoint from $P(f_c) - \Omega(\hat{c})$. Hence $z_n$ has a definite distance from $P(f_c)$ (more precisely, $\text{dist}(z_n, P(f_c))$ is bigger than a positive constant independent of $c \approx \hat{c}$, $\nu \ll 1$, and $z_0 \in J(f_c)$) and we always have $\gamma(z_n) \asymp 1$. + +Thus it is enough to show: There exists an $i \in [0, l+p)$ such that + +(1) $z_{M'-i}$ has a definite distance from $P(f_c)$; and + +(2) $|Df^i(z_{M'-i})| \ge C_8\nu$ for some constant $C_8$ depending only on $\hat{c}$. + +Note that if $z_{M'} \in V_0$, then $z_{M'}$ already has a definite distance from $P(f_c)$ by semi-hyperbolicity. 
This situation corresponds to $i=0$ and condition (2) is ignored. + +Figure 5: Black heavy dots indicate the critical orbit. Some possible behaviors of $z_{M'-i} \mapsto z_{M'-i+1} \mapsto \cdots \mapsto z_M$ are indicated by smaller dots (in red). + +If $z_{M'} \in V_{j'}$ with $1 \le j' \le l$, then such an $i$ can be found in $[1, l+p)$ by the following procedure (Figure 5). Suppose that $z_{M'} \in V_1$. Then $z_{M'-1}$ is contained in $f^{-1}(V_1) - V_0$, and thus $|z_{M'-1}| \ge \nu$. By setting $i=1$, it follows that $z_{M'-1}$ has a definite distance from $P(f_c)$, and we have $|Df(z_{M'-1})| \ge 2\nu$. + +Suppose that $z_{M'} \in V_2$. Then $f^{-1}(V_2)$ has two components containing $\pm \hat{b}_1$ for any $c \approx \hat{c}$. If $z_{M'-1}$ is in the component containing $-\hat{b}_1$, then $|z_{M'-1} - (-\hat{b}_1)| \asymp \nu^2$ and it has a definite distance from $P(f_c)$. Now set $i=1$. Since $|Df(-\hat{b}_1)| = 2|\hat{b}_1| \ge \xi$ by definition of $\xi$ in Section 2, we have $|Df(z_{M'-1})| \asymp |Df(-\hat{b}_1)| \ge \xi > \nu$ for $\nu \ll 1$. If $z_{M'-1}$ is in the component containing $\hat{b}_1$, then $z_{M'-1}$ is necessarily contained in $f^{-1}(V_2) - V_1$, and then $|z_{M'-1} - \hat{b}_1| \asymp \nu^2$. In this situation $|z_{M'-2}| \asymp \nu$ and $z_{M'-2}$ has a definite distance from $P(f_c)$. Set $i=2$. Then + +$$|Df^2(z_{M'-2})| = |Df(z_{M'-2})||Df(z_{M'-1})| \asymp \nu \cdot |Df(\hat{b}_1)| \ge \xi\nu.$$ + +Suppose that $z_{M'} \in V_{j'}$ with $j' = 3, \dots, l-1$. As in the situation of $z_{M'} \in V_2$, either + +• $|z_{M'-i} - (-\hat{b}_{j'-i})| \asymp \nu^2$ for some $i < j'$ and $z_{M'-i}$ has a definite distance from $P(f_c)$; or + +• $|z_{M'-j'}| \asymp \nu$ and $z_{M'-j'}$ has a definite distance from $P(f_c)$. We set $i:=j'$ in this case. + +In both cases, we have $|z_{M'-k} - \hat{b}_{j'-k}| \asymp \nu^2$ for each $k=1, \dots, i-1$. 
In particular, since $2|\hat{b}_n| \ge \xi$ for $n \in \mathbb{N}$, we have: + +• If $i < j'$, then $|Df^i(z_{M'-i})| \asymp 2|\hat{b}_{j'-i}| \cdot 2|\hat{b}_{j'-i+1}| \cdots 2|\hat{b}_{j'-1}| \ge \xi^i \ge \xi^{l-2}$. +---PAGE_BREAK--- + +• If $i = j'$, then $|Df^i(z_{M'-i})| \asymp 2\nu \cdot 2|\hat{b}_1| \cdots 2|\hat{b}_{j'-1}| \ge 2\xi^{j'-1}\nu \ge 2\xi^{l-2}\nu$. + +In both cases, we have $|Df^i(z_{M'-i})| \ge C_8\nu$ for some constant $C_8 > 0$ independent of $c \approx \hat{c}$, +$\nu \ll 1$, and $z_0 \in J(f_c)$. + +Finally suppose that $z_{M'} \in V_l$, i.e., $\text{dist}(z_{M'}, \Omega(\hat{c})) < C'_0 \nu^2$ by definition of $V_l$. Now we +claim: *there exists a $k' \le p$ such that $\text{dist}(z_{M'-k'}, \Omega(\hat{c})) \asymp R_l$.* + +Indeed, if there exists some $1 \le k' < p$ such that $z_{M'}, z_{M'-1}, \dots, z_{M'-k'+1} \in U_l$ but +$z_{M'-k'} \notin U_l$, then $\text{dist}(z_{M'-k'}, \Omega(\hat{c})) \asymp R_l$. Now suppose that all $z_{M'}, z_{M'-1}, z_{M'-2}, \dots, z_{M'-p+1}$ +remain in $U_l$ (but not in $V_l$ except $z_{M'}$). Let us show that $z_{M'-p} \notin U_l$ by contradiction. + +Assume that $z_{M'-p} \in U_l$. Since $|Df^p(z)| \ge \mu > 2.5$ for $z \in U_l$, by the Koebe distortion theorem and invariance of $\Omega(c)$ by $f^p = f_c^p$, we obtain $2 \cdot \text{dist}(z_{M'-p}, \Omega(c)) < \text{dist}(z_{M'}, \Omega(c))$ if $\nu \ll 1$. (Note that we have $z_{M'} \notin \Omega(c)$ since $\mathcal{S}$ is a finite S-cycle.) Since $\Omega(\hat{c})$ moves holomorphically, we may assume that $\text{dist}(\Omega(c), \Omega(\hat{c})) \le C'_0\nu^2/4$ for $c \approx \hat{c}$. Hence we obtain + +$$ +\begin{align*} +\operatorname{dist}(z_{M'-p}, \Omega(\hat{c})) &\leq \operatorname{dist}(z_{M'-p}, \Omega(c)) + C_0'\nu^2/4 \\ +&< \frac{\operatorname{dist}(z_{M'}, \Omega(c))}{2} + C_0'\nu^2/4 \\ +&\leq \left(\frac{\operatorname{dist}(z_{M'}, \Omega(\hat{c})) + C_0'\nu^2/4}{2} + C_0'\nu^2/4\right) \\ +&< C_0'\nu^2. 
+\end{align*} +$$ + +It would imply $z_{M'-p} \in V_l$, contradicting the construction of the S-cycle [$M, M'$). It follows +that $z_{M'-p} \notin U_l$ and thus $\text{dist}(z_{M'-k'}, \Omega(\hat{c})) \asymp R_l$ for $k' = p$. + +The point $z_{M'-k'}$ above has a definite distance from $\Omega(\hat{c})$. It also has a definite distance +from $P(f_{\hat{c}})$, unless $|z_{M'-k'} - \hat{b}_{l-1}| \asymp \nu^2$. However, in this case we may apply the same +argument as in the case of $1 \le j' \le l-1$ and there exists an $i \in [k', k' + l)$ such that +$z_{M'-i}$ has a definite distance from $P(f_{\hat{c}})$. Moreover, since $i$ is bounded by $p+l$, we have +$|Df^i(z_{M'-i})| \ge C_8\nu$ by replacing the above $C_8$ if necessary. ■ + +**15 Proof of Lemma C** + +This proof is similar to that of Lemma C'. We will show that $|Df_c^{N'-N}(z_0)| \ge K_C/\nu$ for some +constant $K_C$ that depends only on $\hat{c}$, and we set $\Lambda := K_C/\nu > 1$ by choosing $\nu \ll 1$. + +Without loss of generality we may assume that $N = 0$. Set $n := l + mp$ and $L := N' - n$ +such that $z_{n-p} \in U_l$, $z_n \notin U_l$, $z_{n+i} \notin V_0$ for $0 \le i < L$, and $z_{n+L} \in V_0$. + +By the chain rule, we have + +$$ +|Df_c^{N'}(z_0)| = |Df_c^n(z_0)| \cdot |Df_c^L(z_n)|. \quad (16) +$$ + +By Lemma U, we have $|Df_c^n(z_0)| \ge C_U/|z_0| \ge C_U/\nu$ where the constant $C_U > 0$ is indepen- +dent of $c \approx \hat{c}$ and $z_0 \in J(f_c) \cap V_0$. Hence it is enough to show that $|Df_c^L(z_n)| \ge \eta$ for some +constant $\eta > 0$ that is independent of $\nu \ll 1$, $c \approx \hat{c}$ and $z_0 \in V_0 \cap J(f_c)$. (Then we have +$|Df_c^{N'}(z)| \ge C_U\eta/\nu$ by (16) and the proof is done.) + +To show this, we use the hyperbolic metric. Let $\rho(z)|dz| = \rho_c(z)|dz|$ be the hyperbolic +metric on $\mathbb{C} - P(f_c)$, where + +$$ +P(f_c) = \{c, f_c(c), f_c^2(c), \ldots\} +$$ + +is the postcritical set of $f_c$ for $c \approx \hat{c}$ with $c \notin \mathbb{M}$. 
+ +Since $J(f_c) \cap P(f_c) = \emptyset$ when $c \notin \mathbb{M}$, we have + +$$ +\frac{\rho(f(z))}{\rho(z)} |Df_c(z)| \ge 1 +$$ +---PAGE_BREAK--- + +for any $z \in J(f_c)$. (See [Mc, Theorem 3.5] for example.) We also have $\rho(z) \le \text{dist}(z, P(f_c))^{-1}$ by Lemma W. Hence $|Df^L(z_n)| \ge \rho(z_n)/\rho(z_{N'}) \ge \rho(z_n) \cdot \text{dist}(z_{N'}, P(f_c))$. To complete the proof, we show that both $\rho(z_n)$ and $\text{dist}(z_{N'}, P(f_c))$ are uniformly bounded from below for any $c \approx \hat{c}$ and for any $z_0 \in V_0 = \mathbb{D}(\nu)$. + +Let us work with $\text{dist}(z_{N'}, P(f_c))$ first: Let $\tilde{\mathcal{R}}(c)$ denote the closure of the union of the forward images of the dynamic ray $\mathcal{R}_c(\theta)$. By using the set $\hat{\mathcal{R}}(c)$ defined in Section 7, we have + +$$ \tilde{\mathcal{R}}(c) = \overline{\mathcal{R}_c(\theta) \cup \mathcal{R}_c(2\theta) \cup \dots \cup \mathcal{R}_c(2^{l-1}\theta) \cup \hat{\mathcal{R}}(c)}. $$ + +By Proposition S, this set moves continuously as $c \to \hat{c}$ along $c \in \mathcal{R}_{\mathbb{M}}(\theta)$ with respect to the Hausdorff distance on the sphere. Since the postcritical set $P(f_c)$ is contained in $\tilde{\mathcal{R}}_c$, we obtain + +$$ \text{dist}(z_{N'}, P(f_c)) \ge \text{dist}(z_{N'}, \tilde{\mathcal{R}}_c) \ge \text{dist}(0, \tilde{\mathcal{R}}_c) - |z_{N'}| \ge \text{dist}(0, \tilde{\mathcal{R}}_c) - \nu, $$ + +where $\text{dist}(0, \tilde{\mathcal{R}}_c)$ tends to $\text{dist}(0, \tilde{\mathcal{R}}_{\hat{c}}) > 0$ as $c \to \hat{c}$ with $c \in \mathcal{R}_{\mathbb{M}}(\theta)$. Now we choose sufficiently small $\nu$ and we conclude that $\text{dist}(z_{N'}, P(f_c))$ is bounded by a positive constant that is independent of $c \to \hat{c}$ with the parameter ray condition and $z_{N'} \in V_0$. 
+ +Next we work with $\rho(z_n)$: Let $T_c : \mathbb{C} \to \mathbb{C} (c \neq 0)$ be a complex affine map with $T_c(c) = \hat{c}$ and $T_c(f_c(c)) = f_{\hat{c}}(\hat{c})$ such that $T_c(z) \to z$ uniformly on compact sets as $c \to \hat{c}$. Set $g_c := T_c \circ f_c \circ T_c^{-1}$. Then $g_c$ is a quadratic map whose postcritical set is + +$$ P(g_c) = T_c(P(f_c)) = \{\hat{c}, f_{\hat{c}}(\hat{c}) = g_c(\hat{c}), g_c^2(\hat{c}), \dots\}. $$ + +Hence the hyperbolic metrics $\rho'_c$ on $\mathbb{C} - P(g_c)$ and $\hat{\rho}$ on $\mathbb{C} - \{\hat{c}, f_{\hat{c}}(\hat{c})\}$ satisfy $T_c^* \rho'_c = \rho_c$ and $\hat{\rho} \le \rho'_c$ for all $c$, where $T_c^*$ is the pull-back. + +As in the proof of Lemma C', if we choose $R_l$ small enough, then we can find an $\tilde{R}_l > 0$ such that $f_c^p(U_l) \subset N(\Omega(\hat{c}), \tilde{R}_l)$ for any $c \approx \hat{c}$ and that the closure $E$ of the set $N(\Omega(\hat{c}), \tilde{R}_l) - V_l$ contains neither $\hat{c}$ nor $f_{\hat{c}}(\hat{c})$. (Note that $f_{\hat{c}}(\hat{c})$ may belong to $\Omega(\hat{c})$ and be contained in $V_l$.) It follows that $z_n$ is contained in $E$ for $c \approx \hat{c}$, and hence so is $z'_n := T_c(z_n)$. Thus we obtain + +$$ \rho'_{\tilde{c}}(z'_{n}) \geq \hat{\rho}(z'_{n}) \geq \min_{w \in E} \hat{\rho}(w) > 0. $$ + +Since $\rho_c(z_n) = \rho'(T_c(z_n))|DT_c(z_n)| = |\rho'(z'_n)|DT_c(z_n)|$ and $DT_c(w) \to 1$ uniformly on $E$ as $c \to \hat{c}$, we conclude that $\rho(z_n)$ is bounded by a positive constant from below that is independent of $\nu \ll 1$, $c \approx \hat{c}$ and the original choice of $z_0 \in V_0 \cap J(f_c)$. ■ + +# 16 Itinerary sequences + +When $c \notin \mathbb{M}$, the critical value $c$ has a well defined external angle $t_c = (2\pi)^{-1} \arg \Phi_c(c)$. The angle $t_c$ is not equal to zero when $c \in X = \mathbb{C} - \mathbb{M} \cup R_+$. 
For $c \in X$, the dynamic rays $\mathcal{R}_c(t_c/2)$ and $\mathcal{R}_c((t_c+1)/2)$ together with the critical point 0 separate the complex plane $\mathbb{C}$ into two disjoint open sets, say $W_0 = W_0(c)$ and $W_1 = W_1(c)$. Let the one that contains $c$ be $W_0$. If $t_c = 0$ and $\mathcal{R}_{\mathbb{M}}(0)$ lands at a semi-hyperbolic parameter $\hat{c}$, then $\tilde{\mathcal{R}}_c(\theta)$ lands at $\hat{c}$, and both $\tilde{\mathcal{R}}_c(\theta/2)$ and $\tilde{\mathcal{R}}_c((\theta+1)/2)$ land at 0. Moreover, as $c$ approaches $\hat{c}$ along $\mathcal{R}_{\mathbb{M}}(0)$, in a large disk centered at 0, rays $\tilde{\mathcal{R}}_c(\theta/2)$ and $\tilde{\mathcal{R}}_c((\theta+1)/2)$ move continuously to $\tilde{\mathcal{R}}_c(\theta/2)$ and $\tilde{\mathcal{R}}_c((\theta+1)/2)$, respectively. + +Assume $z \in J(f_c)$. Define its *itinerary* or *itinerary sequence* $I_c(z) = \{I_c(z)_n\}_{n \ge 0}$ by +$I_c(z)_n = 0$ if $f_c^n(z) \in W_0$, $I_c(z)_n = 1$ if $f_c^n(z) \in W_1$, and $I_c(z)_n = *$ if $f_c^n(z) = 0$. If the +critical point 0 belongs to the Julia set, $I_c(f_c(0))$ is called the *kneading sequence* for $f_c$. +---PAGE_BREAK--- + +*Remark 16.1.* One can also define an itinerary $\{s_0, s_1, \ldots\}$ in such a way that $s_n = 0$ if $f_c^n(z) \in \overline{W}_0 \cap J(f_c)$ and that $s_n = 1$ if $f_c^n(z) \in \overline{W}_1 \cap J(f_c)$. When $\hat{c}$ is a semi-hyperbolic parameter, if $f_{\hat{c}}^k(z) = 0$ for some $k \ge 0$, then $f_{\hat{c}}^n(z) \ne 0$ for all $n \ne k$ since the critical point is non-recurrent. Suppose $f_{\hat{c}}^n(z) \in W_{s_n}$ for $n \ne k$, then with the definition of itinerary in this remark, the itinerary of $z$ will have two values $\{s_0, \ldots, s_{k-1}, 0, s_{k+1}, \ldots\}$ and $\{s_0, \ldots, s_{k-1}, 1, s_{k+1}, \ldots\}$. 
We employ the symbol $\ast$ in the above definition so as to identify sequences $\{s_0, \ldots, s_{k-1}, 0, s_{k+1}, \ldots\}$ and $\{s_0, \ldots, s_{k-1}, 1, s_{k+1}, \ldots\}$ by the one $\{s_0, \ldots, s_{k-1}, \ast, s_{k+1}, \ldots\}$. + +**Lemma Z.** Let $\hat{c}$ be a semi-hyperbolic parameter. + +(i) $I_{\hat{c}}(z) = I_{\hat{c}}(w)$ if and only if $z = w$. + +(ii) If $I_{\hat{c}}(z)_k = \ast$ and $I_{\hat{c}}(z)_n = I_{\hat{c}}(w)_n$ for all $n \ne k$, then $I_{\hat{c}}(w)_k = \ast$ and $w = z$. + +**Proof.** Since $\mathbb{C} - \mathcal{R}_{\hat{c}}(\theta) \cup \{\hat{c}\}$ is a simply connected domain without a critical value, there exist inverse branches $f_{\hat{c},i}^{-1} : \mathbb{C} - \mathcal{R}_{\hat{c}}(\theta) \cup \{\hat{c}\} \to W_i$ of $f_{\hat{c}}$, $i = 0$ or $1$. Each of these two branches can be extended at the critical value $\hat{c}$, and each extended branch is one-to-one. + +(i) If $I_{\hat{c}}(z)_n = I_{\hat{c}}(w)_n = s_n$ for all $n \ge 0$, then for any $N \in \mathbb{N}_0$ both $f_{\hat{c}}^N(z)$ and $f_{\hat{c}}^N(w)$ belong to $W_{s_N}$ provided $s_N \ne \ast$, or belong to $\{0\}$ provided $s_N = \ast$. The set $J(f_{\hat{c}}) \cap \overline{W_{s_N}}$ can be covered by a finite number of disks $\mathbb{D}(y_i, \epsilon)$ with $y_i \in J(f_{\hat{c}}) \cap \overline{W_{s_N}}$, $i \in F$, and $F$ is a finite index set. We choose $\epsilon$ to be the constant such that the inequality (1) holds. Let $B_N(y_i, \epsilon)$ be the component of $f_{\hat{c}}^{-N}(\mathbb{D}(y_i, \epsilon))$ such that $f_{\hat{c}}^{-N}(y_i) \in \overline{W_{s_0}}$, $f_{\hat{c}}^{-N+1}(y_i) \in \overline{W_{s_1}}$, ..., $f_{\hat{c}}^{-1}(y_i) \in \overline{W_{s_{N-1}}}$. It is not difficult to see that both $z$ and $w$ are contained in a simply connected domain covered by the union $\cup_{i \in F} B_N(y_i, \epsilon)$. It follows that $z = w$ easily from the exponential contraction (1) by taking $N \to \infty$. 
+ +(ii) If $I_{\hat{c}}(z)_k = \ast$ and $I_{\hat{c}}(z)_n = I_{\hat{c}}(w)_n$ for all $n > k$, then $I_{\hat{c}}(f_{\hat{c}}^{k+1}(z)) = I_{\hat{c}}(f_{\hat{c}}^{k+1}(w))$. Thus, $f_{\hat{c}}^{k+1}(z) = f_{\hat{c}}^{k+1}(w) = \hat{c}$ by (i). Since $\hat{c}$ is the critical value, $f_{\hat{c}}^k(w) = 0$, and then $I_{\hat{c}}(w)_k = \ast$. Therefore, $I_{\hat{c}}(z) = I_{\hat{c}}(w)$, and then $z = w$ by (i). ■ + +Let $z(c)$ and $\hat{c}$ be as in Theorem 1.1, and let $c_0$ be $c(2)$ in Theorem 1.2 or be as in Theorem 1.3. The statement (i) of following corollary describes how the itinerary of $z(c)$ retains. The statement (ii) tells that every given point, say $w$, of $J(f_{\hat{c}})$ is a limiting point $z(\hat{c})$ of some $z(c)$ in $J(f_c)$ where the limit is taken as in Theorem 1.2. + +**Corollary 16.2.** + +(i) Suppose $I_{c_0}(z(c_0)) = s$, then $I_{\hat{c}}(z(\hat{c})) = s$ if and only if $f_{\hat{c}}^n(z(\hat{c})) \ne 0$ for all $n \ge 0$, otherwise $I_{\hat{c}}(z(\hat{c})) = \{s_0, \ldots, s_{k-1}, \ast, s_{k+1}, \ldots\}$ if and only if $f_{\hat{c}}^k(z(\hat{c})) = 0$ for some $k \ge 0$. + +(ii) Let $w \in J(f_{\hat{c}})$ and $I_{\hat{c}}(w) = s$. If $f_{\hat{c}}^n(w) \ne 0$ for all $n \ge 0$, there exists a unique $z(c_0)$, with $I_{c_0}(z(c_0)) = s$, such that $w = z(\hat{c})$. If $f_{\hat{c}}^k(w) = 0$ for some $k \ge 0$, then there exist exactly two $z(c_0)$ and $\tilde{z}(c_0)$, having itineraries $\{s_0, \ldots, s_{k-1}, 0, s_{k+1}, \ldots\}$ and $\{s_0, \ldots, s_{k-1}, 1, s_{k+1}, \ldots\}$ respectively, such that $w = z(\hat{c}) = \tilde{z}(\hat{c})$. + +**Proof.** (i) For $c \notin \mathbb{M}$, every point $z \in J(f_c)$ of given itinerary is bounded away from $\mathcal{R}_c(\theta/2) \cup \mathcal{R}_c((\theta+1)/2) \cup \{0\}$ and moves holomorphically with $c$. Thus, $f_{\hat{c}}^n(z(\hat{c})) \in \overline{W}_{s_n}(\hat{c})$ if $f_{c_0}^n(z(c_0)) \in W_{s_n}(c_0)$. 
Hence, $I_{\hat{c}}(z(\hat{c})) = s$ if $f_{\hat{c}}^n(z(\hat{c})) \ne 0$ for all $n \ge 0$. If $f_{\hat{c}}^k(z(\hat{c})) = 0 = \overline{W}_0(\hat{c}) \cap \overline{W}_1(\hat{c}) \cap J(f_\hat{c})$, then $0 \ne f_{\hat{c}}^n(z(\hat{c})) \in W_{s_n}(\hat{c})$ for all $n \ne k$ and $I_{\hat{c}}(z(\hat{c})) = \{s_0, \ldots, s_{k-1}, \ast, s_{k+1}, \ldots\}$. +---PAGE_BREAK--- + +(ii) For any $w \in J(f_{\hat{c}})$, by Theorem 1.3, there exists $z(c_0) \in J(f_{c_0})$ such that $h_c(z(c_0)) = z(c) \to z(\hat{c}) = w$ as $c \to \hat{c}$ along $\mathcal{R}_M(\theta)$. If $f_{\hat{c}}^n(w) \neq 0$ for all $n \ge 0$, then $z(c_0) \neq 0$ for all $n \ge 0$, and we conclude that $I_{c_0}(z(c_0)) = I_{\hat{c}}(w)$. If there exists another $\tilde{z}(c_0) \in J(f_{c_0})$ such that $h_c(\tilde{z}(c_0)) = \tilde{z}(c) \to \tilde{z}(\hat{c}) = w$ as $c \to \hat{c}$ along $\mathcal{R}_M(\theta)$, then $I_{c_0}(\tilde{z}(c_0)) = I_{c_0}(z(c_0))$, and consequently $\tilde{z}(c_0) = z(c_0)$ by the bijectivity between the itinerary sequences and Julia set $J(f_{c_0})$. + +If $f_{\hat{c}}^k(w) = 0$ for some $k \ge 0$, then $f_{c_0}^n(z(c_0)) \in W_{s_n}(c_0)$ for $n \ne k$, and $f_{c_0}^k(z(c_0))$ belongs to $W_0(c_0)$ or $W_1(c_0)$. Without loss of generality, assume $f_{c_0}^k(z(c_0)) \in W_0(c_0)$. Let $\tilde{z}(c_0)$ be such a point that $f_{c_0}^{k+1}(\tilde{z}(c_0)) = f_{c_0}^{k+1}(z(c_0))$, $f_{c_0}^k(\tilde{z}(c_0)) \in W_1(c_0)$, and $f_{c_0}^n(\tilde{z}(c_0)) \in W_{s_n}(c_0)$ for $0 \le n < k$. It is easy to see that such a point exists. We have $f_{c_0}^{k+1}(\tilde{z}(c)) \to f_{\hat{c}}^{k+1}(w)$ as $c \to \hat{c}$. And, by (i) and Lemma Z, we obtain $I_{\hat{c}}(\tilde{z}(\hat{c})) = I_{\hat{c}}(w)$ and $\tilde{z}(\hat{c}) = w$. If there is another $z'(c_0) \in J(f_{c_0})$ such that $h_c(z'(c_0)) \to w$ as $c \to \hat{c}$ along $\mathcal{R}_M(\theta)$, then either $I_{c_0}(z'(c_0)) = I_{c_0}(z(c_0))$ or $I_{c_0}(z'(c_0)) = I_{c_0}(\tilde{z}(c_0))$. 
Consequently, by the bijectivity between the itinerary sequences and Julia set $J(f_{c_0})$, we conclude that $z'(c_0) = z(c_0)$ or $\tilde{z}(c_0)$. ■ + +# 17 Proofs of Theorems 1.5 and 1.6 + +**Proof of Theorem 1.5.** Because $J(f_{\hat{c}})$ is locally connected, it is clear that $\mathcal{E}^{\theta}(\theta) = I_{\hat{c}}(\hat{c})$, namely the kneading sequence of $\theta$ is equal to the kneading sequence for $f_{\hat{c}}$. Hence, it is enough to prove the theorem by using $\mathbf{e} = I_{\hat{c}}(\hat{c})$. Note that $\mathbf{e} \in \Sigma_2$ because $\hat{c}$ is not recurrent under iteration of $f_{\hat{c}}$. + +For any $w \in J(f_{\hat{c}})$, we have $\sigma^n(I_{\hat{c}}(w)) \neq \mathbf{e}$ for all $n \ge 0$, or $I_{\hat{c}}(w) = \mathbf{e}$, or $\sigma^k(I_{\hat{c}}(w)) = \mathbf{e}$ for some $k \ge 1$. For any $\mathbf{s} \in \Sigma_2$ satisfying $\sigma^n(\mathbf{s}) \neq \mathbf{e}$ for all $n \ge 0$ or $\mathbf{s} = \mathbf{e}$, from Corollary 16.2, there corresponds a unique $w \in J(f_{\hat{c}})$ with $I_{\hat{c}}(w) = \mathbf{s}$. For such $\mathbf{s} \in \Sigma_2$ that $\sigma^{k+1}(\mathbf{s}) = \mathbf{e}$ for some $k \ge 0$, there is a unique $\mathbf{a} \neq \mathbf{s}$ in $\Sigma_2$ satisfying $\mathbf{a} \sim_e \mathbf{s}$ and again from Corollary 16.2 there corresponds a unique $w \in J(f_{\hat{c}})$ with $I_{\hat{c}}(w) = \{a_0, \dots, a_{k-1}, *, s_{k+1}, \dots\} = \{s_0, \dots, s_{k-1}, *, s_{k+1}, \dots\}$. This shows the bijectivity between $\Sigma_2/\sim_e$ and $J(f_{\hat{c}})$. Let the bijection $\Sigma_2/\sim_e \to J(f_{\hat{c}})$ be $h$. 
Since $I_{\hat{c}}(h(\mathbf{s})) = \mathbf{s}$ if $f_{\hat{c}}^n(h(\mathbf{s})) \neq 0$ for all $n \ge 0$ or $I_{\hat{c}}(h(\mathbf{s})) = \{s_0, \dots, s_{k-1}, *, s_{k+1}, \dots\}$ if $f_{\hat{c}}^k(h(\mathbf{s})) = 0$ for some $k \ge 0$ (we use $\mathbf{s}$ for an element in both $\Sigma_2$ and $\Sigma_2/\sim_e$ if it does not cause any confusion), by a similar argument to the proof of Lemma Z (i), the continuity of $h$ follows easily by virtue of the exponential contraction (1). Compactness of $\Sigma_2/\sim_e$ and $J(f_{\hat{c}})$ leads to $h$ a homeomorphism. To show $h$ acts as a conjugacy, observe from Corollary 16.2 that points $h \circ \sigma(\mathbf{s})$ and $f_{\hat{c}} \circ h(\mathbf{s})$ have the same itinerary under $f_{\hat{c}}$, thus they are the same by Lemma Z (i). ■ + +**Proof of Theorem 1.6.** There are exactly two cases: $f_{\hat{c}}^n(w) \neq 0$ for all $n \ge 0$ or $f_{\hat{c}}^n(w) = 0$ for some $n \ge 0$. By Corollary 16.2, $h_{\hat{c}}^{-1}(\{w\})$ is a singleton if and only if $f_{\hat{c}}^n(w)$ is as the first case, whereas it consists of two distinct points if and only if $f_{\hat{c}}^n(w)$ is as the second case. ■ + +# Acknowledgments + +The authors would like to thank the referee for his/her comments and suggestions that make the paper more precise and readable. Chen was partly supported by NSC 99-2115-M-001-007, MOST 103-2115-M-001-009, 104-2115-M-001-007, and 105-2115-M-001-003. Kawahira was partly supported by JSPS KAKENHI Grant Number 16K05193. They thank the hospitality of Academia Sinica, Nagoya University, RIMS in Kyoto University, and Tokyo Institute of Technology where parts of this research were carried out. +---PAGE_BREAK--- + +References + +[Ah] L.V. Ahlfors. *Conformal Invariants*, McGraw-Hill Book Co., 1973. + +[AK] P. Atela and H. Kriete. Cantor goes Julia, preprint, *Mathematica Gottingensis* no. 3 (1998), 1-15. + +[BK] C. Bandt and K. Keller. Symbolic dynamics for angle-doubling on the circle. I. 
The topology of locally connected Julia sets. *Ergodic theory and related topics, III* (Güstrow, 1990), 1-23, Lecture Notes in Math., **1514**, Springer, Berlin, 1992. + +[BR] L. Bers and H.L. Royden. Holomorphic family of injections. *Acta Math.* **157** (1987), 259-286. + +[CG] L. Carleson and T. Gamelin. *Complex Dynamics*. Springer-Verlag, 1993. + +[CJY] L. Carleson, P.W. Jones and J.-C. Yoccoz. Julia and John. *Bol. Soc. Bras. Mat.* **25** (1994), 1-30. + +[CK] Y.-C. Chen and T. Kawahira. Simple proofs for the derivative estimates of the holomorphic motion near two boundary points of the Mandelbrot set. *J. Math. Anal. App.* **473** (2019) 345-356. + +[CKLY] Y.-C. Chen, T. Kawahira, H.-L. Li and J.-M. Yuan. Family of invariant Cantor sets as orbits of differential equations. II: Julia sets. *Interna. J. Bifur. Chaos.* **21** (2011), 77-99. + +[D1] A. Douady. Does a Julia set depend continuously on the polynomial? Complex dynamical systems (Cincinnati, OH, 1994), 91–138, *Proc. Sympos. Appl. Math.*, **49**, Amer. Math. Soc., Providence, RI, 1994. + +[D2] A. Douady. Conjectures about the Branner-Hubbard motion of Cantor sets in $\mathbb{C}$. *Dynamics on the Riemann Sphere: A Bodil Branner Festschrift*, 209–222, Eur. Math. Soc., 2006. + +[DH1] A. Douady and J.H. Hubbard. Étude dynamique des polynômes complexes I & II, *Publ. Math. d'Orsay*. 1984 & 1985. + +[DH2] A. Douady and J.H. Hubbard. On the dynamics of polynomial-like mappings. *Ann. Sci. Éc. Norm. Sup.* **18** (1985), 287-344. + +[Du] P.L. Duren. *Univalent Functions*. Springer-Verlag, 1983. + +[L] M.Yu. Lyubich. Some typical properties of the dynamics of rational mappings. *Russian Math. Surveys* **38** (1983), 154–155. + +[K1] J. Kiwi. Wandering orbit portrait. *Trans. Amer. Math. Soc.* **354** (2002), 1473–1485. + +[K2] J. Kiwi. Real laminations and the topological dynamics of complex polynomials. *Adv. Math.* **184** (2004), 207–267. + +[MSS] R. Mañé, P. Sad and D. Sullivan. 
On the dynamics of rational maps. *Ann. Sci. École Norm. Sup.* **16** (1983), 193–217. + +[Mc] C. McMullen. *Complex Dynamics and Renormalization*, Annals of Mathematics Studies, Vol. 135. Princeton University Press, 1994. +---PAGE_BREAK--- + +[M] J. Milnor. Periodic orbits, external rays, and the Mandelbrot set: An expository account. Géométrie complexe et systèmes dynamiques, M.Flexor (ed.) et al. *Astérisque*. **261** (2000), 277–333. + +[PRLS] F. Przytycki, J. Rivera-Letelier and S. Smirnov. Equivalence and topological invariance of conditions for non-uniform hyperbolicity in the iteration of rational maps. *Invent. Math.* **151** (2003), 29–63. + +[RL] J. Rivera-Letelier. On the continuity of Hausdorff dimension of Julia sets and similarity between the Mandelbrot set and Julia sets. *Fund. Math.* **170** (2001), 287–317. + +[Shi] M. Shishikura. The Hausdorff dimension of the boundary of the Mandelbrot set and Julia sets. *Ann. Math.* **147** (1998), no. 2, 225–267. + +[vS] S. van Strien. Misiurewicz maps unfold generally (even if they are critically non-finite). *Fund. Math.* **163** (2000), 39–57. + +[Th] W.P. Thurston. On the geometry and dynamics of iterated rational maps. In "Complex dynamics: Families and friends", 3–109. Edited by D. Schleicher. A.K. Peters, 2009. 
+ +Yi-Chiuan Chen +Institute of Mathematics +Academia Sinica +Taipei 10617, Taiwan +YCChen@math.sinica.edu.tw + +Tomoki Kawahira +Department of Mathematics +Tokyo Institute of Technology +Tokyo 152-8551, Japan +kawahira@math.titech.ac.jp + +Mathematical Science Team +RIKEN Center for Advanced Intelligence Project (AIP) +1-4-1 Nihonbashi, Chuo-ku +Tokyo 103-0027, Japan \ No newline at end of file diff --git a/samples_new/texts_merged/7569662.md b/samples_new/texts_merged/7569662.md new file mode 100644 index 0000000000000000000000000000000000000000..177d5ee6891cd2efdbaae69a1161f12f6a473d95 --- /dev/null +++ b/samples_new/texts_merged/7569662.md @@ -0,0 +1,198 @@ + +---PAGE_BREAK--- + +ON SINGULAR POINTS OF ELECTRICAL CIRCUITS + +By +SHIGEO ICHIRAKU + +(Received April 27, 1978) + +# 1. Introduction. + +A state of an electrical circuit with $b$ elements is specified by a current vector $i=(i_1, \cdots, i_b) \in \mathbb{R}^b$ and a voltage vector $v=(v_1, \cdots, v_b) \in \mathbb{R}^b$. Let $G$ be the oriented graph of the circuit, and we can regard naturally $v$ and $i$ as a real 1-chain and 1-cochain of $G$, i.e., $i \in C_1(G)$, $v \in C^1(G)$. Kirchhoff laws restricts the possible states to a $b$-dimensional subspace $K=\text{Ker } \partial \times \text{Im } \partial^* \subset C_1(G) \times C^1(G)$, where $\partial: C_1(G) \to C_0(G)$ ($\partial^*: C^0(G) \to C^1(G)$) is the boundary (coboundary) operator. The characteristics of resistors (possibly with couplings) of the circuit give the restraint that $(i_R, v_R)$ to be in an $n_R$-dimensional submanifold $A_R \subset C_1(G_R) \times C^1(G_R)$, where $(i_R, v_R)$ denotes the currents and voltages of resistive elements, $n_R$ the number of resistive elements in the circuit, $G_R$ the subgraph of $G$ consisting of all resistive elements. 
+ +Combining Kirchhoff laws and the restraint of the characteristics of resistors, we have a space $\Sigma=K \cap A \subset C_1(G) \times C^1(G)$, where $A=\{(i,v): (i_R, v_R, i_L, v_L, i_C, v_C); (i_R, v_R) \in A_R\}$, on which the dynamics of the circuit takes place. Now, we assume the transversality of $K$ and hence $\Sigma$ is $(b-n_R)$-dimensional submanifold of $C_1(G) \times C^1(G)$. + +The dynamics is described by the following form ([6], [4]). Let + +$$J = \Sigma C_{mn}(v_C) dv_{C,m} \otimes dv_{C,n} - \Sigma L_{mn}(i_L) di_{L,m} \otimes di_{L,n}$$ + +be a 2-tensor on $C_1(G) \times C^1(G)$, where $C_{mn}(v_C)$ ($L_{mn}(i_L)$) is incremental capacitance (inductance) matrix and is assumed symmetric and positive definite ([4]). + +The vector field $X$ on $\Sigma$ which describes the dynamics satisfies the following: + +$$ (\pi^* J)_{(i,v)}(X_{(i,v)}, \xi) = (\xi^* \eta)_{(i,v)}(\xi), \quad \text{for } \xi \in T_{(i,v)}(\Sigma), $$ + +where $\eta$ is a certain 1-form and $\pi$ is the projection to the components of inductor currents and capacitor voltages, + +$$ \pi^t: C_1(G) \times C^1(G) \to C_1(G_L) \times C^1(G_C), $$ + +with its domain restricted to $\Sigma$, and + +$$ t: \Sigma \to C_1(G) \times C^1(G) $$ +---PAGE_BREAK--- + +is the natural inclusion. + +If $\pi: \Sigma \to C_1(G_L) \times C^1(G_C)$ is regular at $(i, v)$, i.e., the differential of $\pi$ at $(i, v)$, +$D\pi(i, v)$ has full rank ($b-n_R$), then $X_{(i,v)}$ is uniquely determined by the above +equation, for $J$ is non-degenerate bi-linear from at every point. A point $(i, v) \in \Sigma$ +is called *singular point* iff $\pi$ is not regular at $(i, v) \in \Sigma'$. Since $\pi^*J$ is degenerate +at the singular point $(i, v) \in \Sigma'$, $X$ is not determined at $(i, v)$. In fact, there is a +case in which we cannot define $X_{(i,v)}$ at some singular points consistently with +other regular points governed by the above equation. 
In most cases, however, +we can remove singular points by adding arbitrarily small capacitors and inductors +appropriately to the original circuits. This procedure is called "*regularization*" and justified by the fact that it corresponds "*to take account of parasitive elements* +" in circuit theory ([6], [1]). But at least theoretically there is a circuit which is +not regularizable, and even in regularizable cases the regularized circuit have more +reactive elements than the original one ([3]). The purpose of this paper is to +point out that singular points are derived from conflictions of Kirchhoff laws +and the restraints of resistive characteristics, therefore in general at singular +points the solution jumps to another branch of the characteristic submanifold. +This process is just a kind of "*catastrophe*". Of course, this phenomenon is +already known by circuit theorists, for example, as "*relaxed oscillation*" or "*dis- +continuous oscillation*" ([1]). + +## 2. Statement of results. + +A tree $T$ is called proper iff $T$ contains all the capacitance branches and contains no inductance branch. The complements of $T$ in $G$ is called the link of $T$ and is denoted by $L$. If the graph of the circuit has no proper tree, the map $\pi: \Sigma \to C_1(G_L) \times C^1(G_C)$ is singular at any point $(i, v) \in \Sigma'$, for the projection $\pi'|_K: K \to C_1(G_L) \times C^1(G_C)$ is already singular. This situation is called "forced degeneracy" ([6], [4]). Excluding the forced degeneracy, we assume the existence of proper tree. + +Let $B$ and $Q$ are the fundamental loop matrix and the fundamental cutset matrix with respect to a proper tree. (For definition of $B$ and $Q$, see [5], [2].) +And Kirchhoff space $K$ is the image of the following into-isomorphism: + +$$ \left[ \begin{matrix} B^t & 0 \\ 0 & Q^t \end{matrix} \right] : C_1(L) \times C^1(T) \to C_1(G) \times C^1(G), $$ + +where $L$ is the link of $T$ in $G$. 
Let $K(i_L, v_C)$ be the affine subspace of $K$ determined by fixing the currents of inductors and the voltages of capacitors, this is possible because vector $(i_L, v_C)$ is subvector of $(i_L, v_T) \in C_1(L) \times C^1(T)$. Clearly the +---PAGE_BREAK--- + +space $K(i_L, v_C)$ is the parallel translation in $K$ of $K(0, 0)$ to the point + +$$b(i_L, v_C) = (i, v) = \begin{bmatrix} B^t & 0 \\ 0 & Q^t \end{bmatrix} \begin{bmatrix} i_L \\ 0 \\ 0 \\ v_C \end{bmatrix},$$ + +here we assume the numbering of the elements is appropriately arranged. + +Let + +$$\pi_R: C_1(G) \times C^1(G) \to C_1(G_R) \times C^1(G_R)$$ + +be the natural projection to the currents and voltages of resistors, and $\pi'_R(K(i_L, v_C))= K_0+(i_R, v_R)$ where $K_0=K(0,0) \subset C_1(G_R) \times C^1(G_R)$ and $(i_R, v_R)=\pi'_R(b(i_L, v_C))$. + +Now we can state our result. + +**Theorem.** Let $C$ be a circuit whose graph has a proper tree. Suppose $\Lambda$ and $\Sigma$ are transversal. Then, a point $(i, v) = (i_L, i_C, i_R, v_L, v_C, v_R)$ is singular point if and only if the characteristic submanifold $A_R$ and the affine subspace $K_0(i_R, v_R)$ are not transverse at $(i_R, v_R)$ in $C_1(G_R) \times C^1(G_R)$. + +### 3. Proof of Theorem. + +Let $i(L)$, $v(T)$, $i(R(L))$, $v(R(T))$ denote the currents of link branches, voltages of tree branches, currents of link resistors, and voltages of tree resistors, respectively. 
For $(i_L, v_C) \in C_1(G_L) \times C^1(G_C)$, we define the map $k_{i(L),v_C}: C_1(G_{R(L)}) \times C^1(G_{R(T)}) \to K(i_L, v_C)$ by the following: + +$$k_{i(L),v_C}(i_{R(L)}, v_{R(T)}) = \begin{bmatrix} B^t & 0 \\ 0 & Q^t \end{bmatrix} \begin{bmatrix} i_L \\ i_{R(L)} \\ v_C \\ v_{R(T)} \end{bmatrix}.$$ + +Then $k_{i(L),v_C}$ is an isomorphism with its inverse: + +$$\pi'_{R(L,R,T)}(\pi'_R K(i_L, v_C)) : K(i_L, v_C) \to C_1(G_{R(L)}) \times C^1(G_{R(T)}),$$ + +for $\pi'_{R(L,R,T)} k_{i_L,v_C} = i d_{C_1(G_{R(L)})} \times C^1(G_{R(T)})$ and $\dim K(i_L, v_C) = n_R = \dim C_1(G_{R(L)}) \times C^1(G_{R(T)})$. + +And hence, the projection $\pi'_R$ with its domain and range restricted as follows: + +$$\pi'_R[K(i_L, v_C): K(i_L, v_C) \to \pi'_R(K(i_L, v_C)) = K_0 + (i_R, v_R)$$ + +is also an isomorphism with inverse: + +$$k_{(i_L,v_C)} \circ \pi'_{(i_L,i_T)}: K_0 + (i_R,v_R) \to K(i_L,v_C).$$ +---PAGE_BREAK--- + +Now, we prove the theorem. Suppose $\mathfrak{p}=(i, v) \in \Sigma'$ is a singular point, i.e., $D\pi(\mathfrak{p}): T_p(\Sigma') \to T_{\pi(p)}(C_1(G_L) \times C^1(G_C))$ is singular. Then $\text{Ker}(D\pi'(\mathfrak{p}) \cap T_p(\Sigma)) \neq \{0\}$, by projecting this to the space $C_1(G_R) \times C^1(G_R)$, we obtain: + +$$ \pi'_R(\text{Ker } D\pi'(\mathfrak{p}) \cap T_p(\Sigma)) \neq \{0\}. $$ + +Since + +$$ T_p(\Sigma) = T_p(A) \cap K = (\pi'_R)^{-1}(T_{\pi(R; p)}(A_R)) \cap K, $$ + +the above equation implies: + +$$ \pi'_R(K \cap \text{Ker } D\pi'(\mathfrak{p})) \cap T_{\pi(R; p)}(A_R) \neq 0. $$ + +But, + +$$ \pi'_R(K \cap \text{Ker } D\pi'(\mathfrak{p})) = \pi'_R(K \cap \pi'^{-1}(\mathbf{i}_L, \mathbf{v}_C)) = \pi'_R(K(\mathbf{i}_L, \mathbf{v}_C)) = K_0|_{(\mathbf{i}_R, \mathbf{v}_R)}, $$ + +this shows that $K_0|_{(\mathbf{i}_R, \mathbf{v}_C)}$ and $A_R$ are not transverse at $(\mathbf{i}_R, \mathbf{v}_R)$. This proves the sufficiency of the theorem. 
+ +Conversely, if $K_0|_{(\mathbf{i}_R, \mathbf{v}_R)}$ and $A_R$ are not transverse at $(\mathbf{i}_R, \mathbf{v}_R)$ with $\mathfrak{p}= (\mathbf{i}_{R'}, \mathbf{i}_{L'}, \mathbf{i}_{C'}, \mathbf{v}_{R'}, \mathbf{v}_{L'}, \mathbf{v}_{C'}) \subseteq \Sigma$, then + +$$ \pi'_R(K \cap \text{Ker } D\pi'(\mathfrak{p})) \cap T_{\pi(R; p)}(A_R) \neq 0. $$ + +Since $\pi'_R(K(i_L, v_C)$ is isomorphism, + +$$ (K \cap \text{Ker } D\pi'(\mathfrak{p})) \cap (\pi'_R K(i_L, v_C))^{-1} (T_{\pi(R; p)}(A_R)) \neq 0, $$ + +this means: + +$$ \text{Ker } D\pi'(\mathfrak{p}) \cap T_p(\Sigma) \neq \{0\}. $$ + +This proves the necessity of the theorem. + +**Remark.** In terms of B and Q, the space $K_0 \subseteq C_1(G_R) \times C^1(G_R)$ is given as follows. Let us decompose the matrices B and Q into the following forms: + +$$ B = \begin{bmatrix} R(L) & L & R(T) & C \\ 1 & 0 & A_{RT} & A_{RC} \\ 0 & 1 & A_{LT} & A_{LC} \\ 0 & 0 & A_{RT} & A_{LC} \end{bmatrix} R(L), $$ + +$$ Q = \begin{bmatrix} R(L) & L & R(T) & C \\ -A_{RT}^t & -A_{LT}^t & 1 & 0 \\ -A_{RC}^t & -A_{LC}^t & 0 & 1 \\ 0 & 0 & 1 & C \end{bmatrix} R(T). $$ + +Then, it is easily seen that + +$$ K_0 = \pi'_R(K(0, 0)) $$ + +$$ = \{(i_R, v_R) = (i_{R(L)}, i_{R(T)}, v_{R(L)}, v_{R(T)}) | i_{R(T)} = A_{RT}^t i_{R(L)}, v_{R(L)} = -A_{RT} v_{R(T)}, (i_{R(T)}, v_{R(T)}) \in C_1(G_{R(L)}) \times C^1(G_{R(T)})\}. $$ + +This means that the space $K_0$ is just Kirchhoff space of the resistive circuit obtained from the given one by open-circuitting all inductance branches and short-circuiting all capacitance branches. +---PAGE_BREAK--- + +**Examples 1. (Example 5 in [6].)** Consider a circuit of Fig. 1 consisting of one non-linear resistor with characteristic of Fig. 2, one capacitor and one inductor. By Remark, $K_0$ is Kirchhoff space of Fig. 3, i.e., $K_0$ is just v-axis in Fig. 2. Therefore at $p_i$ the solution must jump into $p'_i$. + +2. (Example 6 in [6], A regularization of the above example.) 
To regularize the above example, we add a parasitic element $C'$ in parallel to L as in Fig. 4. + +Fig. 1. + +Fig. 2. + +Fig. 3. + +Fig. 4. + +Fig. 5. + +Fig. 6. +---PAGE_BREAK--- + +Then, $K_0$ is Kirchhoff space of Fig. 5, i.e., $K_0$ is just *i*-axis. Therefore $K_0+(v_R, i_R)$ is always transverse to $\Lambda_R$, and hence the circuit of Fig. 4 has no singular point at all. + +Finally, we propose an engineering problem concerning the catastrophy theory. + +**Problem.** Is it possible to make a device (coupled resistors) with its characteristic “cusp type singularity”, as is shown in Fig. 6? + +Certainly, Esaki diode has a characteristic of “fold type singularity”. The “cusp type singularity” is the simplest singularity next to the “fold type singularity”. The cusp type device may be very usefull as was the case in Esaki diodes. + +**Addendum.** Professor H. Kawakami informed the author that the coupled resistors with “cusp type” characteristics ($y=-3bx+x^3$) could be constructed from operational amplifiers and nonlinear analog elements [7]. + +References + +[1] A. A. Andronov, A. A. Vitt and S. E. Khaikin: *Theory of oscillation*. Pergmamon Press. + +[2] S. Ichiraku: *On the transversality conditions in electrical circuits*. Yokohama Math. J. 25 (1977), 85-89. + +[3] E. Ihring: *The regularization of nonlinear electrical circuits*, Proc. of A.M.S., **47** (1975), 179-183. + +[4] T. Matsumoto: *On the dynamics of electrical networks*, J. Differential Equation, **21** (1976), 179-196. + +[5] R. Rohrer: *Circuit Theory*. McGraw-Hill, 1970. + +[6] S. Smale: *On the mathematical foundation of electrical circuit theory*, J. Differential Geometry, 7 (1972), 193-210. + +[7] H. Kawakami, Kunihiro Kobayashi and T. Matsumura, *A realization of voltage controlled nonlinear resistors*, Trans. IECE, (1977) 60-A No. 10, 990-991, (Japanese). 
+ +Department of Mathematics +Yokohama City University +Yokohama, Japan \ No newline at end of file diff --git a/samples_new/texts_merged/7642017.md b/samples_new/texts_merged/7642017.md new file mode 100644 index 0000000000000000000000000000000000000000..3551ceee9fc6dddc1b098efac5464bd729379fc5 --- /dev/null +++ b/samples_new/texts_merged/7642017.md @@ -0,0 +1,313 @@ + +---PAGE_BREAK--- + +Fast and Accurate Texture Recognition with Multilayer +Convolution and Multifractal Analysis + +Hicham Badri, Hussein Yahia, Khalid Daoudi + +► To cite this version: + +Hicham Badri, Hussein Yahia, Khalid Daoudi. Fast and Accurate Texture Recognition with Multilayer Convolution and Multifractal Analysis. European Conference on Computer Vision, ECCV 2014, Sep 2014, Zürich, Switzerland. [hal-01064793](https://hal.inria.fr/hal-01064793) + +HAL Id: hal-01064793 + +https://hal.inria.fr/hal-01064793 + +Submitted on 17 Sep 2014 + +**HAL** is a multi-disciplinary open access +archive for the deposit and dissemination of sci- +entific research documents, whether they are pub- +lished or not. The documents may come from +teaching and research institutions in France or +abroad, or from public or private research centers. + +L'archive ouverte pluridisciplinaire **HAL**, est +destinée au dépôt et à la diffusion de documents +scientifiques de niveau recherche, publiés ou non, +émanant des établissements d'enseignement et de +recherche français ou étrangers, des laboratoires +publics ou privés. +---PAGE_BREAK--- + +# Fast and Accurate Texture Recognition with Multilayer Convolution and Multifractal Analysis + +Hicham Badri, Hussein Yahia, and Khalid Daoudi + +INRIA Bordeaux Sud-Ouest, 33405 Talence, France +{hicham.badri,hussein.yahia,khalid.daoudi}@inria.fr + +**Abstract.** A fast and accurate texture recognition system is presented. The new approach consists in extracting locally and globally invariant representations. 
The locally invariant representation is built on a multi-resolution convolutional network with a local pooling operator to improve robustness to local orientation and scale changes. This representation is mapped into a globally invariant descriptor using multifractal analysis. We propose a new multifractal descriptor that captures rich texture information and is mathematically invariant to various complex transformations. In addition, two more techniques are presented to further improve the robustness of our system. The first technique consists in combining the generative PCA classifier with multiclass SVMs. The second technique consists of two simple strategies to boost classification results by synthetically augmenting the training set. Experiments show that the proposed solution outperforms existing methods on three challenging public benchmark datasets, while being computationally efficient. + +## 1 Introduction + +Texture classification is one of the most challenging computer vision and pattern recognition problems. A powerful texture descriptor should be invariant to scale, illumination, occlusions, perspective/affine transformations and even non-rigid surface deformations, while being computationally efficient. Modeling textures via statistics of spatial local textons is probably the most popular approach to build a texture classification system [1,2,3,4,5,6,7]. Based on this Bag-of-Words architecture, these methods try to design a robust local descriptor. Distributions over these textons are then compared using a proper distance and a nearest neighbor or kernel SVMs classifier [8]. Another alternative to regular histograms consists in using multifractal analysis [9,10,11,12,13]. The VG-fractal method [9] statistically represents the textures with the full PDF of the local fractal dimensions or lengths, while the methods in [10,11,12,13] make use of the box-counting method to estimate the multifractal spectrum. 
Multifractal-based descriptors are theoretically globally invariant to bi-Lipschitz transforms that include perspective transforms and texture deformations. A different approach recently presented in [14] consists in building a powerful local descriptor by cascading wavelet scattering transformations of image patches and using a generative PCA classifier [15]. Unfortunately, while these methods achieve high accuracy on some standard benchmark datasets, little attention is given to the computational efficiency, which is crucial in a real-world system. +---PAGE_BREAK--- + +We present in this paper a new texture classification system which is both accurate and computationally efficient. The motivation behind the proposed work comes from the success of multifractal analysis [10,9,11,12,13]. Given an input texture, the image is filtered with a small filter bank for various filter orientations. A pooling operator is then applied to improve robustness to local orientation change. This process is repeated for different resolutions for a richer representation. This first step generates various low-pass and high-pass responses that form a *locally invariant* representation. The mapping towards the final descriptor is done via multifractal analysis. It is well known that the *multifractal spectrum* encodes rich texture information. The methods in [10, 11, 12, 13] use the box-counting method to estimate the multifractal spectrum. However, this method is unstable due to the limited resolution of real-world images. We present a new multifractal descriptor that is more stable and improves invariance to bi-Lipschitz transformations. This improvement is validated by extensive experiments on public benchmark datasets. The second part of our work concerns training strategies to improve classification rates. We propose to combine the generative PCA classifier [14,15] with kernel SVMs [8] for classification. 
We also introduce two strategies called "synthetic training" to artificially add more training data based on illumination and scale change. Results outperforming the state-of-the-art are obtained over challenging public datasets, with high computational efficiency. + +The paper is organized as follows: section 2 describes the proposed descriptor, section 3 presents the proposed training strategies, section 4 presents classification results conducted on 3 public datasets as well as a comparison with 9 state-of-the-art methods. + +## 2 Robust Invariant Texture Representation + +The main goal of a texture recognition system is to build an *invariant* representation, a mapping which reduces the large intra-class variability. This is a very challenging problem because the invariance must include various complex transformations such as translation, rotation, occlusion, illumination change, non-rigid deformations, perspective view, among others. As a result, two similar textures with different transformation parameters must have similar descriptors. An example is given in Figure 1. Not only the system should be accurate, but it should be also computationally efficient. Otherwise, its use in a real-world system would be limited due to the long processing time to extract the descriptor. Our goal in this paper is to build both an *accurate* and *fast* texture recognition system. Our Matlab non-optimized implementation takes around 0.7 second to extract the descriptor on a medium size image (480 × 640) using a modern laptop. The processing time can be further decreased by reducing the resolution of the image without sacrificing much the accuracy. This is due to the strong robustness of our descriptor to scale changes via accurate multifractal statistics that encode rich multi-scale texture information. We explain in this section how we build the proposed descriptor, the motivation behind the approach and the connection with previous work. 
+ +### 2.1 Overview of the Proposed Approach + +The proposed descriptor is based on two main steps : +---PAGE_BREAK--- + +Fig. 1: Intra-class variability demonstration. The three textures 1, 2 and 3 exhibit strong changes in scale and orientation as well as non-rigid deformations. As can be seen, the proposed descriptor is nearly invariant to these transformations (see section 2). + +1. Building a *locally* invariant representation : using multiple high-pass filters, we generate different sparse representations for different filter orientations. A pooling operator is applied on the orientation to increase the local invariance to orientation change. The process is repeated for multiple image resolutions for a richer representation. + +2. Building a *globally* invariant representation : the first step generates various images that encode different texture information. We also include the multi-resolution versions of the input to provide low-pass information. We need a mapping that transforms this set of images into a stable, fixed-size descriptor. We use multi-fractal analysis to statistically describe each one of these images. We present a new method that extracts rich information directly from local singularity exponents. The local exponents encode rich multi-scale texture information. Their log-normalized distribution represents a stable mapping which is invariant to complex bi-Lipschitz transforms. As a result, the proposed multifractal descriptor is proven mathematically to be robust to strong environmental changes. + +## 2.2 Locally Invariant Representation + +A locally invariant representation aims at increasing the similarity of local statistics between textures of the same class. To build this representation, we construct a simple convolutional network where the input image is convolved with a filter bank for various orientations, and then pooled to reduce local orientation change. 
The multilayer extension consists in repeating the same process for various image resolutions on the low-pass output of the previous resolution, which offers a richer representation. +---PAGE_BREAK--- + +Given an input texture *I*, the image is first low-pass filtered with a filter $\psi_l$ to reduce small image domain perturbations and produce an image $J_{1,0}$. This image is then filtered with multiple zero-mean high-pass filters $\psi_{k,\theta}$, where *k* denotes the filter number and $\theta$ its orientation. High-pass responses encode higher-order statistics that are not present in the low-pass response $J_{1,0}$. A more stable approach consists in applying the modulus on the high-pass responses, which imposes symmetric statistics and improves invariance of the local statistics. Applying multiple filtering with multiple different filters naturally increases the amount of texture information that are going to be extracted further via multifractal analysis. In order to increase the local invariance to orientation, we apply a pooling operator $\phi_\theta: \mathcal{R}^{i \times j \times n} \rightarrow \mathcal{R}^{i \times j}$ on the oriented outputs for each filter: + +$$ J_{1,k} = \phi_{\theta}(|J_{1,0} \star \psi_{k,\theta}|, \theta = \theta_1, \dots, \theta_n), \quad k = 1, \dots, K, \qquad (1) $$ + +where *n* is the number of orientations and *i* × *j* is the size of the low-pass image. As a result, we obtain 1 low-pass response and *K* high-pass responses, each image is encoding different statistics. For a richer representation, we repeat the same operation for different resolutions $s = 2^0, \dots, -L$, where *s* = 1 is the finest resolution and $s = 2^{-L}$ is the coarsest resolution. 
The image generation process is then generalized as follows: + +$$ J_{s,k} = \begin{cases} I \star \psi_l & k=0, s=1 \\ \downarrow (J_{2s,0} \star \psi_l) & k=0, s \neq 1 \\ \phi_\theta(|J_{s,0} \star \psi_{k,\theta}|, \theta=\theta_1, \dots, \theta_n) & k=1, \dots, K, \end{cases} \qquad (2) $$ + +where $\downarrow$ denotes the downsampling operator. We found that calculating statistics on multiple resolutions instead of a single one increases significantly the robustness of the descriptor. This can be expected because two textures may seem "more similar" at a lower resolution. As a result, the intra-class variability decreases as the resolution decreases, but keeping higher resolution images is important to ensure extra-class decorrelation. + +## Dimensionality Reduction with Pooling + +Using multiple filters $\psi_{k,\theta}$ increases dramatically the size of the image set. Knowing that each image $J_{s,k}$ will be used to extract statistics using multifractal analysis, this will result in a very large descriptor. One resulting issue is the high dimensionality of the training set. Another one is the processing time as the statistics should be applied on each image. We propose to merge different high-pass responses $J_{s,k}$ together to reduce the number of images. A straightforward approach would be to gather various images $\{J_{s,k}, k=t, \dots, u\}$ and then apply a pooling operator $\phi_r$ that is going to merge each image subset into one single image $J_{s,k_{t,...,u}}$: + +$$ J_{s,k_{t,...,u}}} = \phi_r(J_{s,k}, k=t, \dots, u). \qquad (3) $$ + +As a result, the number of high-pass responses will be decreased; this leads to a reduced size descriptor. The pooling operator $\phi_r$ can be either the mean or the min/max functions. We take $\phi_r$ as a maximum function in this paper. An example is given in Figure 2 for one resolution $s=0$ using 6 high-pass filters and one low-pass filter. The +---PAGE_BREAK--- + +number of images is reduced from 7 to 3. 
For 5 resolutions ($s = 2^0, \dots, -4$), the total number of images goes from 35 to 15, which is an important reduction. + +Fig. 2: Image generation example applied on the texture input $I$ for one resolution using 6 high-pass filters. The images $J_{0,1,\dots,6}$ are a result of the orientation pooling (eq. 2). The 6 images are reduced to 2 images using a pooling operator $\phi_r$ on similar responses to reduce the dimensionality. The same process is repeated for multiple resolutions. + +## 2.3 Globally Invariant Representation + +Once the set of low-pass and high-pass images is generated, we need to extract global statistics, a mapping into a fixed-size descriptor, which is *globally invariant* to the complex physical transformations. We propose to use a new multifractal approach to statistically describe textures suffering from strong environmental changes. To understand the difference between the proposed method and the previous work, we first present the standard fractal and multifractal analysis framework used by the previous methods, we then introduce the proposed approach. + +**Multifractal Analysis** In a nutshell, a fractal object $E$ is self-similar across scales. One characteristic of its irregularity is the so-called *box fractal dimension*. By measuring a fractal object on multiple scales $r$, the box fractal dimension is defined as a power-law relashionship between the scale $r$ and the smallest number of sets of length $r$ covering $E$ [16]: + +$$ \dim(E) = \lim_{r \to 0} \frac{\log N(r, E)}{-\log r}, \quad (4) $$ + +Using squared boxes of size $r$, this dimension can be estimated numerically, known as the *box-counting method*. Multifractal analysis is an extension of this important notion. A multifractal object $F$ is composed of many fractal components $F_{1,...,f}$. In this +---PAGE_BREAK--- + +case, a single fractal dimension is not sufficient to describe this object. 
The *multifractal spectrum* is the collection of all the associated fractal dimensions that describe the multifractal object. + +It is easy to show mathematically that the fractal dimension is invariant to bi-Lipschitz transformations [17], which includes various transformations such as non-rigid transformations, view-point change, translation, rotation, etc.. As a result, the multifractal spectrum is also invariant to these transformations. This makes the multifractal spectrum an attractive tool to globally describe textures. However, the box-counting method gives a rather crude estimation of the real fractal dimension. The fractal dimension is estimated for each fractal set using a log-log regression. As the resolution $r$ is supposed to be very small ($r \to 0$), using small-sized boxes on a relatively low-resolution image results in a biased estimation due to the relatively low-resolution of real-world images [18]. It has been used as the core of various recent multifractal texture descriptors [10, 11, 12, 13] that use the same box-counting method to build the final descriptor. We present a different method to statistically describe textures using multifractal analysis. Contrary to previous methods, we use a new measure which is based on the distribution of local singularity exponents. It can be shown in fact that this measure is related to the true multifractal spectrum, and its precision is proven by the high-accuracy of the proposed descriptor. Moreover, this approach is computationally efficient, which permits to achieve high accuracy at reduced processing time. + +**Proposed Multifractal Descriptor** The proposed method first estimates the local singularity exponents $h(x)$ on each pixel $x$, and then applies the empirical histogram followed by log operator to extract the global statistics $\phi_h = \log(\rho_h + \epsilon)$. This operation is performed on all the resulting images of the first step, which results in multiple histograms $\phi_{h_i}$. 
The concatenation of all these histograms forms the final descriptor. + +Let $J$ be an image, and $\mu_\psi(B(x,r)) = \int_{B(x,r)} (J \star \psi_r)(y)dy$ a positive measure, where $\psi_r$ is an appropriate wavelet at scale $r$ (Gaussian in our case) and $B(x,r)$ a closed disc of radius $r > 0$ centered at $x$. Multifractal analysis states that the wavelet projections scale as power laws in $r$ [19,20,21]. We use a microcanonical evaluation [20] which consists in assessing an exponent $h(x)$ for each pixel $x$: + +$$ \mu_{\psi}(B(x, r)) \approx \alpha(x)r^{h(x)}, \quad r \to 0. \qquad (5) $$ + +The validity of equation (5) has been tested on a large dataset [21], which proves that natural images exhibit a strong multifractal behavior. Introducing the log, the formula is expressed as a linear fit: + +$$ \log(\mu_{\psi}(B(x, r))) \approx \log(\alpha(x)) + h(x)\log(r), \quad r \to 0. \qquad (6) $$ + +Rewriting the equation in the matrix form permits to calculate all the exponents at once by solving the following linear system: + +$$ \underbrace{\begin{bmatrix} 1 & \log(r_1) \\ \vdots \\ 1 & \log(r_l) \end{bmatrix}}_{A} \underbrace{\begin{bmatrix} \log(\alpha(x_1)) & \cdots & \log(\alpha(x_N)) \\ h(x_1) & \cdots & h(x_N) \end{bmatrix}}_{\eta} = \underbrace{\begin{bmatrix} \log(\mu_{\psi}(B(x_1, r_1))) & \cdots & \log(\mu_{\psi}(B(x_N, r_1))) \\ \vdots & & \vdots \\ \log(\mu_{\psi}(B(x_1, r_l))) & \cdots & \log(\mu_{\psi}(B(x_N, r_l))) \end{bmatrix}}_{b}, \quad (7) $$ +---PAGE_BREAK--- + +$$ \underset{\eta}{\operatorname{argmin}} ||A\eta - b||_2^2, h(x_i) = \eta(2, i), \quad (8) $$ + +where *N* is the number of pixels of the image *J*, *l* is the number of scales used in the log-log regression. This matrix formulation is computationally efficient and plays an important role in the speed of the proposed method. 
Given the local exponents *h*(*x*), which is an image of the same size of *J* that describes the local irregularities at each pixel, we need to extract now a fixed-size measure that globally describes the statistics of *h*(*x*). Using the box-counting method, this would require extracting all the fractal fractal sets $F_h = \{x | h(x) \approx h\}$, and then calculating the box-counting dimension for each set $F_h$. As discussed before, this approach leads to a crude estimation of the true multifractal spectrum due to the actual low-resolution of real-world images. Moreover, a log-log regression should be performed on each fractal set. Instead, we propose to use the empirical histogram $\rho_h$ followed by a log operator: + +$$ \varphi_h = \log(\rho_h + \epsilon), \quad (9) $$ + +where $\epsilon \ge 1$ is set to provide stability. The distribution of the local exponents is an invariant representation which encodes the multi-scale properties of the texture. The log acts as a normalization operator that nearly linearizes histogram scaling and makes the descriptor more robust to small perturbations. This way, we have access to reliable statistics¹. This log-histogram is calculated on each image generated in the first step, which results in a set of histograms $\varphi_{h_1,...,h_M}$, where *M* is the total number of generated images. The final descriptor $\varphi$ is constructed by concatenating $(\uplus)$ all the generated histograms: + +$$ \varphi = \bigoplus_m^{M} \varphi_{h_m}; \quad (10) $$ + +A descriptor example is given in Figure 3. This descriptor $\varphi$ is the result of the concatenation of 14 log exponents histograms calculated on the images generated with the first step of the method presented in section 2.2 and further explained in Figure 2. Three images are generated for each scale *s*; a low-pass response is presented in red, and two high-pass responses are presented in black and gray in the figure ². 
+ +## 2.4 Analysis + +The basic multifractal framework consists in generating multiple images and then extracting statistics using multifractal analysis. Multifractal descriptors are mathematically invariant to bi-Lipschitz transforms, which even includes non-rigid transformation and view-point change. The proposed method follows the same strategy, but is substantially different from the previous methods. The differences lie in both the image generation step and the statistical description. For instance, the WMFS method [13] + +¹ A mathematical relationship between the log exponents histogram and the multifractal spectrum is presented in the supplementary material. + +² A histogram was discarded for $s = 2^{-4}$ in the second high response (in gray) due to the large size of the filter which is larger than the actual size of the input image at resolution $s = 2^{-4}$. +---PAGE_BREAK--- + +Fig. 3: A descriptor example using a low-pass response and two high-pass responses for 5 resolutions $s = 2^0, \dots, -4$. The exponents log-histogram is calculated for each response and for multiple image resolutions $s$. + +generates multiple images for multiple orientations, each oriented image is then analyzed using Daubechies discrete wavelet transform as well as using the wavelet leaders [22]. The multifractal spectrum (MFS) is then estimated for each image, for a given orientation using the box-counting method. Each MFS is then concatenated for a given orientation and the final descriptor is defined as the mean of all the descriptors over the orientation. Contrary to this method, we use different high-pass filters instead of one single analyzing wavelet, which permits to extract different statistics. Generating multiple descriptors for multiple orientations is computationally expensive. In contrast, we generate only one descriptor. To ensure local robustness to orientation, we apply a pooling operator on the *filtered responses*. 
This approach is much more computationally efficient. Finally, the core of our method is the new multifractal descriptor which permits to extract accurate statistics, contrary to the popular box-counting method as explained in the previous section. The proposed method takes about 0.7 second to extract the whole descriptor on an image of size 480 × 640, compared to 37 seconds as reported in the state-of-the-art multifractal method [13]. Experiments show that the proposed descriptor permits also to achieve higher accuracy, especially in large-scale situations when the extra-class decorrelation is a challenging issue. + +## 2.5 Pre and Post Processing + +Pre-processing and post-processing can improve the robustness of a texture recognition system. For instance, the method in [12] performs a scale normalization step on each input texture using blob detection. This step first estimates the scale of the texture and then a normalization is applied, which aims at increasing the robustness to scale change. Other texture classification methods such as [9] use Weber's law normalization to improve robustness to illumination. We do not use any scale normalization step such as [12,13], we rather use sometimes histogram equalization to improve robustness to illumination change. We also use a post-processing on features vector $\phi$ using wavelet domain soft-thresholding [?]. This step aims at increasing the intra-class correlation by +---PAGE_BREAK--- + +reducing small histogram perturbations (for more details, please refer to the supplementary material). + +# 3 Classification and Training Strategies + +The second part of our work concerns the training aspect of the texture recognition problem. The globally invariant representation offers a theoretically stable invariant representation via accurate multifractal statistics. 
However, there are other small transformations and perturbations that may occur in real-world images and this is where a good training strategy will help us to take advantage of the proposed descriptor in practice. We work on two ideas : + +1. The choice of the classifier can improve recognition rates: we introduce a simple combination between the Generative PCA classifier [14] and SVMs [8]. + +2. The lack of data is an issue, how to get more data? : Given an input training texture image, we synthetically generate more images by changing its illumination and scale. We call this strategy "synthetic training". + +Experiments on challenging public benchmark datasets, including a large-scale dataset with 250 classes, validates the robustness of the proposed solution. + +## 3.1 Classification + +**Support Vector Machines** SVMs [8] are widely used in texture classification [10,12,13,17,6]. Commonly used kernels are mainly RBF Gaussian kernel, polynomials and $\chi^2$ kernel. Extension to multiclass can be done via strategies such as one-vs-one and one-vs-all. In this paper, we use the one-vs-all strategy with an RBF-kernel. It consists in building a binary classifier for each class as follows: for each class, a positive label is assigned to the corresponding instances and a negative label is affected to all the remaining instances. The winning class $c_{svm}$ can be chosen based on probability estimates [23] or a simple score maximization: + +$$ c_{svm} = \underset{1 \le c \le N_c}{\operatorname{argmax}} \{f_{svm}(x,c)\} , \quad f_{svm}(x,c) = \sum_{i=1}^{M_c} \alpha_i^c y_i^c K(x_i^c, x) + b_c, \quad (11) $$ + +where $\alpha_i^c$ are the optimal Lagrange multipliers of the classifier representing the class $c$, $x_i^c$ are the support vectors of the class $c$, $y_i^c$ are the corresponding $\pm 1$ labels, $N_c$ is the number of classes and $x$ is the instance to classify. 
+ +**Generative PCA Classifier** The generative PCA (GPCA) classifier is a simple PCA-based classifier recently used in [15,14]. Given a test descriptor $x$, GPCA finds the closest class centroid $\mathbb{E}(\{x_c\})$ to $x$, after ignoring the first $D$ principal variability directions. Let $V_c$ be the linear space generated by the $D$ eigenvectors of the covariance matrix of largest eigenvalues, and $V_c^\perp$ its orthogonal complement. The generative PCA classifier uses the projection distance associated to $P_{V_c^\perp}$: + +$$ c_{pca} = \underset{1 \le c \le N_c}{\operatorname{argmin}} \| P_{V_c^\perp} (x - \mathbb{E}(\{x_c\})) \|^2. \quad (12) $$ +---PAGE_BREAK--- + +Classification consists in choosing the class $c_{pca}$ with the minimum projection distance. + +**GPCA-SVM Classifier** We propose to combine GPCA and SVMs in one single classifier. The idea behind this combination comes from the observation that SVMs and GPCA often fail on different instances. As a result, a well-established combination of these classifiers should theoretically lead to improved performance. We propose a combination based on the distance between the score separation of each classifier output + +$$ c_{final} = \begin{cases} c_{svm} & \text{if } f_{svm}(x, c_{svm}) - f_{svm}(x, c_{pca}) \geq th_{svm} \\ c_{pca} & \text{otherwise,} \end{cases} \quad (13) $$ + +where $th_{svm}$ is a threshold parameter. The score separation gives an idea of SVMs' accuracy to classify a given instance. Another similar approach would be using probability estimates [23] instead of the score. If the measure $f_{svm}(x, c_{svm}) - f_{svm}(x, c_{pca})$ is relatively important, this means that SVMs are quite "confident" about the result. Otherwise, the classifier selects the GPCA result. Determining the best threshold $th_{svm}$ for each instance is an open problem. In this paper, we rather fix a threshold value for each experiment. 
We generally select a small threshold for small training sets and larger thresholds for larger sets. Even if this strategy is not optimal, experiments show that the combination improves the classification rates as expected. + +## 3.2 Synthetic Training + +One important problem in training is coping with the low amount of examples. We propose a simple strategy to artificially add more data to the training set by changing illumination and scale of each instance of the training set. While this idea seems simple, it can have a dramatic impact on the performance as we will see in the next section. + +**Multi-Illumination Training** Given an input image *I*, multi-illumination training consists in generating other images of the same content of *I* but with different illumination. There are two illumination cases; the first one consists in *uniform* changing by image scaling of the form *a*I, where *a* is a given scalar. The second case consists in *nonuniform* changing using histogram matching with a set of histograms. The histograms can come from external images, or even from the training set itself (for example by transforming or combining a set of histograms). + +**Multi-Scale Training** Given an input image *I*, multi-scale training consists simply in generating other images of the same size as *I* by zooming-in and out. In this paper, we use around 4 generated images, 2 by zooming-in and 2 others by zooming-out. + +# 4 Texture Classification Experiments + +We present in this section texture classification results conducted on standard public datasets **UIUC** [24,1], **UMD** [25] and **ALOT** [26,27], as well as a comparison with 9 state-of-the-art methods. +---PAGE_BREAK--- + +**Datasets Description** The UIUC dataset [24,1] is one of the most challenging texture datasets presented so far. 
It is composed of 25 classes, each class contains 40 grayscale images of size 480 × 640 with strong scale, rotation and viewpoint changes in uncontrolled illumination environment. Some images exhibit also strong non-rigid deformations. Some samples are presented in Figure 4. The UMD dataset [25] is similar to UIUC with higher resolution images (1280 × 960) but exhibits less non-rigid deformations and stronger illumination changes compared to UIUC. To evaluate the proposed method on a large-scale dataset, we choose the ALOT dataset [26,27]. It consists of 250 classes, 100 samples each. We use the same setup as the previous multifractal methods [13]: grayscale version with half resolution (768 × 512). The ALOT dataset is very challenging as it represents a significantly larger number of classes (250) compared to UIUC and UMD (25) and very strong illumination change (8 levels of illumination). The viewpoint change is however less dramatic compared to UIUC and UMD. + +Fig. 4: Texture samples from the **UIUC** dataset [24,1]. Each row represents images from the same class with strong environmental changes. + +**Implementation details** In order to build a fast texture classification system, we use only two high-pass filtering responses, which results in 3 histograms per image resolution³. The number of the image scales is fixed to 5. The filter bank consists in high-pass wavelet filters (Daubechies, Symlets and Gabor). A more robust descriptor can be built by increasing the number of filters and orientations. Filtering can be parallelized for faster processing. While augmenting the number of filters slightly improves classification results, the minimalist setup presented above, coupled with the training strategies introduced in this paper, permits to outperform existing techniques while offering in addition computational efficiency. 
+ +**Evaluation** + +We evaluate the proposed system and compare it with state-of-the-art methods for 50 random splits between training and testing. The evaluation consists in three steps: + +³ Except for **ALOT** dataset, we use 3 high-pass responses for a more robust representation. +---PAGE_BREAK--- + +1. log-histogram vs. box-counting: We evaluate the precision of our log-histogram method and compare it with the box-counting method used in previous methods. + +2. Learning efficiency: We compare the proposed GPCA-SVM combination with single GPCA and SVM results and see how the proposed synthetic training strategy improves classification rates. + +3. We compare our main results with **9** state-of-the-art results. + +**log-histogram vs. box-counting** In this experiment, we replace the log-histogram step of our approach with the box-counting method widely used in the previous multifractal methods to see if the proposed log-histogram leads to a more accurate bi-Lipschitz invariance. The results are presented in Figure 5. As can be seen, the log-histogram approach leads to higher performance, especially when more data is available. This clearly shows that indeed, the log-histogram leads to a better bi-Lipschitz invariance, as theoretically discussed before. The log-histogram is a simple operation that permits our system to achieve high computational efficiency. + +Fig. 5: Comparison between the box-counting method and the proposed log-histogram approach for various dataset training sizes (5, 10 and 20). The proposed approach leads to a more accurate descriptor. + +**Learning Efficiency** In this experiment, we first compare the proposed GPCA-SVM combination with single GPCA and SVM classifiers using the proposed descriptor. Each dataset is presented in the form $D_{(y)}^x$ where x is the name of the dataset and y is the training size in number of images. The best results are in bold. As can be seen in Table 1, the GPCA-SVM does indeed improve classification rates. 
We expect to get even better results with a better strategy to set the threshold parameters $th_{svm}$ as in the proposed experiments, the threshold is fixed for all the instances. Now we compare the results with and without the proposed synthetic training strategy. As can be seen, synthetic training leads to a dramatic improvement. This is a very interesting approach as it increases only the training time. The system can achieve higher recognition accuracy for almost the same computational efficiency. For the **UMD** and **ALOT** datasets, we use uniform illumination change with the multiplicative parameter $a$ in the range [0.9, 0.95, 1.05, 1.1]. For the **UIUC** dataset, we use the nonuniform illumination change +---PAGE_BREAK--- + +with two histograms. For the multi-scale training, we use only four generated images (two by zooming-in and two other by zooming-out), which increases the training set 9 times in the **UMD** and **UIUC** datasets (no mutli-scale training is used for the **ALOT** dataset). + +
D(5)UIUCD(10)UIUCD(20)UIUCD(5)UMDD(10)UMDD(20)UMDD(10)ALOTD(30)ALOTD(50)ALOT
ProposedGPCA91.15%97.12%99.07%95.07%97.85%99.40%89.30%98.03%99.27%
SVM91.23%96.30%98.47%94.43%97.44%99.25%88.96%98.16%99.14%
GPCA-SVM92.58%97.17%99.10%95.23%98.04%99.44%90.67%98.45%99.34%
+ Synthetic TrainGPCA95.84%98.77%99.67%98.02%99.13%99.62%91.54%98.81%99.59%
SVM95.40%98.43%99.46%97.75%99.06%99.72%92.23%98.80%99.51%
GPCA-SVM96.13%98.93%99.78%98.20%99.24%99.79%92.82%99.03%99.64%
+ +Table 1: Classification rates comparison using GPCA-SVM and synthetic training. + +**Discussions** We compare the proposed method MCMA (Multilayer Convolution - Multifractal Analysis) with 9 state-of-the-art methods for 50 random splits between training and testing, for different training sizes. Results are presented in Table 2. The best results are in bold ⁴. As can be seen, the proposed method outperforms the published results on the 3 datasets. Compared to the leading method [14], our system seems to better handle viewpoint change and non-rigid deformations. This is clearly shown in the results on the **UIUC** dataset that exhibits strong enviromental changes. This result can be expected as the scattering method builds invariants on translation, rotation and scale changes, which does not include viewpoint change and non-rigid deformations. Contrary to this, using accurate multifractal statistics, our solution produces descriptors that are invariant to these complex transformations. The proposed system maintains a high performance on the **UMD** dataset. It is worth noting that on this dataset, the images are of high resolution (1280 × 960), which gives an advantage over the **UIUC** dataset. However, we did not use the original resolution, we rather rescale the images to half-size for faster processing. The high accuracy shows that the proposed multifractal method is able to extract robust invariant statistics even on low-resolution images. + +On the large-scale dataset **ALOT**, the proposed method maintains high performance. + +Recall that this dataset contains 250 classes with 100 samples each. This is a very challenging dataset that evaluates the extra-class decorrelation of the produced descriptors. + +A robust descriptor should increase the intra-class correlation, but should also decrease the extra-class correlation and this has been evaluated on a large-scale data set which contains as many different classes as possible. 
The results on the **ALOT** dataset clearly show a significant performance drop of the leading multifractal method WMFS. The proposed solution in fact outperforms the WMFS method even without synthetic training as can be seen in Table 1. This proves that the proposed descriptor is able to extract a robust invariant representation. + +⁴ Detailed results with standard deviation can be found in the supplementary material. +---PAGE_BREAK--- + +
DUIUC(5)DUIUC(10)DUIUC(20)DUMD(5)DUMD(10)DUMD(20)DALOT(10)DALOT(30)DALOT(50)
MFS [10]--92.74%--93.93%71.35%82.57%85.64%
OTF-MFS [11]--97.40%--98.49%81.04%93.45%95.60%
WMFS [13]93.40%97.00%97.62%93.40%97.00%98.68%82.95%93.57%96.94%
VG-Fractal [9]85.35%91.64%95.40%--96.36%---
Varma [28]--98.76%------
Lazebnik [1]91.12%94.42%97.02%90.71%94.54%96.95%---
BIF [5]--98.80%------
SRP [7]--98.56%--99.30%---
Scattering [14]93.30%97.80%99.40%96.60%98.90%99.70%---
MCMA96.13%98.93%99.78%98.20%99.24%99.79%92.82%99.03%99.64%
+ +Table 2: Classification rates on the UIUC, UMD and ALOT datasets. + +# 5 Conclusion + +This paper presents a fast and accurate texture classification system. The proposed solution builds a locally invariant representation using a multilayer convolution architecture that performs convolutions with a filter bank, applies a pooling operator to increase the local invariance and repeats the process for various image resolutions. The resulting images are mapped into a stable descriptor via multifractal analysis. We present a new multifractal descriptor that extracts rich texture information from the local singularity exponents. The descriptor is mathematically validated to be invariant to bi-Lipschitz transformations, which includes complex environmental changes. The second part of paper tackles the training part of the recognition system. We propose the GPCA-SVM classifier that combines the generative PCA classifier with the popular kernel SVMs to achieve higher accuracy. In addition, a simple and efficient "synthetic training" strategy is proposed that consists in synthetically generating more training data by changing illumination and scale of the training instances. Results outperforming the state-of-the-art are obtained and compared with 9 recent methods on 3 challenging public benchmark datasets, while ensuring high computational efficiency. + +# Acknowledgements + +Hicham Badri's PhD is funded by an INRIA (Direction of Research) CORDI-S grant. He is making a PhD in co-supervision with INRIA and Mohammed V-Agdal University - LRIT, Associated Unit to CNRST (URAC 29). + +# References + +1. Lazebnik, S., Schmid, C., Ponce, J.: A sparse texture representation using local affine regions. PAMI **27** (2005) 1265–1278 + +2. Zhang, J., Marszalek, M., Lazebnik, S., Schmid, C.: Local features and kernels for classification of texture and object categories: A comprehensive study. Int. J. Comput. Vision **73**(2) (June 2007) 213–238 +---PAGE_BREAK--- + +3. 
Varma, M., Zisserman, A.: A statistical approach to material classification using image patch exemplars. PAMI 31(11) (November 2009) 2032–2047 + +4. Ojala, T., Pietikäinen, M., Mäenpää, T.: Multiresolution gray-scale and rotation invariant texture classification with local binary patterns. PAMI 24(7) (July 2002) 971–987 + +5. Crosier, M., Griffin, L.D.: Texture classification with a dictionary of basic image features. In: CVPR, IEEE Computer Society (2008) + +6. Liu, L., Fieguth, P.W.: Texture classification from random features. PAMI 34(3) (2012) 574–586 + +7. Liu, L., Fieguth, P.W., Kuang, G., Zha, H.: Sorted random projections for robust texture classification. In: ICCV. (2011) 391–398 + +8. Scholkopf, B., Smola, A.J.: Learning with Kernels: Support Vector Machines, Regularization, Optimization, and Beyond. MIT Press, Cambridge, MA, USA (2001) + +9. Varma, M., Garg, R.: Locally invariant fractal features for statistical texture classification. In: CVPR, Rio de Janeiro, Brazil. (October 2007) + +10. Xu, Y., Ji, H., Fermuller, C.: A projective invariant for textures. 2006 CVPR 2 (2006) 1932–1939 + +11. Xu, Y., Huang, S.B., Ji, H., Fermuller, C.: Combining powerful local and global statistics for texture description. In: CVPR, IEEE (2009) 573–580 + +12. Xu, Y., Yang, X., Ling, H., Ji, H.: A new texture descriptor using multifractal analysis in multi-orientation wavelet pyramid. In: CVPR. (2010) 161–168 + +13. Ji, H., Yang, X., Ling, H., Xu, Y.: Wavelet domain multifractal analysis for static and dynamic texture classification. IEEE Transactions on Image Processing 22(1) (2013) 286–299 + +14. Sifre, L., Mallat, S.: Rotation, scaling and deformation invariant scattering for texture discrimination. In: CVPR. (2013) + +15. Bruna, J., Mallat, S.: Invariant scattering convolution networks. PAMI 35(8) (August 2013) 1872–1886 + +16. Falconer, K.: Techniques in Fractal Geometry. Wiley (1997) + +17. 
Xu, Y., Ji, H., Fermüller, C.: Viewpoint invariant texture description using fractal analysis. Int. J. Comput. Vision 83(1) (June 2009) 85–100
+
+18. Arneodo, A., Bacry, E., Muzy, J.F.: The thermodynamics of fractals revisited with wavelets. Physica A: Statistical and Theoretical Physics 213(1-2) (January 1995) 232–275
+
+19. Turiel, A., del Pozo, A.: Reconstructing images from their most singular fractal manifold. IEEE Trans. Img. Proc. 11(4) (April 2002) 345–350
+
+20. Yahia, H., Turiel, A., Perez-Vicente, C.: Microcanonical multifractal formalism: a geometrical approach to multifractal systems. Part I: singularity analysis. Journal of Physics A: Math. Theor (41) (2008)
+
+21. Turiel, A., Parga, N.: The multifractal structure of contrast changes in natural images: From sharp edges to textures. Neural Computation 12(4) (2000) 763–793
+
+22. Wendt, H., Roux, S.G., Jaffard, S., Abry, P.: Wavelet leaders and bootstrap for multifractal analysis of images. Signal Process. 89(6) (June 2009) 1100–1114
+
+23. Chang, C.C., Lin, C.J.: LIBSVM: A library for support vector machines. ACM Transactions on Intelligent Systems and Technology 2 (2011) 27:1–27:27
+
+24. : UIUC: http://www-cvr.ai.uiuc.edu/ponce_grp/data/.
+
+25. : UMD: http://www.cfar.umd.edu/~fer/website-texture/texture.htm.
+
+26. Burghouts, G.J., Geusebroek, J.M.: Material-specific adaptation of color invariant features. Pattern Recognition Letters 30 (2009) 306–313
+
+27. : ALOT: http://staff.science.uva.nl/~aloi/public_alot/.
+
+28. Varma, M.: Learning the discriminative power-invariance trade-off. In: ICCV. (2007)
\ No newline at end of file
diff --git a/samples_new/texts_merged/904681.md b/samples_new/texts_merged/904681.md
new file mode 100644
index 0000000000000000000000000000000000000000..3c256f3f5dd420a3f1d523dfbc1aceaebe469d19
--- /dev/null
+++ b/samples_new/texts_merged/904681.md
@@ -0,0 +1,228 @@
+
+---PAGE_BREAK---
+
+$$B_J(5840)$$
+
+$$I(J^P) = \frac{1}{2}(??)$$
+
+I, J, P need confirmation. 
+
+OMITTED FROM SUMMARY TABLE
+
+Quantum numbers shown are quark-model predictions.
+
+## $B_J(5840)^+$ MASS
+
+OUR FIT uses $m_{B^0}$ and $m_{B_J(5840)^+} - m_{B^0}$ to determine $m_{B_J(5840)^+}$.
+
+VALUE (MeV)
+
+DOCUMENT ID
+
+5851 ± 19 OUR FIT
+
+$m_{B_J(5840)^+} - m_{B^0}$
+
+VALUE (MeV)
+
+EVTS
+
+DOCUMENT ID
+
+TECN
+
+COMMENT
+
+571 ± 19 OUR FIT
+
+571 ± 13 ± 14
+
+7k
+
+$^1$AAIJ
+
+15AB LHCB pp at 7, 8 TeV
+
+• • • We do not use the following data for averages, fits, limits, etc. • • •
+
+595 ± 26 ± 14
+
+7k
+
+$^2$AAIJ
+
+15AB LHCB pp at 7, 8 TeV
+
+$^1$AAIJ 15AB reports $[m_{B_J^+} - m_{B^0}] - m_{\pi^+} = 431 \pm 13 \pm 14$ MeV which we adjust by the $\pi^+$ mass. The masses inside the square brackets were measured for each candidate event. The result assumes $P = (-1)^J$ and uses two relativistic Breit-Wigner functions in the fit for mass difference.
+
+$^2$AAIJ 15AB reports $[m_{B_J^+} - m_{B^0}] - m_{\pi^+} = 455 \pm 26 \pm 14$ MeV which we adjust by the $\pi^+$ mass. The masses inside the square brackets were measured for each candidate event. The result assumes $P = (-1)^J$ and uses three relativistic Breit-Wigner functions in the fit for mass difference.
+
+$m_{B_J(5840)^+} - m_{B^{*0}}$
+
+VALUE (MeV)
+
+EVTS
+
+DOCUMENT ID
+
+TECN
+
+COMMENT
+
+• • • We do not use the following data for averages, fits, limits, etc. • • •
+
+565 ± 15 ± 14
+
+7k
+
+$^1$AAIJ
+
+15AB LHCB pp at 7, 8 TeV
+
+$^1$AAIJ 15AB reports $[m_{B_J^+} - m_{B^0}] - (m_{B^{*+}} - m_{B^+}) - m_{\pi^+} = 425 \pm 15 \pm 14$ MeV which we adjust by the $\pi^+$ mass. The masses inside the square brackets were measured for each candidate event. The result assumes $P = -(-1)^J$, $(m_{B^{*0}} - m_{B^0}) = (m_{B^{*+}} - m_{B^+}) = 45.01 \pm 0.30 \pm 0.23$ MeV, and uses three relativistic Breit-Wigner functions in the fit for mass difference.
+
+## $B_J(5840)^0$ MASS
+
+OUR FIT uses $m_{B^+}$ and $m_{B_J(5840)^0} - m_{B^+}$ to determine $m_{B_J(5840)^0}$. 
+ +VALUE (MeV) + +DOCUMENT ID + +5863 ± 9 OUR FIT + +$m_{B_J(5840)^0} - m_{B^+}$ + +VALUE (MeV) + +EVTS + +DOCUMENT ID + +TECN + +COMMENT + +584 ± 9 OUR FIT + +584 ± 5 ± 7 + +12k + +$^1$AAIJ + +15AB LHCB pp at 7, 8 TeV +---PAGE_BREAK--- + +• • • We do not use the following data for averages, fits, limits, etc. • • • + +$$610 \pm 22 \pm 7 \qquad 12k \qquad ^{2}AAIJ \qquad 15AB \text{ LHCB } pp \text{ at } 7, 8 \text{ TeV}$$ + +$^{1}$AAIJ 15AB reports $[m_{B^0_j} - m_{B^+}] - m_{\pi^-} = 444 \pm 5 \pm 7$ MeV which we adjust by the $\pi^-$ mass. The masses inside the square brackets were measured for each candidate event. The result assumes $P = (-1)^J$ and uses two relativistic Breit-Wigner functions in the fit for mass difference. + +$^{2}$AAIJ 15AB reports $[m_{B^0_j} - m_{B^+}] - m_{\pi^-} = 471 \pm 22 \pm 7$ MeV which we adjust by the $\pi^-$ mass. The masses inside the square brackets were measured for each candidate event. The result assumes $P = (-1)^J$ and uses three relativistic Breit-Wigner functions in the fit for mass difference. + +## $m_{B_j(5840)^0} - m_{B^{*+}}$ + +
VALUE (MeV)EVTSDOCUMENT IDTECNCOMMENT
584 ± 5 ± 712k1AAIJ15AB LHCBpp at 7, 8 TeV
+ +$^{1}$AAIJ 15AB reports $[m_{B^0_j} - m_{B^+}] - (m_{B^{*+}} - m_{B^+}) - m_{\pi^-} = 444 \pm 5 \pm 7$ MeV which we adjust by the $\pi^-$ mass. The masses inside the square brackets were measured for each candidate event. The result assumes $P = -(-1)^J$, $(m_{B^{*+}} - m_{B^+}) = 45.01 \pm 0.30 \pm 0.23$ MeV, and uses three relativistic Breit-Wigner functions in the fit for mass difference. + +## $B_j(5840)^+$ WIDTH + +
VALUE (MeV)EVTSDOCUMENT IDTECNCOMMENT
224 ± 24 ± 807k1AAIJ15AB LHCBpp at 7, 8 TeV
+ +• • • We do not use the following data for averages, fits, limits, etc. • • • + +$$215 \pm 27 \pm 80 \qquad 7k \qquad ^{2}AAIJ \qquad 15AB \text{ LHCB } pp \text{ at } 7, 8 \text{ TeV}$$ + +$$229 \pm 27 \pm 80 \qquad 7k \qquad ^{3}AAIJ \qquad 15AB \text{ LHCB } pp \text{ at } 7, 8 \text{ TeV}$$ + +$^{1}$Assuming $P = (-1)^J$ and using two relativistic Breit-Wigner functions in the fit for mass difference. + +$^{2}$Assuming $P = (-1)^J$ and using three relativistic Breit-Wigner functions in the fit for mass difference. + +$^{3}$Assuming $P = -(-1)^J$ and using three relativistic Breit-Wigner functions in the fit for mass difference. + +## $B_j(5840)^0$ WIDTH + +
VALUE (MeV)EVTSDOCUMENT IDTECNCOMMENT
127 ± 17 ± 3412k1AAIJ15AB LHCBpp at 7, 8 TeV
+ +• • • We do not use the following data for averages, fits, limits, etc. • • • + +$$107 \pm 20 \pm 34 \qquad 12k \qquad ^{2}AAIJ \qquad 15AB \text{ LHCB } pp \text{ at } 7, 8 \text{ TeV}$$ + +$$119 \pm 17 \pm 34 \qquad 12k \qquad ^{3}AAIJ \qquad 15AB \text{ LHCB } pp \text{ at } 7, 8 \text{ TeV}$$ + +$^{1}$Assuming $P = (-1)^J$ and using two relativistic Breit-Wigner functions in the fit for mass difference. + +$^{2}$Assuming $P = (-1)^J$ and using three relativistic Breit-Wigner functions in the fit for mass difference. + +$^{3}$Assuming $P = -(-1)^J$ and using three relativistic Breit-Wigner functions in the fit for mass difference. +---PAGE_BREAK--- + +B$_J$(5840) DECAY MODES + + + + + + + + + + + + + + + + + + + + + +
ModeFraction (Γj/Γ)
Γ1B* πseen
Γ2B πpossibly seen
+ +B$_J$(5840) BRANCHING RATIOS + +$$ +\begin{tabular}{lcccccc} +\hline +$\Gamma(B^*\pi)/\Gamma_{\text{total}}$ & & & & & $\Gamma_1/\Gamma$ & \\ +\cline{2-7} +\multicolumn{1}{c|}{\underline{VALUE}} & \multicolumn{1}{c|}{\underline{EVTS}} & \multicolumn{1}{c}{\underline{DOCUMENT ID}} & \multicolumn{1}{c}{\underline{TECN}} & \multicolumn{1}{c}{\underline{CHG}} & \multicolumn{1}{c}{\underline{COMMENT}} & \\ +\cline{2-7} +\multicolumn{1}{c|}{seen} & \multicolumn{1}{c|}{7k} & \multicolumn{1}{c}{AAIJ} & \multicolumn{1}{c}{15AB LHCB} & \multicolumn{1}{c}{$\pm$} & \multicolumn{1}{c}{pp at 7, 8 TeV} & \\ +\cline{2-7} +\multicolumn{1}{c|}{seen} & \multicolumn{1}{c|}{12k} & \multicolumn{1}{c}{AAIJ} & \multicolumn{1}{c}{15AB LHCB} & \multicolumn{1}{c}{$0$} & \multicolumn{1}{c}{pp at 7, 8 TeV} & \\ +\hline +\end{tabular} +$$ + +$$ +\begin{tabular}{lcccccc} +\hline +$\Gamma(B\pi)/\Gamma_{\text{total}}$ & & & & & $\Gamma_2/\Gamma$ & \\ +\cline{2-7} +\multicolumn{1}{c|}{\underline{VALUE}} & \multicolumn{1}{c|}{\underline{EVTS}} & \multicolumn{1}{c}{\underline{DOCUMENT ID}} & \multicolumn{1}{c}{\underline{TECN}} & \multicolumn{1}{c}{\underline{CHG}} & \multicolumn{1}{c}{\underline{COMMENT}} & \\ +\cline{2-7} +\multicolumn{1}{c|}{\textbf{possibly seen}} & \multicolumn{1}{c|}{7k} & \multicolumn{1}{c}{\footnotesize 1 AAIJ} & \multicolumn{1}{c}{\footnotesize 15AB LHCB} & \multicolumn{1}{c}{$\pm$} & \multicolumn{1}{c}{\footnotesize pp at 7, 8 TeV} & \\ +\cline{2-7} +\multicolumn{1}{c|}{\textbf{possibly seen}} & & \multicolumn{1}{c}{\footnotesize 1 AAIJ} & \multicolumn{1}{c}{\footnotesize 15AB LHCB} & \multicolumn{1}{c}{$0$} & \multicolumn{1}{c}{\footnotesize pp at 7, 8 TeV} & \\ +\hline +\end{tabular} +$$ + +¹A Bπ decay is forbidden from a $P = -(-1)^J$ parent, whereas B*π is allowed. + +B$_J$(5840) REFERENCES + +AAIJ + +15AB JHEP 1504 024 + +R. Aaij et al. + +(LHCb Collab.) \ No newline at end of file