diff --git a/.gitattributes b/.gitattributes index decc63323ce59d8e8282be54b074a2ae8688859c..63a114bcd03de2356859379819dfd4e7f14662bd 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1868,3 +1868,82 @@ EdFKT4oBgHgl3EQfZy6F/content/2301.11805v1.pdf filter=lfs diff=lfs merge=lfs -tex QtFRT4oBgHgl3EQf7Tgp/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text _NE0T4oBgHgl3EQfxQFJ/content/2301.02643v1.pdf filter=lfs diff=lfs merge=lfs -text CNAyT4oBgHgl3EQfePiT/content/2301.00318v1.pdf filter=lfs diff=lfs merge=lfs -text +ddFKT4oBgHgl3EQfqi7m/content/2301.11875v1.pdf filter=lfs diff=lfs merge=lfs -text +TdE0T4oBgHgl3EQfUwDn/content/2301.02256v1.pdf filter=lfs diff=lfs merge=lfs -text +7tFLT4oBgHgl3EQfAi4b/content/2301.11966v1.pdf filter=lfs diff=lfs merge=lfs -text +ldAyT4oBgHgl3EQfyfmD/content/2301.00685v1.pdf filter=lfs diff=lfs merge=lfs -text +YdE3T4oBgHgl3EQfcQoq/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +otE0T4oBgHgl3EQfZwCz/content/2301.02326v1.pdf filter=lfs diff=lfs merge=lfs -text +a9FAT4oBgHgl3EQfXR0t/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +YdE3T4oBgHgl3EQfcQoq/content/2301.04523v1.pdf filter=lfs diff=lfs merge=lfs -text +ztE2T4oBgHgl3EQfiAcw/content/2301.03953v1.pdf filter=lfs diff=lfs merge=lfs -text +NtFPT4oBgHgl3EQfmDWp/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +M9FAT4oBgHgl3EQfxh78/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +ldAyT4oBgHgl3EQfyfmD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +_dFIT4oBgHgl3EQf9yvA/content/2301.11408v1.pdf filter=lfs diff=lfs merge=lfs -text +0tAzT4oBgHgl3EQfRPvU/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +7tFLT4oBgHgl3EQfAi4b/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +sNFKT4oBgHgl3EQf1S6Z/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +a9FAT4oBgHgl3EQfXR0t/content/2301.08532v1.pdf filter=lfs diff=lfs merge=lfs -text +0tAzT4oBgHgl3EQfRPvU/content/2301.01214v1.pdf 
filter=lfs diff=lfs merge=lfs -text +EdFKT4oBgHgl3EQfZy6F/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +79E0T4oBgHgl3EQffQDe/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +stE4T4oBgHgl3EQfww21/content/2301.05253v1.pdf filter=lfs diff=lfs merge=lfs -text +stE4T4oBgHgl3EQfww21/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +ftE3T4oBgHgl3EQf3Qv5/content/2301.04763v1.pdf filter=lfs diff=lfs merge=lfs -text +M9FAT4oBgHgl3EQfxh78/content/2301.08688v1.pdf filter=lfs diff=lfs merge=lfs -text +XNFOT4oBgHgl3EQf8TTm/content/2301.12966v1.pdf filter=lfs diff=lfs merge=lfs -text +ddAzT4oBgHgl3EQfLvvB/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +Q9AyT4oBgHgl3EQf7vq-/content/2301.00845v1.pdf filter=lfs diff=lfs merge=lfs -text +6NAzT4oBgHgl3EQff_yv/content/2301.01462v1.pdf filter=lfs diff=lfs merge=lfs -text +ftE3T4oBgHgl3EQf3Qv5/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +6NAzT4oBgHgl3EQff_yv/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +_dFIT4oBgHgl3EQf9yvA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +59AyT4oBgHgl3EQfpfhu/content/2301.00526v1.pdf filter=lfs diff=lfs merge=lfs -text +u9AyT4oBgHgl3EQfm_jD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +ztE2T4oBgHgl3EQfiAcw/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +sdE_T4oBgHgl3EQf8hzE/content/2301.08376v1.pdf filter=lfs diff=lfs merge=lfs -text +PdFIT4oBgHgl3EQfeytv/content/2301.11276v1.pdf filter=lfs diff=lfs merge=lfs -text +PdFIT4oBgHgl3EQfeytv/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +yNE2T4oBgHgl3EQf3wh7/content/2301.04174v1.pdf filter=lfs diff=lfs merge=lfs -text +TdE0T4oBgHgl3EQfUwDn/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +eNAzT4oBgHgl3EQfaPxf/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +Q9AyT4oBgHgl3EQf7vq-/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +VtE1T4oBgHgl3EQfIwPB/content/2301.02944v1.pdf 
filter=lfs diff=lfs merge=lfs -text +ddFKT4oBgHgl3EQfqi7m/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +u9AyT4oBgHgl3EQfm_jD/content/2301.00482v1.pdf filter=lfs diff=lfs merge=lfs -text +BtE0T4oBgHgl3EQfyAJM/content/2301.02653v1.pdf filter=lfs diff=lfs merge=lfs -text +ntFST4oBgHgl3EQfLzgJ/content/2301.13741v1.pdf filter=lfs diff=lfs merge=lfs -text +TdE0T4oBgHgl3EQflQEG/content/2301.02482v1.pdf filter=lfs diff=lfs merge=lfs -text +sNFKT4oBgHgl3EQf1S6Z/content/2301.11919v1.pdf filter=lfs diff=lfs merge=lfs -text +z9AzT4oBgHgl3EQfRPsg/content/2301.01211v1.pdf filter=lfs diff=lfs merge=lfs -text +XNFOT4oBgHgl3EQf8TTm/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +A9E2T4oBgHgl3EQf8Qn_/content/2301.04218v1.pdf filter=lfs diff=lfs merge=lfs -text +4NFAT4oBgHgl3EQflx07/content/2301.08619v1.pdf filter=lfs diff=lfs merge=lfs -text +BdAzT4oBgHgl3EQfGPtU/content/2301.01024v1.pdf filter=lfs diff=lfs merge=lfs -text +BtE0T4oBgHgl3EQfyAJM/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +otE0T4oBgHgl3EQfZwCz/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +DdE2T4oBgHgl3EQfoQiw/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +sdE_T4oBgHgl3EQf8hzE/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +f9AzT4oBgHgl3EQfof2V/content/2301.01598v1.pdf filter=lfs diff=lfs merge=lfs -text +yNE2T4oBgHgl3EQf3wh7/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +BdAzT4oBgHgl3EQfGPtU/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +jtE5T4oBgHgl3EQfGQ7H/content/2301.05430v1.pdf filter=lfs diff=lfs merge=lfs -text +TdE4T4oBgHgl3EQfmQ04/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +4NFAT4oBgHgl3EQflx07/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +TdE0T4oBgHgl3EQflQEG/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +ctFAT4oBgHgl3EQf6B4X/content/2301.08736v1.pdf filter=lfs diff=lfs merge=lfs -text +tdE5T4oBgHgl3EQfKw7w/content/2301.05469v1.pdf 
filter=lfs diff=lfs merge=lfs -text +KtAyT4oBgHgl3EQfsfmL/content/2301.00578v1.pdf filter=lfs diff=lfs merge=lfs -text +pNAyT4oBgHgl3EQflvio/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +O9AyT4oBgHgl3EQfg_h-/content/2301.00369v1.pdf filter=lfs diff=lfs merge=lfs -text +UdAyT4oBgHgl3EQfuvkh/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +BtE1T4oBgHgl3EQf9gZu/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +n9E2T4oBgHgl3EQfzwgf/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +8tE3T4oBgHgl3EQfqgri/content/2301.04653v1.pdf filter=lfs diff=lfs merge=lfs -text +KNE4T4oBgHgl3EQfiA0C/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +DdE2T4oBgHgl3EQfoQiw/content/2301.04017v1.pdf filter=lfs diff=lfs merge=lfs -text +ctFAT4oBgHgl3EQf6B4X/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +WdAzT4oBgHgl3EQfmP2O/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +29A0T4oBgHgl3EQfNP8E/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +6dA0T4oBgHgl3EQfN__p/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text diff --git a/0tAzT4oBgHgl3EQfRPvU/content/2301.01214v1.pdf b/0tAzT4oBgHgl3EQfRPvU/content/2301.01214v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..25fe35027c6b85a6e04d2583aa741eb47e44f0a2 --- /dev/null +++ b/0tAzT4oBgHgl3EQfRPvU/content/2301.01214v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71c4d16fc26a2ed48c141ce0bc28d2d52b276e074e755bbca483d1b0ed00e44f +size 2372819 diff --git a/0tAzT4oBgHgl3EQfRPvU/vector_store/index.faiss b/0tAzT4oBgHgl3EQfRPvU/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..1693009158e9e624bfdb4ba3044c176d8d1c4870 --- /dev/null +++ b/0tAzT4oBgHgl3EQfRPvU/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb9cd9cda5de9e6f34fd10dbd973662246c41099aa275707bf001112ca37ed00 +size 7143469 diff --git 
a/0tAzT4oBgHgl3EQfRPvU/vector_store/index.pkl b/0tAzT4oBgHgl3EQfRPvU/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..4d34cfaae05f99e0ac0ffffaf6533a0ef456a82c --- /dev/null +++ b/0tAzT4oBgHgl3EQfRPvU/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b69565678fc1a4923d4f435f8e98cb63c384525bacd7278ce983e99f01406018 +size 228025 diff --git a/1NAzT4oBgHgl3EQfDfoM/vector_store/index.pkl b/1NAzT4oBgHgl3EQfDfoM/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..341daf266c45930b2c6aa537628bc8ff70129291 --- /dev/null +++ b/1NAzT4oBgHgl3EQfDfoM/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bada75803874868647287272679eb91f2830beb07a2d7d272b0c9cf78e9c068 +size 207096 diff --git a/1NE3T4oBgHgl3EQfnQph/content/tmp_files/2301.04623v1.pdf.txt b/1NE3T4oBgHgl3EQfnQph/content/tmp_files/2301.04623v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..85494a0aecef60d9dbfbc7fecbe9f03f684d0eaf --- /dev/null +++ b/1NE3T4oBgHgl3EQfnQph/content/tmp_files/2301.04623v1.pdf.txt @@ -0,0 +1,1502 @@ +Enhancing ResNet Image Classification Performance by using +Parameterized Hypercomplex Multiplication +Nazmul Shahadat, Anthony S. Maida +University of Louisiana at Lafayette +Lafayette LA 70504, USA +nazmul.ruet@gmail.com, maida@louisiana.edu +Abstract +Recently, many deep networks have introduced hy- +percomplex and related calculations into their architec- +tures. In regard to convolutional networks for classifica- +tion, these enhancements have been applied to the con- +volution operations in the frontend to enhance accuracy +and/or reduce the parameter requirements while main- +taining accuracy. 
Although these enhancements have +been applied to the convolutional frontend, it has not +been studied whether adding hypercomplex calculations +improves performance when applied to the densely con- +nected backend. This paper studies ResNet architectures +and incorporates parameterized hypercomplex multipli- +cation (PHM) into the backend of residual, quaternion, +and vectormap convolutional neural networks to assess +the effect. We show that PHM does improve classifica- +tion accuracy performance on several image datasets, +including small, low-resolution CIFAR 10/100 and large +high-resolution ImageNet and ASL, and can achieve +state-of-the-art accuracy for hypercomplex networks. +1. Introduction +Convolutional neural networks (CNNs) have been +widely used, with great success, in visual classification +tasks [3, 12] because of their good inductive priors and +intuitive design. +Most deep learning building blocks in CNNs use +real-valued operations. +However, recent studies have +explored the complex/hypercomplex space and showed +that hypercomplex valued networks can perform bet- +ter than their real-valued counterparts due to the weight +sharing mechanism embedded in the hypercomplex mul- +tiplication [7,18]. This weight sharing differs from that +found in the real-valued convolution operation. Specif- +ically, quaternion convolutions share weights across in- +put channels enabling them to discover cross-channel in- +put relationships that support more accurate prediction +(a) Validation accuracy comparison for CIFAR-10 data. +(b) Validation accuracy comparison for CIFAR-100 data. +Figure 1. Top-1 validation accuracy comparison among orig- +inal ResNets [7], original quaternion networks [7], original +vectormap networks [7], our proposed QPHM and VPHM net- +works for CIFAR benchmarks +and generalization. The effectiveness of quaternion net- +works is shown in [7,16,19,23,31]. 
+The weight-sharing properties of the Hamiltonian +product allow the discovery of cross-channel relation- +ships. This is a new plausible inductive bias, namely, +that there are data correlations across convolutional in- +put channels that enhance discovery of effective cross- +channel features. Practitioners have applied these cal- +culations in the convolution stages of CNNs but not +to the dense backend where real-valued operations are +still used. The present paper puts weight-sharing cal- +culations in the dense backend to further improve CNN +arXiv:2301.04623v1 [cs.CV] 11 Jan 2023 + +96 +Top-1 Validation Accuracy +95.5 +95 +94.5 +HH +94 +93.5 +93 +ResNet Original Quaternion +Vectormap +QPHM +VPHM +Original +Original +ResNet-18 +ResNet-34 +ResNet-5082 +80 +Top-1 Validation Accuracy +78 +76 +74 +72 +70 +68 +66 +ResNet Original +Quaternion +Vectormap +QPHM +VPHM +Original +Original +ResNet-18 +ResNet-34 +ResNet-50performance. To exploit this new type of weight shar- +ing, we use a parameterized hypercomplex multiplica- +tion (PHM) [30] layer as a building block. This block +replaces the real-valued FC layers with hypercomplex +FC layers. We test the hypothesis using two types of +hypercomplex CNNs, namely quaternion [8] CNNs and +vectormap [7] CNNs. +Our contributions are: +• Showing the effectiveness of using hypercomplex +networks in the densely connected backend of a +CNN. +• Introducing quaternion networks with PHM based +dense layer (QPHM) to bring hypercomplex deep +learning properties to the entire model. +• Introducing vectormap networks with a PHM based +dense layer (VPHM) to remove hypercomplex di- +mensionality constraints from the frontend and +backend. +The effectiveness of employing PHM based FC lay- +ers with hypercomplex networks is seen in Figures 1a +and 1b. +We also show that these new models ob- +tain SOTA results for hypercomplex networks in CIFAR +benchmarks. Our experiments also show SOTA results +for American Sign Language (ASL) data. 
Moreover, our +models use fewer parameters, FLOPS, and latency com- +pared to the base model proposed by [7,23] for classifi- +cation. +2. Background and Related Work +2.1. Quaternion Convolution +Quaternions are four dimensional vectors of the form +Q = r + ix + jy + kz ; r, x, y, z ∈ R +(1) +where, r, x, y, and z are real values and i, j, and k are +the imaginary values which satisfy i2 = j2 = k2 = +ijk = −1. Quaternion convolution is defined by con- +volving a quaternion filter matrix with a quaternion vec- +tor (or feature map). Let, QF = R + iX + jY + kZ +be a quaternion filter matrix with R, X, Y, and Z be- +ing real-valued kernels and QV = r + ix + jy + kz +be a quaternion input vector with r, x, y, and z being +real-valued vectors. Quaternion convolution is defined +below [8]. +QF ⊛ QV = (R ∗ r − X ∗ x − Y ∗ y − Z ∗ z) ++i(R ∗ x + X ∗ r + Y ∗ z − Z ∗ y) ++j(R ∗ y − X ∗ z + Y ∗ r + Z ∗ x) ++k(R ∗ z + X ∗ y − Y ∗ x + Z ∗ r) +(2) +There are 16 real-valued convolutions but only four ker- +nels which are reused. +This is how the weight shar- +ing occurs. [18] first described the weight sharing in the +Hamilton product. +2.2. Vectormap Convolution +[7] noted that the Hamilton product and quaternion +convolution, when used in deep networks, did not re- +quire the entire Quaternion algebra. They called these +vectormap convolutions. +The weight sharing ratio is +1 +N where N is the dimension of the vectormap, Dvm. +Let V 3 +in = [v1, v2, v3] be an RGB input vector and +W 3 = [w1, w2, w3] a weight vector with N = 3. We +use a permutation τ on inputs so each input vector is +multiplied by each weight vector element: +τ(vi) = +� +v3 +i = 1 +vi−1 +i > 1 +(3) +After applying circularly right shifted permutation to +V 3 +in, a new vector V 3 is formed. The permutation of +weight τ(W 3) can be found like equation 3. Hence, the +output vector Vout is: +V 3 +out = [W 3 · V 3 +in, τ(W 3) · V 3 +in, τ 2(W 3) · V 3 +in] +(4) +Here, “·” denotes dot product. 
The outputs V 3 +out come +from the linear combination of the elements of V 3 +in and +W 3. Let the weight filter matrix for a vectormap be +VF = [A, B, C] and the input vector after linear com- +bination be Vh = [x, y, z], the vectormap convolution +between VF , and Vh for Dvm = 3 is: +� +� +R(VF ∗ Vh) +I (VF ∗ Vh) +J (VF ∗ Vh) +� +� = L ⊙ +� +� +A +B +C +C +A +B +B +C +A +� +� ∗ +� +� +x +y +z +� +� +(5) +where, L is a learnable matrix defined as a matrix L ∈ +RDvm×Dvm which is initialized using: +lij = +� +� +� +� +� +� +� +� +� +� +� +� +� +� +� +1 +i = 1 +1 +i = j +1 +j = Cali where Cali = (i + (i − 1)) & +Cali = Cali − Dvm if Cali > Dvm +−1 +else. +(6) +By choosing Dvm and assigning a new constant matrix +L ∈ RDvm×Dvm matching Dvm, any dimensional hy- +percomplex convolution can be used. Vectormap weight +initialization uses a similar mechanism to complex [25] +and quaternion [8] weight initialization. Our weight ini- +tialization follows [7]. + +2.3. PHM Fully Connected Layer +The above methods apply to convolutional layers but +not to fully connected (FC) layers. [30] proposed pa- +rameterized hypercomplex multiplication (PHM) for FC +layers. Like vectormaps, PHM can have any dimension. +If the dimension is four, it is like the Hamilton prod- +uct. The success of the Hamiltonian product is shown +in [7,8,16,19,28,31]. Our work uses two different PHM +dimensions: four for quaternion networks, and five for +vectormap networks. +A +fully +connected +layer +is +defined +[30] +as +y = FC(x) = Wx + b, where W ∈ Rk×d and +b ∈ Rk are weights and bias, d and k are input and +output dimensions, and x ∈ Rd, y ∈ Rk. PHM uses +the following hypercomplex transform to map input +x ∈ Rd into output y ∈ Rk as y = PHM (x) = Hx + b, +where H ∈ Rk×d is the sum of Kronecker products. +Like Dvm, let the dimension of the PHM module +be Dphm = N. +The PHM operation requires that +both d and k are divisible by N. 
+H is the sum +of Kronecker products of the parameter matrices +Ai ∈ RN×N and Si ∈ Rk/N×d/N, where i = 1 . . . N: +H = �N +i=1 Ai ⊗ Si. Parameter reduction comes from +reusing matrices A and S in the PHM layer. The ⊗ is +the Kronecker product. H is multiplied with the input +in the dense layer. The four dimensional PHM layer is +explained in [30]. We also use five dimensions which +is explained here. The learnable parameters for N = 5 +are Pr, Pw, Px, Py, and Pz where P ∈ R1×1. For Ai +we use the hypercomplex matrix (5 dimensions) which +is generated in a similar way of vectormap convolution +(Equations 5 and 6). H is calculated using two learnable +parameter matrices (Ai, and Si) for N = 5 as follows: +H = +� +����� +1 +0 +0 +0 +0 +0 +1 +0 +0 +0 +0 +0 +1 +0 +0 +0 +0 +0 +1 +0 +0 +0 +0 +0 +1 +� +����� +� +�� +� +A1 +⊗ +�Pr +� +���� +S1 ++ +� +����� +0 +1 +0 +0 +0 +0 +0 +1 +0 +0 +0 +0 +0 +-1 +0 +0 +0 +0 +0 +-1 +-1 +0 +0 +0 +0 +� +����� +� +�� +� +A2 +⊗ +�Pw +� +� �� � +S2 ++ +� +����� +0 +0 +1 +0 +0 +0 +0 +0 +-1 +0 +0 +0 +0 +0 +1 +-1 +0 +0 +0 +0 +0 +-1 +0 +0 +0 +� +����� +� +�� +� +A3 +⊗ +�Px +� +���� +S3 ++ +� +����� +0 +0 +0 +1 +0 +0 +0 +0 +0 +-1 +-1 +0 +0 +0 +0 +0 +1 +0 +0 +0 +0 +0 +-1 +0 +0 +� +����� +� +�� +� +A4 +⊗ +�Py +� +���� +S4 ++ +� +����� +0 +0 +0 +0 +1 +-1 +0 +0 +0 +0 +0 +-1 +0 +0 +0 +0 +0 +-1 +0 +0 +0 +0 +0 +1 +0 +� +����� +� +�� +� +A5 +⊗ +�Pz +� +���� +S5 += +� +����� +Pr +Pw +Px +Py +Pz +−Pz +Pr +Pw +−Px +−Py +−Py +−Pz +Pr +−Pw +Px +−Px +−Py +−Pz +Pr +−Pw +−Pw +−Px +−Py +Pz +Pr +� +����� +(7) +Equation 7 for N = 5 expresses the Hamiltonian prod- +uct of hypercomplex layer. It preserves all PHM layer +properties. +3. Proposed Models: QPHM and VPHM +We propose a new fully hypercomplex model in lieu +of hypercomplex CNNs that use a real-valued backend +dense layer. That is, we replace the dense layer with a +PHM layer to enjoy the benefits of hypercomplex weight +sharing. 
+We chose two base hypercomplex models for the con- +volutional frontend, the quaternion network and vec- +tormap network [7, 8] which were using real-valued +backend layers. To match dimensions with frontend net- +works, we used a PHM layer at four dimensions with the +quaternion network and a PHM layer at five dimensions +with the three dimensional vectormap network. In some +cases, we also needed to use a PHM layer at five dimen- +sions with quaternion networks. But we couldn’t use a +three dimensional PHM layer as the output classes must +be divisible by the dimensions in the PHM operation. +Figure 2 shows our proposed PHM based FC layer +with quaternion convolutional neural networks (QC- +NNs). At the end of QCNNs (end of layer 4 in Figure +2 (top)), the output feature maps are flattened. This flat- +tened layer is normally the input to a fully connected +layer, but in our proposed method this layer is the input +layer for the PHM based FC layer. This is represented +as Pin. The parameterized weight H performs parame- +terized multiplication to find the hyper-complex output +Pout. The type of PHM layer depends on the dimensions +needed. For quaternion networks, we used dimensions +four and five according to the number of classes in the +datasets. The figures in Figure 2 (bottom) are expanded +4D PHM and 5D PHM layer of a single dense layer con- +nection (red marked in Figure 2 (top)). +Pin = Prin + Pwin + Pxin + Pyin + Pzin +(8) +For the PHM layer with five dimensions, each PHM +layer accepts five channels of input like Prin, Pwin, + +Figure 2. Full hypercomplex network where quaternion convolutional neural networks (QCNNs) are used in the front and PHM +based fully-connected layers are applied in the back-end. 5-dimensional PHM is explained in Equation 7. Equations 8 and 9 +describe input and output for a 5D PHM layer. 4D PHM is similar. 
+Layer +Output +size +Quaternion +ResNet +Vectormap +ResNet +QPHM +VPHM +Stem +32x32 +3x3Q, 112, std=1 +3x3V, 90, std=1 +3x3Q, 112, std=1 +3x3V, 90, std=1 +Bottleneck +group 1 +32x32 +� +� +1x1Q, 112 +3x3Q, 112 +1x1Q, 448 +� +� ×3 +� +� +1x1V, 90 +3x3V, 90 +1x1V, 360 +� +� ×3 +� +� +1x1QP, 112 +3x3QP, 112 +1x1QP, 448 +� +� ×3 +� +� +1x1VP, 90 +3x3VP, 90 +1x1VP, 390 +� +� ×3 +Bottleneck +group 2 +16x16 +� +� +1x1Q, 224 +3x3Q, 224 +1x1Q, 896 +� +� ×4 +� +� +1x1V, 180 +3x3V, 180 +1x1V, 720 +� +� ×4 +� +� +1x1QP, 224 +3x3QP, 224 +1x1QP, 896 +� +� ×4 +� +� +1x1VP, 180 +3x3VP, 180 +1x1VP, 720 +� +� ×4 +Bottleneck +group 3 +8x8 +� +� +1x1Q, 448 +3x3Q, 448 +1x1Q, 1792 +� +� ×6 +� +� +1x1V, 360 +3x3V, 360 +1x1V, 1440 +� +� ×6 +� +� +1x1QP, 448 +3x3QP, 448 +1x1QP, 1792 +� +� ×6 +� +� +1x1VP, 360 +3x3VP, 360 +1x1VP, 1440 +� +� ×6 +Bottleneck +group 4 +4x4 +� +� +1x1Q, 896 +3x3Q, 896 +1x1Q, 3584 +� +� ×3 +� +� +1x1V, 720 +3x3V, 720 +1x1V, 2880 +� +� ×3 +� +� +1x1QP, 896 +3x3QP, 896 +1x1QP, 3584 +� +� ×3 +� +� +1x1VP, 720 +3x3VP, 720 +1x1VP, 2880 +� +� ×3 +Pooling +Layer +1x1x100 +global average-pool, 100 outputs +Output +1x1x100 +fully connected Layer, softmax +QPHM Layer +VPHM Layer +Table 1. The 50-layer architectures tested on CIFAR-100: quaternion ResNet [7, 8], vectormap ResNet [7], our proposed QPHM, +and VPHM. Input is a 32x32x3 color image. The number of stacked bottleneck modules is specified by multipliers. “Q”, “V”, +“QP”, “VP”, and “std” denote quaternion convolution, vectormap convolution, QPHM (quaternion network with PHM layer), +VPHM (vectormap network with PHM layer), and stride respectively. Integers (e.g., 90, 112) denote number of output channels. +Pxin, Pyin, and Pzin (Equation 8) and produces five +channels of output like Prout, Pwout, Pxout, Pyout, +and Pzout which are merged or stacked together to Pout +as, +Pout = Prout +Pwout +Pxout +Pyout +Pzout (9) +Hence, +the +representational +feature +maps +persist +throughout the classification network. 
+Similarly, this + +Parameterized +STAGE 1 +STAGE 2 +STAGE 3 +STAGE 4 +Weight +Pin +Input +H +Horse +Cat +224x224x4 +Classification +Stem Layer +PHM +based FC Layer +QCNN +64 Filters +2nd Layer +3rd Layer +4rth Layer +1st Layer +Filter size 7 +QCNN +QCNN +QCNN +Flatten +QCNN +128 Filters +256 Filters + 512 Filters +Layer +With stride 2 +64 Filters +max-pooling +Stride 2 +Stride 2 +Stride 2 +Stride 1 +5-dimensional PHM layer (VPHM) +4-dimensional PHM layer (QPHM) +PPHM (both 4D, and 5D) dense layer is applied in +the backend of original ResNet [10] which we named +RPHM (ResNet-with-PHM). +4. Experiment +The purpose of the experiments reported herein was +to test whether replacing the real-valued backend of +a CNN model with a PHM backend improved clas- +sification performance. The architectures tested were +real-valued, quaternion-valued [8, 19], and vectormap +ResNet [7], either with or without the PHM backend. +We refer to the quaternion ResNet model with the PHM +backend as QPHM. Similarly, VPHM, RPHM denote +the vectormap ResNet, and real-valued ResNet models +with the PHM backend. +Our experiments were conducted on the following +datasets: CIFAR-10/100 [14], the ImageNet300k dataset +[23] and the American Sign Language Hand Gesture +color image recognition dataset [5]. +The first two +datasets have less training samples with small image +resolutions and the other datasets use a large number +of training samples with higher resolution images. We +used these datasets to check our proposed models for +small and large training samples as well as for small and +high resolution images. The experiments were run on a +workstation with an Intel(R) Core(TM) i9-9820X CPU +@ 3.30GHz, 128 GB memory, and NVIDIA Titan RTX +GPU (24GB). +4.1. CIFAR Classification +In addition to testing the PHM with real-valued, +quaternion-valued, and vectormap ResNet, we tested the +network models with three depths: 18, 34, and 50 layers. 
+4.1.1 +Method +We tested all of the above mentioned architectures with +and without the PHM backend on both the CIFAR-10 +and CIFAR-100 datasets. These datasets were composed +of 32x32 pixel RGB images falling into either ten classes +or 100 classes, respectively. Both datasets have 50,000 +training, and 10,000 test examples. +The models were trained using the same components +as the real-valued networks, the original quaternion net- +work, and the original vectormap network using the +same datasets. All models in Table 2 were trained us- +ing the same hyperparameters. Our QPHM and VPHM +design is similar to the quaternion [7,8], and vectormap +networks [7], respectively. The residual architectures +differ in the number of output channels than the origi- +nal hypercomplex networks and the proposed networks +due to keeping the number of trainable parameters about +the same. The number of output channels for the resid- +ual networks is the same as [7] and [19]. Table 1 shows +the 50-layer architectures tested for CIFAR-100 dataset. +One goal is to see if the representations generated by +the PHM based dense layer instead of the real-valued +dense layer outperforms the quaternion, vectormap, and +residual baselines reported in [7]. +We also analyzed +different residual architectures to assess the effect of +depth on our proposed models. For preprocessing, we +followed [7]. We used stochastic gradient descent op- +timization with 0.9 Nesterov momentum. +The learn- +ing rate was initially set to 0.1 with warm-up learning +for the first 10 epochs. For smooth learning, we chose +cosine learning from epochs 11 to 120. However, we +were getting about same performance for linear learn- +ing. All models were trained for 120 epochs and batch +size was set to 100. This experiment used batch normal- +ization and 0.0001 weight decay. The implementation +is on github at-https://github.com/nazmul729/QPHM- +VPHM.git. 
+4.1.2 +Results +The main results appear in Figure 1 and 3, and in Ta- +ble 2. Figure 1 gives the overall pattern of results. Fig- +ure 1a shows results for CIFAR-10. It shows top-1 vali- +dation accuracy for the five models: real-valued ResNet, +quaternion-valued ResNet, vectormap ResNet, QPHM, +and VPHM. Also, results are shown for 18, 34, and 50 +layers. We chose top-1 performance out of three. Fig- +ure 1b shows the same consistent pattern of results for +the CIFAR-100 dataset. The magnitude of improvement +is higher for CIFIR-100 than for CIFAR-10. The results +are also shown in tabular form in Table 2, along with +counts of trainable parameters, flops, and latency. It can +be seen in Table 2 that modifying the backend to have +a PHM layers has little effect on the parameter count, +flops, and latency as the input image resolutions, and +the number of output classes are low. +The proposed QPHM model attains better top-1 val- +idation accuracy than the original ResNet, quaternion, +and vectormap networks for both datasets. The QPHM +also produces better performance compared to the pro- +posed VPHM, and RPHM models. Moreover, we com- +pare our best performance which is obtained by the +QPHM model, to the deep or shallow complex or hyper- +complex networks and notice that the QPHM is achieved +SOTA performance (shown in Table 3) for the CIFAR- +10 and -100 datasets. +Table 3 compares different complex or hypercom- +plex networks top-1 validation accuracy with our best +result. Our comparison was not limited to [7] and com- +plex space. The QPHM also gains highest top-1 vali- +dation accuracy than the relevant CNN models for both +datasets (shown in Table 4). 
Tables 2, 3, and 4, show that + +Model Name +Param Count +FLOPS +Latency +Validation Accuracy +CIFAR-10 +CIFAR-100 +ResNet18 [10] +11.1M +0.56G +0.22ms +94.08 +72.19 +RPHM18 +11.1M +0.55G +0.21ms +94.74 +77.83 +Quat18 [8] +8.5M +0.26G +0.36ms +94.08 +71.23 +Vect18 [7] +7.3M +0.21G +0.29ms +93.95 +72.82 +QPHM18 +8.5M +0.25G +0.35ms +95.03 +77.88 +VPHM18 +7.3M +0.20G +0.27ms +94.97 +77.80 +ResNet34 [10] +21.2M +1.16G +0.29ms +94.27 +72.19 +RPHM34 +21.1M +1.15G +0.28ms +94.98 +77.80 +Quat34 [8] +16.3M +0.438G +0.57ms +94.27 +72.76 +Vect34 [7] +14.04M +0.35G +0.45ms +94.45 +74.12 +QPHM34 +16.3M +0.432G +0.54ms +95.40 +78.51 +VPHM34 +14.03M +0.34G +0.44ms +95.41 +77.23 +ResNet50 [10] +23.5M +1.30G +0.478ms +93.90 +72.60 +RPHM50 +20.6M +1.29G +0.468ms +95.59 +79.21 +Quat50 [8] +18.08M +1.45G +0.97ms +93.90 +72.68 +Vect50 [7] +15.5M +1.19G +0.77ms +94.28 +74.84 +QPHM50 +18.07M +1.44G +0.96ms +95.59 +80.25 +VPHM50 +15.5M +1.15G +0.76ms +95.48 +78.91 +Table 2. Image classification performance on the CIFAR benchmarks for 18, 34 and 50-layer architectures. Here, Quat, Vect, +QPHM, and VPHM, define the quaternion ResNet, vectormap ResNet, quaternion networks with PHM FC layer, and vectormap +networks with PHM FC layer, respectively. +(a) Validation loss versus training. +(b) Validation accuracy versus training. +Figure 3. Validation loss and accuracy of 50 layer ResNet [7], quaternion [7], vectormap [7], QPHM, VPHM for CIFAR-100. +our QPHM model achieves best performance for CIFAR +10 and 100 datasets with fewer parameters, flops, and la- +tency. +4.2. ImageNet Classification +4.2.1 +Method +These experiments are performed on a 300k subset of +the ImageNet dataset which we call ImageNet300k [23]. +[23] explains how the full dataset was sampled. The +models compared are: standard ResNets [23], quater- +nion convolutional ResNets [23], and our proposed +QPHM. 
We ran 26, 35, and 50-layers architectures using +“[1, 2, 4, 1]”, “[2, 3, 4, 2]” and “[3, 4, 6, 3]” bottleneck +block multipliers. Training (all models in Table 5) used +the same optimizer and hyperparameters as CIFAR clas- +sification method. +4.2.2 +Experimental Results +Table 5 shows the results on the ImageNet300k dataset. +This result shows that our model takes three millions +fewer trainable parameters and yields almost 5% higher +validation performance for the same architectures. Pa- + +4.5 +ResNet +4 +Quaternion +Vectormap +3.5 +QPHM +3 +VPHM +Loss +2.5 +2 +1.5 +1 +0.5 +0 +8 +345438 +5 +0 +9 +118 +127 +6 +145 +154 +8 +172 +3 +Epochs90 +80 +70 +60 +Accuracy +ResNet +50 +Quaternion +40 +.Vectormap +30 +QPHM +20 +VPHM +10 +0 +1 +9 +8 +3 +4 +5 +43 +8 +100 +109 +8 +127 +136 +145 +154 +163 +172 +EpochsModel Architecture +Validation Accuracy +CIFAR-10 +CIFAR-100 +DCNs [2] +38.90 +42.6 +DCN [25] +94.53 +73.37 +QCNN [16] +77.48 +47.46 +Quat [31] +77.78 +- +QCNN [28] +83.09 +- +QCNN* [28] +84.15 +- +Quaternion18 [7] +94.80 +71.23 +Quaternion34 [7] +94.27 +72.76 +Quaternion50 [7] +93.90 +72.68 +Octonion [27] +94.65 +75.40 +Vectormap18 [7] +93.95 +72.82 +Vectormap34 [7] +94.45 +74.12 +Vectormap50 [7] +94.28 +74.84 +QPHM-50 +95.59 +80.25 +VPHM-50 +95.48 +78.91 +Table 3. +Top-1 validation accuracy for hypercomplex net- +works. DCN stands for deep complex convolutional network. +* variant used quaternion batch normalization. Quaternion and +vectormap networks are the base networks [7] +rameter reduction is not depicted in Table 3 for low res- +olution CIFAR benchmark images as they have saved +parameters in thousands. It is also clear that deeper net- +works perform better than shallow networks. +4.3. ASL Classification +4.3.1 +Method +To compare the proposed QPHM model with other +networks, +we +evaluated +it +on +the +ASL +Alpha- +bet dataset [26] publicly available on Kaggle at +https://www.kaggle.com/grassknoted/asl-alphabet. 
This +dataset has 87,000 hand-gesture images for 29 sign +classes where each class has about 3,000 images. And, +the image size is 200 × 200 × 3. +It has 26 finger spelling alphabet classes for the En- +glish alphabetic letters and three special characters. Due +to the divisibility restriction in PHM, we cannot use 29 +classes as 29 is prime. Like all other baseline leave-one- +out and half-half methods [5,13,15,20,24], we exclude +one class (letter B) from the training and validation sets. +We use the same hyperparameters as CIFAR classifica- +tion method. +4.3.2 +Experimental Results +Due to the divisibility limitation, it is not possible to +evaluate the ASL data using VPHM model as we choose +PHM with five dimensions for VPHM model. So we +only tested the QPHM (PHM with four dimensions) +model to compare with other networks on the ASL +Model Architecture +Validation Accuracy +CIF10 +CIF100 +Convolutional Networks +ResNet18 [9] +90.27 +63.41 +ResNet34 [9] +90.51 +64.52 +ResNet50 [9] +90.60 +61.68 +ResNet110 [9] +95.08 +76.63 +ResNet1001 [11] +95.08 +77.29 +MobileNet [9] +91.02 +67.44 +Cout [4] +95.28 +77.54 +Wide Residual Networks +WRN-28-10 +96.00 +80.75 +WRN-28-10-dropout +96.11 +81.15 +Our Method +QPHM50 +95.59 +80.25 +VPHM50 +95.48 +78.91 +QPHM-18-2 (ours) +96.24 +81.45 +QPHM-50-2 (ours) +96.63 +82.00 +Table 4. Top-1 validation accuracy comparison among deep +networks. +CIF10 and CIF100 stand for CIFAR10 and CI- +FAR100. Cout is the ResNet-18+cutout. WRN-28-10 [29], +QPHM-18-2, and QPHM-50-2 stand for wide ResNet 28, 18, +and 50-layers with the output channel widening factor 10, 2, +and 2, respectively. +dataset. Table 6 provides a comparison of top-1 val- +idation accuracy of our proposed QPHM model with +other networks in ASL data. Our proposed architecture +performs state-of-the-art accuracy in this ASL dataset. +Hence, the representation feature maps in the dense +layer are very effective for this dataset. +5. 
Conclusions +We replaced the dense backend of existing hypercom- +plex CNNs for image classification with PHM modules +to create weight sharing in this layer. This novel de- +sign improved classification accuracy, reduced parame- +ter counts, flops, and latency compared to the baseline +networks. The results support our hypothesis that the +PHM operation in the densely connected back end pro- +vides better representations as well as improves accu- +racy with fewer parameters. These results also high- +lighted the importance of the calculations in the back- +end. +The QPHM and VPHM outperformed the other +works mentioned in “Experiment” section. +The pro- +posed QPHM achieved higher validation accuracy (top- +1) for all network architectures than the proposed +VPHM. + +Architecture +Params +FLOPS +Latency +Training +Accuracy +Validation +Accuracy +ResNet26 +13.6M +1.72G +0.75ms +57.0 +45.48 +Quat ResNet26 +15.1M +1.30G +1.71ms +64.1 +50.09 +QPHM26 +11.4M +1.18G +1.7ms +65.3 +52.23 +ResNet35 +18.5M +3.57G +1.02ms +63.8 +48.99 +Quat ResNet35 +20.5M +4.59G +3.15ms +70.9 +48.11 +QPHM35 +17.5M +4.10G +3.15ms +75.3 +51.84 +ResNet50 +25.5M +4.01G +1.46ms +65.8 +50.92 +Quat ResNet50 +27.6M +5.82G +4.21ms +73.4 +49.69 +QPHM50 +24.5M +5.32G +4.16ms +78.8 +54.38 +Table 5. Classification performance on the ImageNet300k dataset for different ResNet architectures. Top-1 training and validation +accuracies. +Architecture +Top-1 Validation Accuracy +CNNs +82% +HOG-LBP-SVM +98.36% +HT with CNN +96.71% +RF-JA with l-o-o +70% +RF-JA with h-h +90% +GF-RF l-o-o +49% +GF-RF h-h +75% +ESF-MLRF l-o-o +57% +ESF-MLRF h-h +87% +RF-JP l-o-o +43% +RF-JP h-h +59% +Faster RCNN +89.72% +RCNNA +94.87% +DBN +79% +CMVA and IF l-o-o +92.7% +CMVA and IF h-h +99.9% +CNN with ASL +97.82% +QPHM +100.0 +Table 6. +Top-1 validation accuracy comparison with other +works on ASL dataset. 
Here, l-o-o, h-h, HT with CNN [6,21], +CMVA [24], RF-JA [5], GF-RF [20], ESF-MLRF [15], RF- +JP [13], RCNN [26], RCNNA [26], DBN [22], and HOG-LBP- +SVM [17] mean Leave one out, half-half, HYBRID TRANS- +FORM, CNNs [1] with multiview augmentation and IF Infer- +ence Fusion, Random Forest with Joint Angles, Gabor Filter- +based features with Random Forest, Ensemble of Shape Func- +tion with Multi-Layer Random Forest, Random Forest with +Joint Positions, Recurrent convolutional neural networks, Re- +current convolutional neural networks with attention, Deep be- +lief network, and Histogram of Oriented Gradients (HOG) and +Local Binary Pattern (LBP) with support vector machine, re- +spectively. +References +[1] Salem Ameen and Sunil Vadera. A convolutional neu- +ral network to classify american sign language finger- +spelling from depth and colour images. Expert Systems, +34(3):e12197, 2017. 8 +[2] Timothy Anderson. Split-complex convolutional neural +networks. 2017. 7 +[3] Pierre Buyssens, Abderrahim Elmoataz, and Olivier +L´ezoray. Multiscale convolutional neural networks for +vision–based classification of cells. In Asian Conference +on Computer Vision, pages 342–352. Springer, 2012. 1 +[4] Terrance DeVries and Graham W Taylor. Improved reg- +ularization of convolutional neural networks with cutout. +arXiv preprint arXiv:1708.04552, 2017. 7 +[5] Cao Dong, Ming C Leu, and Zhaozheng Yin. Ameri- +can sign language alphabet recognition using microsoft +kinect. In Proceedings of the IEEE conference on com- +puter vision and pattern recognition workshops, pages +44–52, 2015. 5, 7, 8 +[6] Brandon Garcia and Sigberto Alarcon Viesca. Real-time +american sign language recognition with convolutional +neural networks. Convolutional Neural Networks for Vi- +sual Recognition, 2:225–232, 2016. 8 +[7] Chase Gaudet and Anthony S. Maida. Removing dimen- +sional restrictions on complex/hyper-complex networks. +In 2021 IEEE International Conference on Image Pro- +cessing (ICIP), 2021. 
1, 2, 3, 4, 5, 6, 7 +[8] Chase J Gaudet and Anthony S Maida. +Deep quater- +nion networks. In 2018 International Joint Conference +on Neural Networks (IJCNN), pages 1–8. IEEE, 2018. 2, +3, 4, 5, 6 +[9] Ali Hassani, Steven Walton, Nikhil Shah, Abulikemu +Abuduweili, Jiachen Li, and Humphrey Shi. Escaping +the big data paradigm with compact transformers. arXiv +preprint arXiv:2104.05704, 2021. 7 +[10] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian +Sun. Deep residual learning for image recognition. In +Proceedings of the IEEE conference on computer vision +and pattern recognition, pages 770–778, 2016. 5, 6 +[11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian +Sun. Identity mappings in deep residual networks. In Eu- +ropean conference on computer vision, pages 630–645. +Springer, 2016. 7 +[12] Shima Javanmardi, +Seyed-Hassan Miraei Ashtiani, +Fons J Verbeek, and Alex Martynenko. Computer-vision + +classification of corn seed varieties using deep convolu- +tional neural network. Journal of Stored Products Re- +search, 92:101800, 2021. 1 +[13] Cem Keskin, Furkan Kırac¸, Yunus Emre Kara, and Lale +Akarun. Real time hand pose estimation using depth sen- +sors. In Consumer depth cameras for computer vision, +pages 119–137. Springer, 2013. 7, 8 +[14] Alex Krizhevsky, Geoffrey Hinton, et al. Learning mul- +tiple layers of features from tiny images. 2009. 5 +[15] Alina Kuznetsova, Laura Leal-Taix´e, and Bodo Rosen- +hahn. Real-time sign language recognition using a con- +sumer depth camera. In Proceedings of the IEEE interna- +tional conference on computer vision workshops, pages +83–90, 2013. 7, 8 +[16] Cameron E Long. Quaternion Temporal Convolutional +Neural Networks. +PhD thesis, University of Dayton, +2019. 1, 3, 7 +[17] Huy BD Nguyen and Hung Ngoc Do. Deep learning for +american sign language fingerspelling recognition sys- +tem. In 2019 26th International Conference on Telecom- +munications (ICT), pages 314–318. IEEE, 2019. 8 +[18] T. Parcollet, M. 
Morchid, and G. Linar´es. Quaternion +convolutional networks for heterogeneous image pro- +cessing. In IEEE Intl. Conf. on Acoustics, Speech and +Signal Processing (ICASSP), pages 8514–8518, 2019. 1, +2 +[19] Titouan Parcollet, Ying Zhang, Mohamed Morchid, +Chiheb Trabelsi, Georges Linar`es, Renato De Mori, +and Yoshua Bengio. +Quaternion convolutional neural +networks for end-to-end automatic speech recognition. +arXiv preprint arXiv:1806.07789, 2018. 1, 3, 5 +[20] Nicolas Pugeault and Richard Bowden. Spelling it out: +Real-time asl fingerspelling recognition. In 2011 IEEE +International conference on computer vision workshops +(ICCV workshops), pages 1114–1119. IEEE, 2011. 7, 8 +[21] Virender Ranga, Nikita Yadav, and Pulkit Garg. Amer- +ican sign language fingerspelling using hybrid discrete +wavelet transform-gabor filter and convolutional neural +network. Journal of Engineering Science and Technol- +ogy, 13(9):2655–2669, 2018. 8 +[22] Lucas Rioux-Maldague and Philippe Giguere. Sign lan- +guage fingerspelling classification from depth and color +images using a deep belief network. In 2014 Canadian +Conference on Computer and Robot Vision, pages 92–97. +IEEE, 2014. 8 +[23] Nazmul Shahadat and Anthony S Maida. Adding quater- +nion representations to attention networks for classifica- +tion. arXiv preprint arXiv:2110.01185, 2021. 1, 2, 5, +6 +[24] Wenjin Tao, Ming C Leu, and Zhaozheng Yin. Amer- +ican sign language alphabet recognition using convolu- +tional neural networks with multiview augmentation and +inference fusion. Engineering Applications of Artificial +Intelligence, 76:202–213, 2018. 7, 8 +[25] Chiheb Trabelsi, Olexa Bilaniuk, Ying Zhang, Dmitriy +Serdyuk, Sandeep Subramanian, Joao Felipe Santos, +Soroush Mehri, Negar Rostamzadeh, Yoshua Bengio, +and Christopher J Pal. Deep complex networks. arXiv +preprint arXiv:1705.09792, 2017. 2, 7 +[26] Shweta Upadhyay, RK Sharma, and Prashant Singh +Rana. Sign language recognition with visual attention. 
+Technical report, EasyChair, 2020. 7, 8 +[27] Jiasong Wu, Ling Xu, Fuzhi Wu, Youyong Kong, Lotfi +Senhadji, and Huazhong Shu. Deep octonion networks. +Neurocomputing, 397:179–191, 2020. 7 +[28] Qilin Yin, Jinwei Wang, Xiangyang Luo, Jiangtao Zhai, +Sunil Kr Jha, and Yun-Qing Shi. Quaternion convolu- +tional neural network for color image classification and +forensics. IEEE Access, 7:20293–20301, 2019. 3, 7 +[29] Sergey Zagoruyko and Nikos Komodakis. Wide residual +networks. arXiv preprint arXiv:1605.07146, 2016. 7 +[30] Aston Zhang, Yi Tay, Shuai Zhang, Alvin Chan, +Anh Tuan Luu, Siu Cheung Hui, and Jie Fu. Beyond +fully-connected layers with quaternions: Parameteriza- +tion of hypercomplex multiplications with 1/n parame- +ters. arXiv preprint arXiv:2102.08597, 2021. 2, 3 +[31] Xuanyu Zhu, Yi Xu, Hongteng Xu, and Changjian Chen. +Quaternion convolutional neural networks. In Proceed- +ings of the European Conference on Computer Vision +(ECCV), pages 631–647, 2018. 1, 3, 7 + diff --git a/1NE3T4oBgHgl3EQfnQph/content/tmp_files/load_file.txt b/1NE3T4oBgHgl3EQfnQph/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..109f520bc6ce923aab013627430c586c36d944ba --- /dev/null +++ b/1NE3T4oBgHgl3EQfnQph/content/tmp_files/load_file.txt @@ -0,0 +1,878 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf,len=877 +page_content='Enhancing ResNet Image Classification Performance by using Parameterized Hypercomplex Multiplication Nazmul Shahadat, Anthony S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Maida University of Louisiana at Lafayette Lafayette LA 70504, USA nazmul.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='ruet@gmail.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='com, maida@louisiana.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='edu Abstract Recently, many deep networks have introduced hy- percomplex and related calculations into their architec- tures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In regard to convolutional networks for classifica- tion, these enhancements have been applied to the con- volution operations in the frontend to enhance accuracy and/or reduce the parameter requirements while main- taining accuracy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Although these enhancements have been applied to the convolutional frontend, it has not been studied whether adding hypercomplex calculations improves performance when applied to the densely con- nected backend.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' This paper studies ResNet architectures and incorporates parameterized hypercomplex multipli- cation (PHM) into the backend of residual, quaternion, and vectormap convolutional neural networks to assess the effect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' We show that PHM does improve classifica- tion accuracy performance on several image datasets, including small, low-resolution CIFAR 10/100 and large high-resolution ImageNet and ASL, and can achieve state-of-the-art accuracy for hypercomplex networks.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Introduction Convolutional neural networks (CNNs) have been widely used, with great success, in visual classification tasks [3, 12] because of their good inductive priors and intuitive design.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Most deep learning building blocks in CNNs use real-valued operations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' However, recent studies have explored the complex/hypercomplex space and showed that hypercomplex valued networks can perform bet- ter than their real-valued counterparts due to the weight sharing mechanism embedded in the hypercomplex mul- tiplication [7,18].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' This weight sharing differs from that found in the real-valued convolution operation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Specif- ically, quaternion convolutions share weights across in- put channels enabling them to discover cross-channel in- put relationships that support more accurate prediction (a) Validation accuracy comparison for CIFAR-10 data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' (b) Validation accuracy comparison for CIFAR-100 data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Figure 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Top-1 validation accuracy comparison among orig- inal ResNets [7], original quaternion networks [7], original vectormap networks [7], our proposed QPHM and VPHM net- works for CIFAR benchmarks and generalization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The effectiveness of quaternion net- works is shown in [7,16,19,23,31].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The weight-sharing properties of the Hamiltonian product allow the discovery of cross-channel relation- ships.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' This is a new plausible inductive bias, namely, that there are data correlations across convolutional in- put channels that enhance discovery of effective cross- channel features.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Practitioners have applied these cal- culations in the convolution stages of CNNs but not to the dense backend where real-valued operations are still used.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The present paper puts weight-sharing cal- culations in the dense backend to further improve CNN arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='04623v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='CV] 11 Jan 2023 96 Top-1 Validation Accuracy 95.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5 95 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5 HH 94 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5 93 ResNet Original Quaternion Vectormap QPHM VPHM Original Original ResNet-18 ResNet-34 ResNet-5082 80 Top-1 Validation Accuracy 78 76 74 72 70 68 66 ResNet Original Quaternion Vectormap QPHM VPHM Original Original ResNet-18 ResNet-34 ResNet-50performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' To exploit this new type of weight shar- ing, we use a parameterized hypercomplex multiplica- tion (PHM) [30] layer as a building block.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' This block replaces the real-valued FC layers with hypercomplex FC layers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' We test the hypothesis using two types of hypercomplex CNNs, namely quaternion [8] CNNs and vectormap [7] CNNs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Our contributions are: Showing the effectiveness of using hypercomplex networks in the densely connected backend of a CNN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Introducing quaternion networks with PHM based dense layer (QPHM) to bring hypercomplex deep learning properties to the entire model.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Introducing vectormap networks with a PHM based dense layer (VPHM) to remove hypercomplex di- mensionality constraints from the frontend and backend.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The effectiveness of employing PHM based FC lay- ers with hypercomplex networks is seen in Figures 1a and 1b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' We also show that these new models ob- tain SOTA results for hypercomplex networks in CIFAR benchmarks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Our experiments also show SOTA results for American Sign Language (ASL) data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Moreover, our models use fewer parameters, FLOPS, and latency com- pared to the base model proposed by [7,23] for classifi- cation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Background and Related Work 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Quaternion Convolution Quaternions are four dimensional vectors of the form Q = r + ix + jy + kz ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' r, x, y, z ∈ R (1) where, r, x, y, and z are real values and i, j, and k are the imaginary values which satisfy i2 = j2 = k2 = ijk = −1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Quaternion convolution is defined by con- volving a quaternion filter matrix with a quaternion vec- tor (or feature map).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Let, QF = R + iX + jY + kZ be a quaternion filter matrix with R, X, Y, and Z be- ing real-valued kernels and QV = r + ix + jy + kz be a quaternion input vector with r, x, y, and z being real-valued vectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Quaternion convolution is defined below [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' QF ⊛ QV = (R ∗ r − X ∗ x − Y ∗ y − Z ∗ z) +i(R ∗ x + X ∗ r + Y ∗ z − Z ∗ y) +j(R ∗ y − X ∗ z + Y ∗ r + Z ∗ x) +k(R ∗ z + X ∗ y − Y ∗ x + Z ∗ r) (2) There are 16 real-valued convolutions but only four ker- nels which are reused.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' This is how the weight shar- ing occurs.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' [18] first described the weight sharing in the Hamilton product.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Vectormap Convolution [7] noted that the Hamilton product and quaternion convolution, when used in deep networks, did not re- quire the entire Quaternion algebra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' They called these vectormap convolutions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The weight sharing ratio is 1 N where N is the dimension of the vectormap, Dvm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Let V 3 in = [v1, v2, v3] be an RGB input vector and W 3 = [w1, w2, w3] a weight vector with N = 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' We use a permutation τ on inputs so each input vector is multiplied by each weight vector element: τ(vi) = � v3 i = 1 vi−1 i > 1 (3) After applying circularly right shifted permutation to V 3 in, a new vector V 3 is formed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The permutation of weight τ(W 3) can be found like equation 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Hence, the output vector Vout is: V 3 out = [W 3 · V 3 in, τ(W 3) · V 3 in, τ 2(W 3) · V 3 in] (4) Here, “·” denotes dot product.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The outputs V 3 out come from the linear combination of the elements of V 3 in and W 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Let the weight filter matrix for a vectormap be VF = [A,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' B,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' C] and the input vector after linear com- bination be Vh = [x,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' y,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' z],' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' the vectormap convolution between VF ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' and Vh for Dvm = 3 is: � � R(VF ∗ Vh) I (VF ∗ Vh) J (VF ∗ Vh) � � = L ⊙ � � A B C C A B B C A � � ∗ � � x y z � � (5) where,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' L is a learnable matrix defined as a matrix L ∈ RDvm×Dvm which is initialized using: lij = � � � � � � � � � � � � � � � 1 i = 1 1 i = j 1 j = Cali 
where Cali = (i + (i − 1)) & Cali = Cali − Dvm if Cali > Dvm −1 else.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' (6) By choosing Dvm and assigning a new constant matrix L ∈ RDvm×Dvm matching Dvm, any dimensional hy- percomplex convolution can be used.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Vectormap weight initialization uses a similar mechanism to complex [25] and quaternion [8] weight initialization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Our weight ini- tialization follows [7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' PHM Fully Connected Layer The above methods apply to convolutional layers but not to fully connected (FC) layers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' [30] proposed pa- rameterized hypercomplex multiplication (PHM) for FC layers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Like vectormaps, PHM can have any dimension.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' If the dimension is four, it is like the Hamilton prod- uct.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The success of the Hamiltonian product is shown in [7,8,16,19,28,31].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Our work uses two different PHM dimensions: four for quaternion networks, and five for vectormap networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' A fully connected layer is defined [30] as y = FC(x) = Wx + b, where W ∈ Rk×d and b ∈ Rk are weights and bias, d and k are input and output dimensions, and x ∈ Rd, y ∈ Rk.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' PHM uses the following hypercomplex transform to map input x ∈ Rd into output y ∈ Rk as y = PHM (x) = Hx + b, where H ∈ Rk×d is the sum of Kronecker products.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Like Dvm, let the dimension of the PHM module be Dphm = N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The PHM operation requires that both d and k are divisible by N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' H is the sum of Kronecker products of the parameter matrices Ai ∈ RN×N and Si ∈ Rk/N×d/N, where i = 1 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' N: H = �N i=1 Ai ⊗ Si.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Parameter reduction comes from reusing matrices A and S in the PHM layer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The ⊗ is the Kronecker product.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' H is multiplied with the input in the dense layer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The four dimensional PHM layer is explained in [30].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' We also use five dimensions which is explained here.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The learnable parameters for N = 5 are Pr, Pw, Px, Py, and Pz where P ∈ R1×1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' For Ai we use the hypercomplex matrix (5 dimensions) which is generated in a similar way of vectormap convolution (Equations 5 and 6).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' H is calculated using two learnable parameter matrices (A_i and S_i) for N = 5 as follows: H = \sum_{i=1}^{5} A_i \otimes S_i = A_1 \otimes \underbrace{\begin{bmatrix} P_r \end{bmatrix}}_{S_1} + A_2 \otimes \underbrace{\begin{bmatrix} P_w \end{bmatrix}}_{S_2} + A_3 \otimes \underbrace{\begin{bmatrix} P_x \end{bmatrix}}_{S_3} + A_4 \otimes \underbrace{\begin{bmatrix} P_y \end{bmatrix}}_{S_4} + A_5 \otimes \underbrace{\begin{bmatrix} P_z \end{bmatrix}}_{S_5} = \begin{bmatrix} P_r & P_w & P_x & P_y & P_z \\ -P_z & P_r & P_w & -P_x & -P_y \\ -P_y & -P_z & P_r & -P_w & P_x \\ -P_x & -P_y & -P_z & P_r & -P_w \\ -P_w & -P_x & -P_y & P_z & P_r \end{bmatrix} \quad (7), where A_1 is the 5×5 identity matrix and A_2, ..., A_5 are 5×5 signed permutation matrices (entries in {0, ±1}) generated in a similar way to the vectormap construction (Equations 5 and 6). [NOTE: the exact sign placement within each individual A_i was lost in PDF extraction; the combined matrix above is reconstructed from the extracted fragment order — verify against the original paper.] Equation 7 for N = 5 expresses the Hamiltonian product of the hypercomplex layer. It preserves all PHM layer properties. 3. Proposed Models: QPHM and VPHM. We propose a new fully hypercomplex model in lieu of hypercomplex CNNs that use a real-valued backend dense layer. That is, we replace the dense layer with a PHM layer to enjoy the benefits of hypercomplex weight sharing.'
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' We chose two base hypercomplex models for the con- volutional frontend, the quaternion network and vec- tormap network [7, 8] which were using real-valued backend layers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' To match dimensions with frontend net- works, we used a PHM layer at four dimensions with the quaternion network and a PHM layer at five dimensions with the three dimensional vectormap network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In some cases, we also needed to use a PHM layer at five dimen- sions with quaternion networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' But we couldn’t use a three dimensional PHM layer as the output classes must be divisible by the dimensions in the PHM operation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Figure 2 shows our proposed PHM based FC layer with quaternion convolutional neural networks (QC- NNs).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' At the end of QCNNs (end of layer 4 in Figure 2 (top)), the output feature maps are flattened.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' This flat- tened layer is normally the input to a fully connected layer, but in our proposed method this layer is the input layer for the PHM based FC layer.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' This is represented as Pin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The parameterized weight H performs parame- terized multiplication to find the hyper-complex output Pout.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The type of PHM layer depends on the dimensions needed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' For quaternion networks, we used dimensions four and five according to the number of classes in the datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The figures in Figure 2 (bottom) are expanded 4D PHM and 5D PHM layer of a single dense layer con- nection (red marked in Figure 2 (top)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Pin = Prin + Pwin + Pxin + Pyin + Pzin (8) For the PHM layer with five dimensions, each PHM layer accepts five channels of input like Prin, Pwin, Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Full hypercomplex network where quaternion convolutional neural networks (QCNNs) are used in the front and PHM based fully-connected layers are applied in the back-end.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 5-dimensional PHM is explained in Equation 7.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Equations 8 and 9 describe input and output for a 5D PHM layer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 4D PHM is similar.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Layer Output size Quaternion ResNet Vectormap ResNet QPHM VPHM Stem 32x32 3x3Q,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 112,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' std=1 3x3V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 90,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' std=1 3x3Q,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 112,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' std=1 3x3V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 90,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' std=1 Bottleneck group 1 32x32 � � 1x1Q,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 112 3x3Q,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} 
+page_content=' 112 1x1Q,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 448 � � ×3 � � 1x1V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 90 3x3V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 90 1x1V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 360 � � ×3 � � 1x1QP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 112 3x3QP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 112 1x1QP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 448 � � ×3 � � 1x1VP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 90 3x3VP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 90 1x1VP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 390 � � ×3 Bottleneck group 2 16x16 � � 1x1Q,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 224 3x3Q,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 224 1x1Q,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 896 � � ×4 � � 1x1V,' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 180 3x3V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 180 1x1V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 720 � � ×4 � � 1x1QP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 224 3x3QP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 224 1x1QP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 896 � � ×4 � � 1x1VP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 180 3x3VP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 180 1x1VP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 720 � � ×4 Bottleneck group 3 8x8 � � 1x1Q,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 448 3x3Q,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 448 1x1Q,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 1792 � � ×6 � � 1x1V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 360 3x3V,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 360 1x1V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 1440 � � ×6 � � 1x1QP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 448 3x3QP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 448 1x1QP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 1792 � � ×6 � � 1x1VP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 360 3x3VP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 360 1x1VP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 1440 � � ×6 Bottleneck group 4 4x4 � � 1x1Q,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 896 3x3Q,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 896 1x1Q,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 3584 � � ×3 � � 1x1V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 720 3x3V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 720 1x1V,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 2880 � � ×3 � � 1x1QP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 896 3x3QP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 896 1x1QP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 3584 � � ×3 � � 1x1VP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 720 3x3VP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 720 1x1VP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 2880 � � ×3 Pooling Layer 1x1x100 global average-pool,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 100 outputs Output 1x1x100 fully connected Layer,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' softmax QPHM Layer VPHM Layer Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The 50-layer architectures tested on CIFAR-100: quaternion ResNet [7, 8], vectormap ResNet [7], our proposed QPHM, and VPHM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Input is a 32x32x3 color image.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The number of stacked bottleneck modules is specified by multipliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' “Q”, “V”, “QP”, “VP”, and “std” denote quaternion convolution, vectormap convolution, QPHM (quaternion network with PHM layer), VPHM (vectormap network with PHM layer), and stride respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Integers (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=', 90, 112) denote number of output channels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Pxin, Pyin, and Pzin (Equation 8) and produces five channels of output like Prout, Pwout, Pxout, Pyout, and Pzout which are merged or stacked together to Pout as, Pout = Prout +Pwout +Pxout +Pyout +Pzout (9) Hence, the representational feature maps persist throughout the classification network.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Similarly,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' this Parameterized STAGE 1 STAGE 2 STAGE 3 STAGE 4 Weight Pin Input H Horse Cat 224x224x4 Classification Stem Layer PHM based FC Layer QCNN 64 Filters 2nd Layer 3rd Layer 4rth Layer 1st Layer Filter size 7 QCNN QCNN QCNN Flatten QCNN 128 Filters 256 Filters 512 Filters Layer With stride 2 64 Filters max-pooling Stride 2 Stride 2 Stride 2 Stride 1 5-dimensional PHM layer (VPHM) 4-dimensional PHM layer (QPHM) PPHM (both 4D,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' and 5D) dense layer is applied in the backend of original ResNet [10] which we named RPHM (ResNet-with-PHM).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Experiment The purpose of the experiments reported herein was to test whether replacing the real-valued backend of a CNN model with a PHM backend improved clas- sification performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The architectures tested were real-valued, quaternion-valued [8, 19], and vectormap ResNet [7], either with or without the PHM backend.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' We refer to the quaternion ResNet model with the PHM backend as QPHM.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Similarly, VPHM, RPHM denote the vectormap ResNet, and real-valued ResNet models with the PHM backend.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Our experiments were conducted on the following datasets: CIFAR-10/100 [14], the ImageNet300k dataset [23] and the American Sign Language Hand Gesture color image recognition dataset [5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The first two datasets have less training samples with small image resolutions and the other datasets use a large number of training samples with higher resolution images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' We used these datasets to check our proposed models for small and large training samples as well as for small and high resolution images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The experiments were run on a workstation with an Intel(R) Core(TM) i9-9820X CPU @ 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='30GHz, 128 GB memory, and NVIDIA Titan RTX GPU (24GB).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' CIFAR Classification In addition to testing the PHM with real-valued, quaternion-valued, and vectormap ResNet, we tested the network models with three depths: 18, 34, and 50 layers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='1 Method We tested all of the above mentioned architectures with and without the PHM backend on both the CIFAR-10 and CIFAR-100 datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' These datasets were composed of 32x32 pixel RGB images falling into either ten classes or 100 classes, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Both datasets have 50,000 training, and 10,000 test examples.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The models were trained using the same components as the real-valued networks, the original quaternion net- work, and the original vectormap network using the same datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' All models in Table 2 were trained us- ing the same hyperparameters.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Our QPHM and VPHM design is similar to the quaternion [7,8], and vectormap networks [7], respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The residual architectures differ in the number of output channels than the origi- nal hypercomplex networks and the proposed networks due to keeping the number of trainable parameters about the same.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The number of output channels for the resid- ual networks is the same as [7] and [19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Table 1 shows the 50-layer architectures tested for CIFAR-100 dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' One goal is to see if the representations generated by the PHM based dense layer instead of the real-valued dense layer outperforms the quaternion, vectormap, and residual baselines reported in [7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' We also analyzed different residual architectures to assess the effect of depth on our proposed models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' For preprocessing, we followed [7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' We used stochastic gradient descent op- timization with 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='9 Nesterov momentum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The learn- ing rate was initially set to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='1 with warm-up learning for the first 10 epochs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' For smooth learning, we chose cosine learning from epochs 11 to 120.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' However, we were getting about same performance for linear learn- ing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' All models were trained for 120 epochs and batch size was set to 100.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' This experiment used batch normal- ization and 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='0001 weight decay.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The implementation is on github at-https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='com/nazmul729/QPHM- VPHM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='git.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='2 Results The main results appear in Figure 1 and 3, and in Ta- ble 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Figure 1 gives the overall pattern of results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Fig- ure 1a shows results for CIFAR-10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' It shows top-1 vali- dation accuracy for the five models: real-valued ResNet, quaternion-valued ResNet, vectormap ResNet, QPHM, and VPHM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Also, results are shown for 18, 34, and 50 layers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' We chose top-1 performance out of three.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Fig- ure 1b shows the same consistent pattern of results for the CIFAR-100 dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The magnitude of improvement is higher for CIFIR-100 than for CIFAR-10.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The results are also shown in tabular form in Table 2, along with counts of trainable parameters, flops, and latency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' It can be seen in Table 2 that modifying the backend to have a PHM layers has little effect on the parameter count, flops, and latency as the input image resolutions, and the number of output classes are low.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The proposed QPHM model attains better top-1 val- idation accuracy than the original ResNet, quaternion, and vectormap networks for both datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The QPHM also produces better performance compared to the pro- posed VPHM, and RPHM models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Moreover, we com- pare our best performance which is obtained by the QPHM model, to the deep or shallow complex or hyper- complex networks and notice that the QPHM is achieved SOTA performance (shown in Table 3) for the CIFAR- 10 and -100 datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Table 3 compares different complex or hypercom- plex networks top-1 validation accuracy with our best result.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Our comparison was not limited to [7] and com- plex space.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The QPHM also gains highest top-1 vali- dation accuracy than the relevant CNN models for both datasets (shown in Table 4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Tables 2, 3, and 4, show that Model Name Param Count FLOPS Latency Validation Accuracy CIFAR-10 CIFAR-100 ResNet18 [10] 11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='1M 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='56G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='22ms 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='08 72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='19 RPHM18 11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='1M 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='55G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='21ms 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='74 77.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='83 Quat18 [8] 8.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5M 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='26G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='36ms 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='08 71.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='23 Vect18 [7] 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='3M 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='21G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='29ms 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='95 72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='82 QPHM18 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5M 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='25G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='35ms 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='03 77.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='88 VPHM18 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='3M 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='20G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='27ms 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='97 77.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='80 ResNet34 [10] 21.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='2M 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='16G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='29ms 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='27 72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='19 RPHM34 21.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='1M 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='15G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='28ms 94.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='98 77.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='80 Quat34 [8] 16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='3M 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='438G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='57ms 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='27 72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='76 Vect34 [7] 14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='04M 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='35G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='45ms 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='45 74.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='12 QPHM34 16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='3M 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='432G 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='54ms 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='40 78.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='51 VPHM34 14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='03M 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='34G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='44ms 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='41 77.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='23 ResNet50 [10] 23.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5M 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='30G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='478ms 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='90 72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='60 RPHM50 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='6M 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='29G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='468ms 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='59 79.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='21 Quat50 [8] 18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='08M 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='45G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='97ms 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='90 72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='68 Vect50 [7] 15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5M 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='19G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='77ms 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='28 74.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='84 QPHM50 18.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='07M 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='44G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='96ms 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='59 80.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='25 VPHM50 15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5M 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='15G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='76ms 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='48 78.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='91 Table 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Image classification performance on the CIFAR benchmarks for 18, 34 and 50-layer architectures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Here, Quat, Vect, QPHM, and VPHM, define the quaternion ResNet, vectormap ResNet, quaternion networks with PHM FC layer, and vectormap networks with PHM FC layer, respectively.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' (a) Validation loss versus training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' (b) Validation accuracy versus training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Figure 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Validation loss and accuracy of 50 layer ResNet [7], quaternion [7], vectormap [7], QPHM, VPHM for CIFAR-100.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' our QPHM model achieves best performance for CIFAR 10 and 100 datasets with fewer parameters, flops, and la- tency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' ImageNet Classification 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='1 Method These experiments are performed on a 300k subset of the ImageNet dataset which we call ImageNet300k [23].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' [23] explains how the full dataset was sampled.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The models compared are: standard ResNets [23], quater- nion convolutional ResNets [23], and our proposed QPHM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' We ran 26, 35, and 50-layers architectures using “[1, 2, 4, 1]”, “[2, 3, 4, 2]” and “[3, 4, 6, 3]” bottleneck block multipliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Training (all models in Table 5) used the same optimizer and hyperparameters as CIFAR clas- sification method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='2 Experimental Results Table 5 shows the results on the ImageNet300k dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' This result shows that our model takes three millions fewer trainable parameters and yields almost 5% higher validation performance for the same architectures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Pa- 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5 ResNet 4 Quaternion Vectormap 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5 QPHM 3 VPHM Loss 2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5 2 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5 1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5 0 8 345438 5 0 9 118 127 6 145 154 8 172 3 Epochs90 80 70 60 Accuracy ResNet 50 Quaternion 40 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='Vectormap 30 QPHM 20 VPHM 10 0 1 9 8 3 4 5 43 8 100 109 8 127 136 145 154 163 172 EpochsModel Architecture Validation Accuracy CIFAR-10 CIFAR-100 DCNs [2] 38.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='90 42.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='6 DCN [25] 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='53 73.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='37 QCNN [16] 77.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='48 47.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='46 Quat [31] 77.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='78 QCNN [28] 83.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='09 QCNN* [28] 84.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='15 Quaternion18 [7] 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='80 71.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='23 Quaternion34 [7] 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='27 72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='76 Quaternion50 [7] 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='90 72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='68 Octonion [27] 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='65 75.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='40 Vectormap18 [7] 93.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='95 72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='82 Vectormap34 [7] 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='45 74.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='12 Vectormap50 [7] 94.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='28 74.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='84 QPHM-50 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='59 80.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='25 VPHM-50 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='48 78.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='91 Table 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Top-1 validation accuracy for hypercomplex net- works.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' DCN stands for deep complex convolutional network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' variant used quaternion batch normalization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Quaternion and vectormap networks are the base networks [7] rameter reduction is not depicted in Table 3 for low res- olution CIFAR benchmark images as they have saved parameters in thousands.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' It is also clear that deeper net- works perform better than shallow networks.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' ASL Classification 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='1 Method To compare the proposed QPHM model with other networks, we evaluated it on the ASL Alpha- bet dataset [26] publicly available on Kaggle at https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='kaggle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='com/grassknoted/asl-alphabet.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' This dataset has 87,000 hand-gesture images for 29 sign classes where each class has about 3,000 images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' And, the image size is 200 × 200 × 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' It has 26 finger spelling alphabet classes for the En- glish alphabetic letters and three special characters.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Due to the divisibility restriction in PHM, we cannot use 29 classes as 29 is prime.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Like all other baseline leave-one- out and half-half methods [5,13,15,20,24], we exclude one class (letter B) from the training and validation sets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' We use the same hyperparameters as CIFAR classifica- tion method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='2 Experimental Results Due to the divisibility limitation, it is not possible to evaluate the ASL data using VPHM model as we choose PHM with five dimensions for VPHM model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' So we only tested the QPHM (PHM with four dimensions) model to compare with other networks on the ASL Model Architecture Validation Accuracy CIF10 CIF100 Convolutional Networks ResNet18 [9] 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='27 63.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='41 ResNet34 [9] 90.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='51 64.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='52 ResNet50 [9] 90.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='60 61.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='68 ResNet110 [9] 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='08 76.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='63 ResNet1001 [11] 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='08 77.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='29 MobileNet [9] 91.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='02 67.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='44 Cout [4] 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='28 77.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='54 Wide Residual Networks WRN-28-10 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='00 80.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='75 WRN-28-10-dropout 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='11 81.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='15 Our Method QPHM50 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='59 80.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='25 VPHM50 95.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='48 78.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='91 QPHM-18-2 (ours) 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='24 81.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='45 QPHM-50-2 (ours) 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='63 82.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='00 Table 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Top-1 validation accuracy comparison among deep networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' CIF10 and CIF100 stand for CIFAR10 and CI- FAR100.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Cout is the ResNet-18+cutout.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' WRN-28-10 [29], QPHM-18-2, and QPHM-50-2 stand for wide ResNet 28, 18, and 50-layers with the output channel widening factor 10, 2, and 2, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Table 6 provides a comparison of top-1 val- idation accuracy of our proposed QPHM model with other networks in ASL data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Our proposed architecture performs state-of-the-art accuracy in this ASL dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Hence, the representation feature maps in the dense layer are very effective for this dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Conclusions We replaced the dense backend of existing hypercom- plex CNNs for image classification with PHM modules to create weight sharing in this layer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' This novel de- sign improved classification accuracy, reduced parame- ter counts, flops, and latency compared to the baseline networks.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The results support our hypothesis that the PHM operation in the densely connected back end pro- vides better representations as well as improves accu- racy with fewer parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' These results also high- lighted the importance of the calculations in the back- end.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The QPHM and VPHM outperformed the other works mentioned in “Experiment” section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' The pro- posed QPHM achieved higher validation accuracy (top- 1) for all network architectures than the proposed VPHM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Architecture Params FLOPS Latency Training Accuracy Validation Accuracy ResNet26 13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='6M 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='72G 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='75ms 57.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='0 45.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='48 Quat ResNet26 15.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='1M 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='30G 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='71ms 64.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='1 50.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='09 QPHM26 11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='4M 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='18G 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='7ms 65.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='3 52.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='23 ResNet35 18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5M 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='57G 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='02ms 63.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='8 48.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='99 Quat ResNet35 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5M 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='59G 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='15ms 70.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='9 48.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='11 QPHM35 17.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5M 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='10G 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='15ms 75.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='3 51.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='84 ResNet50 25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5M 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='01G 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='46ms 65.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='8 50.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='92 Quat ResNet50 27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='6M 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='82G 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='21ms 73.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='4 49.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='69 QPHM50 24.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='5M 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='32G 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='16ms 78.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='8 54.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='38 Table 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Classification performance on the ImageNet300k dataset for different ResNet architectures.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Top-1 training and validation accuracies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Architecture Top-1 Validation Accuracy CNNs 82% HOG-LBP-SVM 98.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='36% HT with CNN 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='71% RF-JA with l-o-o 70% RF-JA with h-h 90% GF-RF l-o-o 49% GF-RF h-h 75% ESF-MLRF l-o-o 57% ESF-MLRF h-h 87% RF-JP l-o-o 43% RF-JP h-h 59% Faster RCNN 89.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='72% RCNNA 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='87% DBN 79% CMVA and IF l-o-o 92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='7% CMVA and IF h-h 99.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='9% CNN with ASL 97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='82% QPHM 100.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='0 Table 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Top-1 validation accuracy comparison with other works on ASL dataset.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Here,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' l-o-o,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' h-h,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' HT with CNN [6,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='21],' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' CMVA [24],' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' RF-JA [5],' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' GF-RF [20],' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' ESF-MLRF [15],' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' RF- JP [13],' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' RCNN [26],' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' RCNNA [26],' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' DBN [22],' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} 
+page_content=' and HOG-LBP- SVM [17] mean Leave one out,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' half-half,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' HYBRID TRANS- FORM,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' CNNs [1] with multiview augmentation and IF Infer- ence Fusion,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Random Forest with Joint Angles,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Gabor Filter- based features with Random Forest,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Ensemble of Shape Func- tion with Multi-Layer Random Forest,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Random Forest with Joint Positions,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Recurrent convolutional neural networks,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Re- current convolutional neural networks with attention,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Deep be- lief network,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' and Histogram of Oriented Gradients (HOG) and Local Binary Pattern 
(LBP) with support vector machine,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' re- spectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' References [1] Salem Ameen and Sunil Vadera.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' A convolutional neu- ral network to classify american sign language finger- spelling from depth and colour images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Expert Systems, 34(3):e12197, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 8 [2] Timothy Anderson.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Split-complex convolutional neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 7 [3] Pierre Buyssens, Abderrahim Elmoataz, and Olivier L´ezoray.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Multiscale convolutional neural networks for vision–based classification of cells.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In Asian Conference on Computer Vision, pages 342–352.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Springer, 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 1 [4] Terrance DeVries and Graham W Taylor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Improved reg- ularization of convolutional neural networks with cutout.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' arXiv preprint arXiv:1708.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='04552, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 7 [5] Cao Dong, Ming C Leu, and Zhaozheng Yin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Ameri- can sign language alphabet recognition using microsoft kinect.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In Proceedings of the IEEE conference on com- puter vision and pattern recognition workshops, pages 44–52, 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 5, 7, 8 [6] Brandon Garcia and Sigberto Alarcon Viesca.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Real-time american sign language recognition with convolutional neural networks.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Convolutional Neural Networks for Vi- sual Recognition, 2:225–232, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 8 [7] Chase Gaudet and Anthony S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Maida.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Removing dimen- sional restrictions on complex/hyper-complex networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In 2021 IEEE International Conference on Image Pro- cessing (ICIP), 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 1, 2, 3, 4, 5, 6, 7 [8] Chase J Gaudet and Anthony S Maida.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Deep quater- nion networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In 2018 International Joint Conference on Neural Networks (IJCNN), pages 1–8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' IEEE, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 2, 3, 4, 5, 6 [9] Ali Hassani, Steven Walton, Nikhil Shah, Abulikemu Abuduweili, Jiachen Li, and Humphrey Shi.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Escaping the big data paradigm with compact transformers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' arXiv preprint arXiv:2104.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='05704, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 7 [10] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Deep residual learning for image recognition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770–778, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 5, 6 [11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Identity mappings in deep residual networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In Eu- ropean conference on computer vision, pages 630–645.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Springer, 2016.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 7 [12] Shima Javanmardi, Seyed-Hassan Miraei Ashtiani, Fons J Verbeek, and Alex Martynenko.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Computer-vision classification of corn seed varieties using deep convolu- tional neural network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Journal of Stored Products Re- search, 92:101800, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 1 [13] Cem Keskin, Furkan Kırac¸, Yunus Emre Kara, and Lale Akarun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Real time hand pose estimation using depth sen- sors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In Consumer depth cameras for computer vision, pages 119–137.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Springer, 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 7, 8 [14] Alex Krizhevsky, Geoffrey Hinton, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Learning mul- tiple layers of features from tiny images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 2009.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 5 [15] Alina Kuznetsova, Laura Leal-Taix´e, and Bodo Rosen- hahn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Real-time sign language recognition using a con- sumer depth camera.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In Proceedings of the IEEE interna- tional conference on computer vision workshops, pages 83–90, 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 7, 8 [16] Cameron E Long.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Quaternion Temporal Convolutional Neural Networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' PhD thesis, University of Dayton, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 1, 3, 7 [17] Huy BD Nguyen and Hung Ngoc Do.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Deep learning for american sign language fingerspelling recognition sys- tem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In 2019 26th International Conference on Telecom- munications (ICT), pages 314–318.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' IEEE, 2019.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 8 [18] T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Parcollet, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Morchid, and G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Linar´es.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Quaternion convolutional networks for heterogeneous image pro- cessing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In IEEE Intl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Conf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' on Acoustics, Speech and Signal Processing (ICASSP), pages 8514–8518, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 1, 2 [19] Titouan Parcollet, Ying Zhang, Mohamed Morchid, Chiheb Trabelsi, Georges Linar`es, Renato De Mori, and Yoshua Bengio.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Quaternion convolutional neural networks for end-to-end automatic speech recognition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' arXiv preprint arXiv:1806.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='07789, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 1, 3, 5 [20] Nicolas Pugeault and Richard Bowden.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Spelling it out: Real-time asl fingerspelling recognition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In 2011 IEEE International conference on computer vision workshops (ICCV workshops), pages 1114–1119.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' IEEE, 2011.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 7, 8 [21] Virender Ranga, Nikita Yadav, and Pulkit Garg.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Amer- ican sign language fingerspelling using hybrid discrete wavelet transform-gabor filter and convolutional neural network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Journal of Engineering Science and Technol- ogy, 13(9):2655–2669, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 8 [22] Lucas Rioux-Maldague and Philippe Giguere.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Sign lan- guage fingerspelling classification from depth and color images using a deep belief network.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In 2014 Canadian Conference on Computer and Robot Vision, pages 92–97.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' IEEE, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 8 [23] Nazmul Shahadat and Anthony S Maida.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Adding quater- nion representations to attention networks for classifica- tion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' arXiv preprint arXiv:2110.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='01185, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 1, 2, 5, 6 [24] Wenjin Tao, Ming C Leu, and Zhaozheng Yin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Amer- ican sign language alphabet recognition using convolu- tional neural networks with multiview augmentation and inference fusion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Engineering Applications of Artificial Intelligence, 76:202–213, 2018.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 7, 8 [25] Chiheb Trabelsi, Olexa Bilaniuk, Ying Zhang, Dmitriy Serdyuk, Sandeep Subramanian, Joao Felipe Santos, Soroush Mehri, Negar Rostamzadeh, Yoshua Bengio, and Christopher J Pal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Deep complex networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' arXiv preprint arXiv:1705.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='09792, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 2, 7 [26] Shweta Upadhyay, RK Sharma, and Prashant Singh Rana.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Sign language recognition with visual attention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Technical report, EasyChair, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 7, 8 [27] Jiasong Wu, Ling Xu, Fuzhi Wu, Youyong Kong, Lotfi Senhadji, and Huazhong Shu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Deep octonion networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Neurocomputing, 397:179–191, 2020.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 7 [28] Qilin Yin, Jinwei Wang, Xiangyang Luo, Jiangtao Zhai, Sunil Kr Jha, and Yun-Qing Shi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Quaternion convolu- tional neural network for color image classification and forensics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' IEEE Access, 7:20293–20301, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 3, 7 [29] Sergey Zagoruyko and Nikos Komodakis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Wide residual networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' arXiv preprint arXiv:1605.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='07146, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 7 [30] Aston Zhang, Yi Tay, Shuai Zhang, Alvin Chan, Anh Tuan Luu, Siu Cheung Hui, and Jie Fu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Beyond fully-connected layers with quaternions: Parameteriza- tion of hypercomplex multiplications with 1/n parame- ters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' arXiv preprint arXiv:2102.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content='08597, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 2, 3 [31] Xuanyu Zhu, Yi Xu, Hongteng Xu, and Changjian Chen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' Quaternion convolutional neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' In Proceed- ings of the European Conference on Computer Vision (ECCV), pages 631–647, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} +page_content=' 1, 3, 7' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/1NE3T4oBgHgl3EQfnQph/content/2301.04623v1.pdf'} diff --git a/29A0T4oBgHgl3EQfNP8E/vector_store/index.faiss b/29A0T4oBgHgl3EQfNP8E/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..73fc3e60114a439f0823d66921db213207de6f79 --- /dev/null +++ b/29A0T4oBgHgl3EQfNP8E/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7f00627fd2863ba3fe5e4aba3bcec49a82e8563add78747097044677b9799c7 +size 3866669 diff --git a/29A0T4oBgHgl3EQfNP8E/vector_store/index.pkl b/29A0T4oBgHgl3EQfNP8E/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..2af46275e97c9dd20e303d23e9b81b6aa18c8047 --- /dev/null +++ b/29A0T4oBgHgl3EQfNP8E/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76f5d66746028570658affa5e6993f7e3ed67377f62e908eefeefae01d72b082 +size 162590 diff --git a/2NE0T4oBgHgl3EQfugGB/vector_store/index.pkl b/2NE0T4oBgHgl3EQfugGB/vector_store/index.pkl new file mode 100644 index 
0000000000000000000000000000000000000000..865d17745c57df4191042584f04a5e4e35f8afaa --- /dev/null +++ b/2NE0T4oBgHgl3EQfugGB/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98ebe609fdd79ad666a3efbed654e7b0363a7fe68784380a81ab5f0f964f4108 +size 877866 diff --git a/4NFAT4oBgHgl3EQflx07/content/2301.08619v1.pdf b/4NFAT4oBgHgl3EQflx07/content/2301.08619v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f3a553b5fb4cda257e19602a048872a664e4afe8 --- /dev/null +++ b/4NFAT4oBgHgl3EQflx07/content/2301.08619v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:665a4be05c9d49fc78e384cf820b6082b199141b4ecbf86711934857167b1152 +size 370373 diff --git a/4NFAT4oBgHgl3EQflx07/vector_store/index.faiss b/4NFAT4oBgHgl3EQflx07/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..d1826fd554dff60149d6aa28610bbd295563cb8d --- /dev/null +++ b/4NFAT4oBgHgl3EQflx07/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac48eba86b40e01ba2ee82a663e84fa432675b439136d490208978238611310a +size 2359341 diff --git a/4NFAT4oBgHgl3EQflx07/vector_store/index.pkl b/4NFAT4oBgHgl3EQflx07/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..8eeb21b6adf66fdc0dc1c831bf0e37a2e10eae58 --- /dev/null +++ b/4NFAT4oBgHgl3EQflx07/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:789420c84370f3fc6c35e40249dcbcfec2b40d563886a9173f7924c46993e1b0 +size 79123 diff --git a/4tE1T4oBgHgl3EQfSwM3/content/tmp_files/2301.03069v1.pdf.txt b/4tE1T4oBgHgl3EQfSwM3/content/tmp_files/2301.03069v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..a8f4d352829f695b21d556232835543315f98bc3 --- /dev/null +++ b/4tE1T4oBgHgl3EQfSwM3/content/tmp_files/2301.03069v1.pdf.txt @@ -0,0 +1,2884 @@ +EXACT HYDRODYNAMIC MANIFOLDS FOR THE LINEARIZED 
+THREE-DIMENSIONAL BOLTZMANN BGK EQUATION +FLORIAN KOGELBAUER AND ILYA KARLIN +Abstract. We perform a complete spectral analysis of the linear three-dimensional +Boltzmann BGK operator resulting in an explicit transcendental equation for the eigen- +values. Using the theory of finite-rank perturbations, we prove that there exists a critical +wave number kcrit which limits the number of hydrodynamic modes in the frequency +space. This implies that there are only finitely many isolated eigenvalues above the es- +sential spectrum, thus showing the existence of a finite-dimensional, well-separated linear +hydrodynamic manifold as a combination of invariant eigenspaces. The obtained results +can serve as a benchmark for validating approximate theories of hydrodynamic closures +and moment methods. +1. Introduction +The derivation of hydrodynamic equations from kinetic theory is a fundamental, yet not +completely resolved, problem in thermodynamics and fluids, dating back at least to part +(b) of Hilbert’s sixth problem [26]. Given the Boltzmann equation or an approximation of +it, can the the basic equations of fluid dynamics (Euler, Navier–Stokes) be derived directly +from the dynamics of the distribution function? +One classical approach is to seek a series expansion in terms of a small parameter, such +as the relaxation time τ or the Knudsen number ε [39]. One widely used expansion is the +Chapman–Enskog series [12], where it is assumed that the collision term scales with ε−1, +thus indicating a (singular) Taylor expansion in ε. Indeed, the zeroth order PDE obtained +this way gives the Euler equation, while the first order PDE reproduces the Navier–Stokes +equation. On the linear level, the Navier–Stokes equation is globally dissipative and decay +of entropy on the kinetic level translates to decay of energy on the fluid level. +For higher-order expansions, however, we are in trouble. 
In [4], it was first shown that +an expansion in terms of Knudsen number can lead to nonphysical properties of the hy- +drodynamic models: At order two (Burnett equation [12]), the dispersion relation shows +a change of sign, thus leading to modes which grow in energy (Bobylev instability). In +particular, the Burnett hydrodynamics are not hyperbolic and there exists no H-theorem +for them [6]. +1 +arXiv:2301.03069v1 [math-ph] 8 Jan 2023 + +2 +FLORIAN KOGELBAUER AND ILYA KARLIN +From a mathematical point of view, of course, there is no guarantee that the expansion +of a non-local operator in frequency space, i.e., an approximation in terms of local (dif- +ferential) operators, gives a good approximation for the long-time dynamics of the overall +system. Among the first to suggest a non-local closure relation was probably Rosenau +[34]. In a series of works (see, e.g., [19, 18, 21] and references therein), Karlin and Gorban +derived explicit non-local closures by essentially summing the Chapman–Enskog series for +all orders. Furthermore, we note that the Chapman–Enskog expansion mixes linear and +nonlinear terms for the full Boltzmann equation since it only considers powers of ε, while +the existence (and approximation) of a hydrodynamic manifold can be performed indepen- +dently of the Knudsen number, for which it only enters as a parameter. +Spectral properties of linearized kinetic equations are of basic interest in thermodynam- +ics and have been performed by numerous authors. Already Hilbert himself was concerned +with the spectral properties of linear integral operators derived from the Boltzmann equa- +tion [25]. +Carleman [8] proved that the essential spectrum remains the same under a +compact perturbation (Weyl’s theorem) in the hard sphere case and was able to estimate +the spectral gap. This result was generalized to a broader class of collision kernels by Grad +[23] and to soft potentials in [7]. 
+For spatially uniform Maxwell molecules, a complete spectral description was derived +in [5] (together with exact special solutions and normal form calculations for the full, +non-linear problem), see also [11]. Famously, in [15], some fundamental properties of the +spectrum of a comparably broad class of kinetic operators was derived. In particular, the +existence of eigenvalue branches and asymptotic expansion of the (small) eigenvalues for +vanishing wave number was derived. We stress, however, that no analysis for large wave +numbers or close to the essential spectrum was performed in [15]. +Let us also comment on the relation to Hilbert’s sixth problem. Along these lines, several +result on the converges to Navier–Stokes (and Euler) equations have been obtained. Al- +ready Grad [24] was interested in this question. In [15], it is also shown that the semi-group +generated by the linearized Euler equation converges - for fixed time - to the semi-group +generated by the linearized Boltzmann equation (and similarly, for the linear Navier–Stokes +semi-group). In [35], convergence of scaled solutions to the Navier–Stokes equation along +the lines of [2] was proved. We also mention the results related to convergence rates to the +equilibrium (hypercoercivity) of the variants of the BGK equation [40, 14]. For an excellent +review on the mathematical perspective of Hilbert’s sixth problem, we refer to [36]. +In this work, we perform a complete spectral analysis for the Bhatnagar–Gross–Krook +(BGK) equation [3] linearized around a global Maxwellian. +The BGK model - despite +being a comparatively simple approximation to the full Boltzmann equation - shares im- +portant features such as decay of entropy and the conservation laws of mass, momentum +and energy [3]. Global existence and estimates of the solution were proved in [32, 33] for + +EXACT HYDRODYNAMICS FROM LINEAR BGK +3 +the full, non-linear BGK system. 
+The single relaxation time τ in the BGK equation will serve as the analog of the Knudsen +number and fundamental parameter in our analysis. Previous work on the full spectrum +of kinetic models together with a hydrodynamic interpretation has been performed in [28] +for the three-dimensional Grad system and in [29] for the linear BGK equation with mass +density only. A similar independent analysis for the one-dimensional linear BGK with one +fluid moment was performed in [10, 9] in the context of grossly determined solutions (in +the sense of [39]), where convergence to the slow manifold is also proven explicitly. While +the results obtained in [10, 9] are proved for the real line (for which the corresponding +eigen-distributions are derived), we will focus on the torus TL, for which we expect a dis- +crete set of eigenvalues. +Indeed, we will give a complete and (up to the solution of a transcendental equa- +tion) explicit description of the spectrum of the BGK equation linearized around a global +Maxwellian. We will show the existence of finitely many discrete eigenvalues above the +essential spectrum as well as the existence of a critical wave number for each family of +modes. More precisely, we prove the following: +Theorem 1.1. The spectrum of the non-dimensional linearized BGK operator L with re- +laxation time τ around a global Maxwellian is given by +σ(L) = +� +−1 +τ + iR +� +∪ +� +N∈Modes +� +|k| 0. More generally, for any n ∈ N, we have the useful formula +� +R +v2ne−Av2 dv = +� π +A +(2n − 1)! ! +(2A)n +. +(2.6) +3. Preliminaries and Formulation of the Problem +We will be concerned with the three-dimensional BGK kinetic equation +∂f +∂t + v · ∇xf = −1 +τ QBGK, +(3.1) +for the scalar distribution function f : T3 +L ×R3 ×[0, ∞) → R+, f = f(x, v, t) and the BGK +collision operator +QBGK = +� +f(x, v, t) − feq(n[f], u[f], T[f]; v) +� +. 
+(3.2) + +EXACT HYDRODYNAMICS FROM LINEAR BGK +5 +Here, T3 +L denotes the three-dimensional torus of length L, the parameter τ > 0 is the +relaxation time, the equilibrium distribution is given by the standard Gaussian +feq(n, u, T; v) = n +�2πkBT +m +�− 3 +2 +e− +m +2kBT |u−v|2 +, +(3.3) +for the molecular mass m and the Boltzmann constant kB, while the five scalar hydrody- +namic variables are given by the number density, +n[f](x, t) = +� +R3 f(x, v, t) dv, +(3.4) +the velocity, +u[f](x, t) = +1 +n[f](x, t) +� +R3 vf(x, v, t) dv, +(3.5) +and the temperature, which is defined implicitly through conservation of energy as +3 +2 +kB +m T[f](x, t)n[f](x, t) + n[f](x, t)|u[f](x, t)|2 +2 += +� +R3 +|v|2 +2 f(x, v, t) dv. +(3.6) +The physical units are given as [kB] = m2kgs−2K−1 and [kBT] = m2kgs−2 respectively. +We introduce the moments of the distribution function f as +M(n)(x, t) = +� +R3 f(x, v, t) v⊗ndv, +(3.7) +where v⊗0 = 1, v⊗1 = v and +v⊗n = v ⊗ ... ⊗ v +� +�� +� +n−times +, +(3.8) +for n ≥ 2 is the n-th tensor power. The moment defined in (3.7) is an n-th order symmetric +tensor, depending on space and time. +The first three moments relate to the hydrodynamic variables through +M(0) = n, +M(1) = nu, +traceM(2) = n +� +|u|2+3kBT +m +� +. +(3.9) +Conversely, we can express the hydrodynamic variables in terms of the moments as +n = M0, +u = M1 +M0 +, +kB +m T = 1 +3 +�traceM2 +M0 +− |M1|2 +M2 +0 +� +. +(3.10) + +6 +FLORIAN KOGELBAUER AND ILYA KARLIN +We can reformulate equation (3.1) as an infinite system of coupled momentum equations +as +� +1 + τ ∂ +∂t +� +M(n) = −τ∇ · M(n+1) + M(n) +eq , +(3.11) +for n ≥ 0, where +M(n) +eq = +� +R3 feq(n[f], u[f], T[f]; v)v⊗n dv. +(3.12) +The special property of the BGK hierarchy is that the first three moment equations reduce +to +∂ +∂tM(0) = −∇ · M(1), +∂ +∂tM(1) = −∇ · M(2), +∂ +∂ttraceM(2) = −trace(∇ · M(3)). 
+(3.13) +In particular, the first three moment equations in terms of the hydrodynamic variables +read +∂ +∂tn = −∇ · (nu), +∂ +∂t(nu) = −∇ · +� +R3 v ⊗ vf dv, +∂ +∂t +�� +R3 +m|v|2 +2 +f dv +� += −∇ · +� +R3 +|v|2 +2 vf dv. +(3.14) +The collision operator QBGK shares some key properties with the collision operator of +the full Boltzmann equation. Namely, we have that +� +R3 QBGK(v) +� +� +1 +v +|v|2 +� +� dv = 0, +(3.15) +as well as the negativity condition +⟨QBGKf, f⟩x,v ≤ 0, +(3.16) +for all f ∈ Hx,v for which the above expression is defined. +We will be interested in the linearized dynamics of (3.1) around a global Maxwellian +φ(v) = n0 +� +2πkBT0 +m +�− 3 +2 +e− m|v|2 +2kBT0 , +(3.17) +for the equilibrium density n0 and the equilibrium temperature T0. Setting +f �→ φ + εf, +(3.18) + +EXACT HYDRODYNAMICS FROM LINEAR BGK +7 +implies that +M0 �→ n0 + εM0, +M1 �→ εM1, +M2 �→ n0 +kBT0 +m Id3×3 + εM2, +(3.19) +and consequently +n �→ n0 + εM0, +u �→ +εM1 +n0 + εM0 +, +T �→ m +3kB +� +3n0 kBT0 +m ++ εtraceM2 +n0 + εM0 +− ε2 +|M1|2 +(n0 + εM0)2 +� +. +(3.20) +Using +∂n +∂ε +���� +ε=0 += M0, +∂u +∂ε +���� +ε=0 += M1 +n0 +, +∂T +∂ε +���� +ε=0 += m +3kB +traceM2 − 3T0 kB +m M0 +n0 +, +(3.21) +we can readily calculate: +∂ +∂ε +���� +ε=0 +feq[φ + εf] = ∂ +∂ε +���� +ε=0 +n +�2πkBT +m +�− 3 +2 +e− +m +2kBT |u−v|2 += n0 +�2πkB +m +�− 3 +2 +e− +m +2kBT0 |v|2 +� +M0 +n0 ++ m +3kB +traceM2 − 3T0 kB +m M0 +n0 +� +−3 +2 +� +T +− 5 +2 +0 ++ T +− 3 +2 +0 +� +− +m +2kBT0 +� � +−2M1 +n0 +· v +� ++T +− 3 +2 +0 +m +3kB +traceM2 − 3T0 kB +m M0 +n0 +|v|2 +� +− m +2kB +� +(−T −2 +0 ) +� +, +(3.22) +which, after regrouping and cancellations, becomes +∂ +∂ε +���� +ε=0 +feq[φ + εf] = +�2πkBT0 +m +�− 3 +2 +e− +m +2kBT0 |v|2 +� +M0 − +m +kBT0 +traceM2 − 3kBT0 +m M0 +2 ++ +� +− m +kBT0 +� +M1 · v + traceM2 − 3 T0kB +m M0 +6 +|v|2 +� m +T0kB +�2� +. 
+(3.23) + +8 +FLORIAN KOGELBAUER AND ILYA KARLIN +Defining the thermal velocity as +vthermal = +� +kB +m T0, +(3.24) +and re-scaling according to +v �→ vthermalv, +(3.25) +implies that +Mn �→ +�kB +m T0 +� 3+n +2 +Mn, +(3.26) +which allows us to simplify +∂ +∂ε +���� +ε=0 +feq[φ+εf] = (2π)−3/2e− |v|2 +2 +� +M0 − traceM2 − 3M0 +2 ++ M1 · v + traceM2 − 3M0 +6 +|v|2 +� +. +(3.27) +Similarly, we re-scale +x �→ Lx, +(3.28) +which implies that x ∈ T3 henceforth. Defining the thermal time +tthermal = L +� m +kBT0 +, +(3.29) +we can re-scale and non-dimensionalize +t �→ ttthermal, +τ �→ τtthermal, +(3.30) +which leads to the linearized, non-dimensional BGK equation +∂f +∂t = −v · ∇xf − 1 +τ f + 1 +τ (2π)−3/2e +−|v|2 +2 +��5 +2 − |v|2 +2 +� +M0 + M1 · v + 1 +6(|v|2−3)traceM2 +� +. +(3.31) +Equation (3.31) will be the starting point for further analysis. For later reference, we also +define the mean free path as +lmfp = τvthermal. +(3.32) +Let us remark that, by equation (3.21), the linearized macro-variables (nlin, ulin, Tlin) are +related to the moments (M0, M1, traceM2) via the matrix transform +� +� +nlin +ulin +Tlin +� +� = v3 +thermal +n0 +� +� +n0 +01×3 +0 +03×1 +vthermalI3×3 +03×1 +−T0 +01×3 +T0 +3 +� +� +� +� +M0 +M1 +traceM2 +� +� . +(3.33) + +EXACT HYDRODYNAMICS FROM LINEAR BGK +9 +4. Spectral Analysis of the linearized BGK operator +In this section, we will carry out a complete spectral analysis of the right-hand side of +(3.31). This will allow us to draw conclusions on the decay properties of hydrodynamic +variables, the existence of a critical wave number and the hydrodynamic closure. After +reformulating the problem in frequency space, we will use the resolvent calculus to formulate +a condition for the discrete spectrum (Subsection 4.1). Then, we will use properties of the +plasma dispersion function (see Appendix) to define a spectral function Γτ|k|, whose zeros +coincide with the discrete, isolated eigenvalues (Subsection 4.2). 
Then, in Subsection 4.3, +using Rouch´e’s Theorem, we prove the existence of a critical wave number kcrit such that +Γτ|k| has no zeros (i.e., there exists no eigenvalues) for |k|> kcrit. Finally, in Subsection +4.4, we take a closer look at the branches of eigenvalues (modes) and their corresponding +critical wave numbers. +4.1. The discrete spectrum of a finite-rank perturbation. To ease notation, we +define five distinguished vectors associated with the hydrodynamic moments as +e0(v) = (2π)− 3 +4 , +e1(v) = (2π)− 3 +4 v1, +e2(v) = (2π)− 3 +4 v2, +e3(v) = (2π)− 3 +4 v3, +e4(v) = (2π)− 3 +4 |v|2−3 +√ +6 +, +(4.1) +which satisfy the orthonormality condition, +⟨ei, ej⟩v = δij, +for +0 ≤ i, j ≤ 4, +(4.2) +where δij is the Kronecker’s delta. To ease notation, we denote the projection onto the +span of {ej}0≤j≤4 as +P5f = +4 +� +j=0 +⟨f, ej⟩vej, +(4.3) +for any f ∈ Hv. The linearized dynamics then takes the form +∂f +∂t = Lf, +(4.4) +for the linear operator +L = −v · ∇x − 1 +τ + 1 +τ P5. +(4.5) +Remark 4.1. Let us recall that any function f ∈ Hv admits a unique expansion as a +multi-dimensional Hermite series: +f(v) = +∞ +� +n=0 +fn : Hn(v), +(4.6) + +10 +FLORIAN KOGELBAUER AND ILYA KARLIN +where +Hn = (−1)ne +|v|2 +2 ∇ne +−|v|2 +2 +, +(4.7) +and fn is an n-tensor. Since the five basis vectors (4.1) appear in the expansion (4.6) via +an orthogonal splitting, we have that +⟨P5f, (1 − P5)f⟩v = 0, +(4.8) +for any f ∈ Hv. Hermite expansions were famously used by Grad in his seminal paper [22] +to establish finite-moment closures. 
+From +⟨Lf, f⟩x,v = ⟨−v · ∇xf − 1 +τ f + 1 +τ P5f, f⟩x,v += +� +T3 +� +R3(−v · ∇xf − 1 +τ f + 1 +τ P5f)fe− |v|2 +2 dxdv += +� +T3 +� +R3 −1 +τ [(1 − P5)f](P5f + (1 − P5)f)e− |v|2 +2 dxdv += −1 +τ ∥(1 − P5)f∥2 +x,v, +(4.9) +where we have assumed that f is sufficiently regular to justify the application of the diver- +gence theorem in x in order to remove the gradient term as well as (4.8), it follows that +the operator L is dissipative and that +ℜσ(L) ≤ 0. +(4.10) +On the other hand, from (4.9) and from ∥1 − P5∥op= 1, since 1 − P5 is a projection as well, +it follows that +⟨Lf, f⟩x,v ≥ −1 +τ ∥f∥2 +x,v. +(4.11) +This shows that any solution to (4.4) has to converge to zero, i.e., the global Maxwellian +is a stable equilibrium up to the conserved quantities from the center mode. On the other +hand, we infer that the overall convergence rate to equilibrium can be at most − 1 +τ , which +immediately implies that there cannot be any eigenvalues below the essential spectrum (see +also the next section). +Let us proceed with the spectral analysis by switching to frequency space. Since x ∈ T3, +we can decompose f in a Fourier series as +f(x, v) = +∞ +� +|k|=0 +ˆf(k, v)eix·k, +(4.12) +for the Fourier coefficients +ˆf(k, v) = +1 +(2π)3 +� +R3 f(x, v)e−ix·k dx. +(4.13) + +EXACT HYDRODYNAMICS FROM LINEAR BGK +11 +In frequency space, the operator (4.5) is conjugated to the linear operator +ˆLk = −iv · kf − 1 +τ f + 1 +τ P5f, +(4.14) +which implies that +σ(L) = +� +k∈Z3 +σ( ˆLk). +(4.15) +Defining +fj = ⟨ej, f⟩v, +(4.16) +we can define the following relations between the moments and the coefficients (4.16): +5 − |v|2 +2(2π) +3 +2 +M0 = 5 − |v|2 +2(2π) +3 +4 +f0 = f0e0 − +√ +6 +2 f0e4, +1 +(2π) +3 +2 +v · M1 = f1e1 + f2e2 + f3e3, +|v|2−3 +6(2π) +3 +2 +traceM2 = e4 +1 +√ +6(2π) +3 +4 +� +R +f|v|2 dv = e4 +1 +√ +6(2π) +3 +4 +�� +R +f(|v|2−3) dv + 3M0 +� += f2e4 + 3 +√ +6f0e4. 
+(4.17) +For compactness, we bundle these five basis polynomials into a single vector +e = (e0, e1, e2, e3, e4). +(4.18) +First, let us take a look at the spectrum of ˆL0. For k = 0, we see that ˆL collapses to a +diagonal operator with five dimensional kernel spanned by {ej}0≤j≤4: +ˆL0ej = −1 +τ (ej − P5ej) = 0, +0 ≤ j ≤ 4. +(4.19) +On the other hand, the operator ˆL0 acts just like − 1 +τ on the orthogonal complement of +span{ej}0≤j≤4. This shows that +σ( ˆL0) = +� +−1 +τ , 0 +� +, +(4.20) +where the eigenspace associated to zero has dimension five, while the eigenspace associated +to − 1 +τ has co-dimension five. +Now, let us analyse ˆLk for k ̸= 0. To ease notation in the following argument, we define +the operator +Skf = v · kf, +(4.21) + +12 +FLORIAN KOGELBAUER AND ILYA KARLIN +for any k ̸= 0, which gives +σ( ˆLk) = −1 +τ − σ +� +iSk − 1 +τ P5 +� += −1 +τ − 1 +τ σ (iτSk − P5) . +(4.22) +Because the resolvent of Sk is just given by multiplication with (v · k − z)−1, we see +immediately that σ(Sk) = R, see also [38]. We define the Green’s function matrices as +GT (z, n, m) = ⟨en, (iτSk − P5 − z)−1em⟩v, +GS(z, n, m) = ⟨en, (iτSk − z)−1em⟩v, +(4.23) +for 0 ≤ n, m ≤ 4 and set GS(z) = {GS(z, n, m)}0≤n≤4, GT (z) = {GT (z, n, m)}0≤n≤4. +By the second resolvent identity, +R(z; A) − R(z; B) = R(z; A)(B − A)R(z; B), +(4.24) +for any operators A, B and z ∈ ρ(A) ∩ ρ(B), we have for A = iτSk and B = iτSk − P5 that +(iτSk − P5 − z)−1 = (iτSk − z)−1 + (iτSk − z)−1P5(iτSk − P5 − z)−1. +(4.25) +Applying equation (4.25) to em for 0 ≤ m ≤ 4 and rearranging gives +(iτSk − P5 − z)−1em = (iτSk − z)−1em + (iτSk − z)−1P5(iτSk − P5 − z)−1em += (iτSk − z)−1em + (iτSk − z)−1 +4 +� +j=0 +⟨(iτSk − P5 − z)−1em, ej⟩vej += (iτSk − z)−1em + +4 +� +j=0 +G∗ +T (z, j, m)(iτSk − z)−1ej, +(4.26) +for z ∈ C \ iR. Thus, the resolvent of iτSk − P5 − z includes the resolvent of iτSk as well +as information from the matrix {GT (z, n, m)}0≤n,m≤4 as coefficients. 
+Taking an inner product of (4.26) with en gives +GT (z, n, m) = GS(z, n, m) + +4 +� +j=0 +GT (z, j, m)⟨en, (iτSk − z)−1ej⟩v += GS(z, n, m) + +4 +� +j=0 +GT (z, j, m)GS(z, n, j) +(4.27) +for 0 ≤ n, m ≤ 4 and z ∈ C \ iR, where in the last step, we have used the symmetry of the +Green’s function matrix. System (4.27) defines twenty-five equations for the coefficients +GT (z, n, m), which can be re-written more compactly as +GT = GS + GSGT , +(4.28) + +EXACT HYDRODYNAMICS FROM LINEAR BGK +13 +or, equivalently, +(Id − GS)GT = GS. +(4.29) +Equation (4.29) can be interpreted as a special case of Krein’s resolvent identity [30]. This +shows that we can solve for the entries of GT unless det(Id − GS) = 0, or, to phrase it +differently, we have that for each wave number k, the discrete spectrum of (iτSk) − P5 can +be used to infer that +σdisc( ˆLk) = −1 +τ − 1 +τ +� +� +�z ∈ C : det +� +� +� +R3 e(v) ⊗ e(v) +e− |v|2 +2 +iτk · v − z dv − Id +� +� = 0 +� +� +� . +(4.30) +An eigenvalue λ of the operator ˆLk is related to the zero z in (4.30) via +z = −τλ − 1. +(4.31) +In particular, the finite-rank perturbation P5 can only add discrete eigenvalues to the spec- +trum and we have that σess(iτSk − P5) = σess(iτSk) = iR. +4.2. Reformulation in terms of the spectral function. We proceed with the spectral +analysis of (4.5) by rewriting the determinant expression in (4.30). To this end, we note +that any wave vector k can be written as +k = Qk(|k|, 0, 0)T , +(4.32) +for some rotation matrix Qk. Defining w = QT +kv, we have that +k · v = Qk(|k|, 0, 0)T · v = (|k|, 0, 0) · w = |k|w1, +(4.33) +while the vector of basis functions e transforms according to +e(v) = (2π)− 3 +4 +� +1, v, |v|2−3 +√ +6 +� += (2π)− 3 +4 +� +1, Qkw, |w|2−3 +√ +6 +� += +� +� +1 +0 +0 +0 +Qk +0 +0 +0 +1 +� +� e(w). 
+(4.34) +This, together with dv = dw from the orthogonality of Qk, implies that +det +� +� +� +R3 e(v) ⊗ e(v) +e− |v|2 +2 +iτk · v − z dv − Id +� +� += det +� +� +� +R3 +� +� +1 +0 +0 +0 +Qk +0 +0 +0 +1 +� +� e(w) ⊗ +� +� +� +� +1 +0 +0 +0 +Qk +0 +0 +0 +1 +� +� e(w) +� +� +e− |w|2 +2 +iτ|k|w1 − z dw − Id +� +� += det +� +� +� +R3 e(w) ⊗ e(w) +e− |w|2 +2 +iτ|k|w1 − z dw − Id +� +� , +(4.35) + +14 +FLORIAN KOGELBAUER AND ILYA KARLIN +where we have used the orthogonality of Qk. +We proceed: +det +� +� +� +R3 e(w) ⊗ e(w) +e− |w|2 +2 +iτ|k|w1 − z dw − Id +� +� = += det +� +��������� +(2π)− 3 +2 +� +R3 +� +� +� +� +� +� +� +� +� +� +1 +w1 +w2 +w3 +|w|2−3 +√ +6 +w1 +w2 +1 +w1w2 +w1w3 +w1 +|w|2−3 +√ +6 +w2 +w1w2 +w2 +2 +w2w3 +w2 +|w|2−3 +√ +6 +w3 +w1w3 +w3w2 +w2 +3 +w3 +|w|2−3 +√ +6 +|w|2−3 +√ +6 +w1 +|w|2−3 +√ +6 +w2 +|w|2−3 +√ +6 +w3 +|w|2−3 +√ +6 +(|w|2−3)2 +6 +� +� +� +� +� +� +� +� +� +� +e− |w|2 +2 +iτ|k|w1 − z dw − Id +� +��������� +, +(4.36) +Integrating out the variables w2 and w3 with the help of (2.5), it follows that +det +� +� +� +R3 e(w) ⊗ e(w) +e− |w|2 +2 +iτ|k|w1 − z dw − Id +� +� = += det +� +������� +(2π)− 3 +2 +� +R +� +� +� +� +� +� +� +� +2π +2πw1 +0 +0 +2π w2 +1−1 +√ +6 +2πw1 +2πw2 +1 +0 +0 +2πw1 +w2 +1−1 +√ +6 +0 +0 +2π +0 +0 +0 +0 +0 +2π +0 +2π w2 +1−1 +√ +6 +2πw1 +w2 +1−1 +√ +6 +0 +0 +2π w4 +1−2w2 +1+5 +6 +� +� +� +� +� +� +� +� +e− +w2 +1 +2 +iτ|k|w1 − z dw1 − Id +� +������� += det +� +��� +1 +√ +2π +� +R +� +� +� +� +1 +w +w2−1 +√ +6 +w +w2 +w w2−1 +√ +6 +w2−1 +√ +6 +w w2−1 +√ +6 +w4−2w2+5 +6 +� +� +� +� +e− w2 +2 +iτ|k|w − z dw − Id +� +��� +� +� +1 +√ +2π +� +R +e− w2 +2 +iτ|k|w − z − 1 +� +� +2 +, +(4.37) + +EXACT HYDRODYNAMICS FROM LINEAR BGK +15 +where we have used the linearity of the integral and properties of the determinant of block +matrices. 
Also, we have used that +� +R2(w2 +1 + w2 +2 + w2 +3 − 3)2e− +w2 +2 +2 − +w2 +3 +2 dw2dw3 += +� +R2(w4 +1 + w4 +2 + w4 +3 + 9 − 6w2 +1 − 6w2 +2 − 6w2 +3 + 2w2 +1w2 +2 + 2w2 +2w2 +3 + 2w2 +1w2 +3)e− +w2 +2 +2 − +w2 +3 +2 dw2dw3 += 2π +� +w4 +1 + 3 + 3 + 9 − 6w2 +1 − 6 − 6 + 2w2 +1 + 2 + 2w2 +1 +� += 2π +� +w4 +1 − 2w2 +1 + 5 +� +. +(4.38) +For the following calculation, let us define the function +Z(z) = +1 +√ +2π +� +R +e− v2 +2 +v − z dv, +(4.39) +for z ∈ C \ R. From (4.9), it suffices to consider Z for ℑz > 0. The symmetry property +Z(z∗) = Z∗(z), however, allows us to extend the function to the whole complex plane (with +a discontinuity at the real line) once an expression for a half-plane is known. +Remark 4.2. Integral expressions of the form (4.39) appear frequently in thermodynamics +and plasma physics [17], where the function (4.39) is called plasma dispersion function +[13] accordingly. Some properties of Z - including a more explicit expression in terms of +complex error functions - are collected in the Appendix. +Using the recurrence relation (A.9), we calculate the first few derivatives of Z in terms +of polynomials and Z itself: +dZ +dz = −1 − zZ, +d2Z +dz2 = z + (z2 − 1)Z, +d3Z +dz3 = 2 − z2 + (3z − z2)Z, +d4Z +dz4 = −5z + z3 + (z4 − 6z2 + 3)Z. +(4.40) +Using the identity +1 +√ +2π +� +R +Hk(v) e− v2 +2 +v − z dv = +1 +√ +2π +� +R +�� +− d +dv +�k +e− v2 +2 +� +dv +v − z = (−1)kk! +√ +2π +� +R +e− v2 +2 +dv +(v − z)k+1 += (−1)k +√ +2π +dk +dzk +� +R +e− v2 +2 +dv +v − z = (−1)k dkZ +dzk , +(4.41) + +16 +FLORIAN KOGELBAUER AND ILYA KARLIN +together with (4.40) allows us to further simplify the determinant expression in (4.36). 
+Indeed, expanding the polynomial matrix in (4.36) in Hermite basis and using (4.41), we +deduce that +1 +√ +2π +� +R +� +� +� +� +1 +w +w2−1 +√ +6 +w +w2 +w w2−1 +√ +6 +w2−1 +√ +6 +w w2−1 +√ +6 +w4−2w2+5 +6 +� +� +� +� +e− w2 +2 +w − ζ dw += +1 +√ +2π +� +R +� +� +� +� +H0(w) +H1(w) +H2(w) +√ +6 +H1(w) +H2(w) + H0(w) +H3(w)+2H1(w) +√ +6 +H2(w) +√ +6 +H3(w)+2H1(w) +√ +6 +H4(w)+4H2(w)+6 +6 +� +� +� +� +e− w2 +2 +w − ζ dw += +� +� +� +� +Z +−Z′ +Z′′ +√ +6 +−Z′ +Z′′ + Z +− Z′′′+2Z′ +√ +6 +Z′′ +√ +6 +− Z′′′+Z′ +√ +6 +Z(4)+4Z′′+6H0 +6 +� +� +� +� += +� +� +� +� +Z +1 + ζZ +ζ+(ζ2−1)Z +√ +6 +1 + ζZ +ζ + ζ2Z +ζ2+(ζ3−ζ)Z +√ +6 +ζ+(ζ2−1)Z +√ +6 +ζ2+(ζ3−ζ)Z +√ +6 +ζ3−ζ+(ζ4−2ζ2+5)Z +6 +� +� +� +� . +(4.42) +To ease notation, we define the function +Γτ|k|(ζ) := det +� +� +� +� +Z(ζ) − iτ|k| +1 + ζZ(ζ) +ζ+(ζ2−1)Z(ζ) +√ +6 +1 + ζZ(ζ) +ζ + ζ2Z(ζ) − iτ|k| +ζ2+(ζ3−ζ)Z(ζ) +√ +6 +ζ+(ζ2−1)Z(ζ) +√ +6 +ζ2+(ζ3−ζ)Z(ζ) +√ +6 +ζ3−ζ+(ζ4−2ζ2+5)Z(ζ) +6 +− iτ|k| +� +� +� +� += 1 +6 +� +ζ + 6i|k|3τ 3 − ζ(ζ2 + 5)|k|2τ 2 + 2i(ζ2 + 3)|k|τ ++Z(ζ)(ζ2 − (ζ4 + 4ζ2 + 11)|k|2τ 2 + 2iζ3|k|τ − 5) − 4iZ2(ζ)((ζ2 + 1)|k|τ − iζ) +� +, +(4.43) +which allows us to conclude that +det +� +� +� +R3 e(w) ⊗ e(w) +e− |v|2 +2 +iτk · v − z dv − Id +� +� = +1 +(i|k|τ)5 (Z(ζ) − iτ|k|)2Γτ|k|(ζ) +���� +ζ= +z +i|k|τ +, +(4.44) +by the scaling properties of the determinant function. Consequently, from (4.30) and (4.31) +we deduce that +σdisc( ˆLk) = +� +λ ∈ C : Γτ|k| +�−τλ − 1 +i|k|τ +� += 0 +� +∪ +� +λ ∈ C : Z +�−τλ − 1 +i|k|τ +� += iτ|k| +� +. (4.45) +Typical spectra (4.45) for different wave numbers are shown in Figures 4.1 - 4.3. +The +explicit transcendental equation (4.45) determining the discrete spectrum is the first main + +EXACT HYDRODYNAMICS FROM LINEAR BGK +17 +-π +-π/2 +0 +π/2 +π +0.1 +1. +10. +100. +(a) |k|= 1 +-π +-π/2 +0 +π/2 +π +0.1 +1. +10. +100. +(b) |k|= +√ +2 +Figure 4.1. Argument plot of the spectral function (4.44) for τ = 0.5 and +different values of |k|. 
The zeros of the function (4.44) in the complex plane +define eigenvalues of the linearized BGK operator. These are points, were +a small, counter-clockwise loop runs through the whole rainbow according +to multiplicity. +result of our paper. It will allow us to draw further conclusions about the discrete (hydro- +dynamic) spectrum. +4.3. Existence of a Critical Wave Number and Finiteness of the Hydrodynamic +Spectrum. Next, let us prove that there exists a critical wave number kcrit, such that +σdisc( ˆLk) = ∅, +for |k|> kcrit. +(4.46) +Proof. First, let us recall that any discrete eigenvalue λ of ˆLk (and hence of L) satisfies +− 1 +τ < ℜλ ≤ 0, +(4.47) +by (4.9), which we will assume henceforth (of course, it would in fact follow from a slightly +more detailed analysis of the following). Since λ and ζ are related by +λ = −i|k|τζ + 1 +τ +, +(4.48) +this implies that ℜλ = |k|ℑζ − 1 +τ and consequently +0 < ℑζ ≤ +1 +τ|k|. +(4.49) + +18 +FLORIAN KOGELBAUER AND ILYA KARLIN +-π +-π/2 +0 +π/2 +π +0.1 +1. +10. +100. +(a) |k|= +√ +3 +-π +-π/2 +0 +π/2 +π +0.1 +1. +10. +100. +(b) |k|= +√ +6 +Figure 4.2. Argument plot of the spectral function (4.44) for τ = 0.5 and +different values of |k|. The zeros of the function (4.44) in the complex plane +define eigenvalues of the linearized BGK operator. These are points, were +a small, counter-clockwise loop runs through the whole rainbow according +to multiplicity. As we approach the critical wave number, the zeros move +closer and closer to the essential spectrum (ℜλ = − 1 +τ ) +. +Our strategy is to apply Rouch´e’s theorem to the function Γτ|k| by splitting it into a +dominant part plus an (asymptotically) small part. +To this end, we can focus on the +family of rectangles Ra = {−a, a, a + i 1 +τ|k|, −a + i 1 +τ|k|} for a > 0. First, let us consider the +asymptotics of Γτ|k| in ζ for fixed τ|k|. 
+Since we are focused on the upper half-plane, we can consider Z+ defined in (A.10) as an +analytic continuation together with its limit on the real line. In particular, we see from + +EXACT HYDRODYNAMICS FROM LINEAR BGK +19 +-π +-π/2 +0 +π/2 +π +0.1 +1. +10. +100. +(a) |k|= +√ +8 +-π +-π/2 +0 +π/2 +π +0.1 +1. +10. +100. +(b) |k|= 3 +Figure 4.3. Argument plot of the spectral function (4.44) for τ = 0.5 and +different values of |k|. The zeros of the function (4.44) in the complex plane +define eigenvalues of the linearized BGK operator. These are points, were +a small, counter-clockwise loop runs through the whole rainbow according +to multiplicity. Since the wave number is above kcrit, there exist, indeed, +no zeros. +the asymptotics (A.15) that +Γτ|k|(z) ∼ 1 +6 +� +ζ + 6i|k|3τ 3 − ζ(ζ2 + 5)|k|2τ 2 + 2i(ζ2 + 3)|k|τ ++Z(ζ)(ζ2 − (ζ4 + 4ζ2 + 11)|k|2τ 2 + 2iζ3|k|τ − 5) − 4iZ2(ζ)((ζ2 + 1)|k|τ − iζ) +� +∼ 1 +6 +� +ζ + 6i|k|3τ 3 − ζ(ζ2 + 5)|k|2τ 2 + 2i(ζ2 + 3)|k|τ +− +∞ +� +n=0 +(2n − 1)! ! +ζ2n+1 +(ζ2 − (ζ4 + 4ζ2 + 11)|k|2τ 2 + 2iζ3|k|τ − 5) +−4i +� +− +∞ +� +n=0 +(2n − 1)! ! +ζ2n+1 +�2 +((ζ2 + 1)|k|τ − iζ) +� +� , +(4.50) + +20 +FLORIAN KOGELBAUER AND ILYA KARLIN +which, after rearranging and regrouping higher-order terms in ζ−1, gives +Γτ|k|(z) ∼ 1 +6 +� +ζ + 6i|k|3τ 3 − ζ(ζ2 + 5)|k|2τ 2 + 2i(ζ2 + 3)|k|τ +− (ζ−1 + ζ−3)(ζ2 − (ζ4 + 4ζ2 + 11)|k|2τ 2 + 2iζ3|k|τ − 5) + O(|ζ|−1) +−4iζ−2((ζ2 + 1)|k|τ − iζ) +� ++ O(|ζ|−2) +∼ 1 +6 +� +ζ + 6i|k|3τ 3 − |k|2τ 2ζ3 − 5|k|2τ 2ζ + 2i|k|τζ2 + 6i|k|τ +− ζ + |k|2τ 2ζ3 + 4|k|2τ 2ζ + 11|k|2τ 2ζ−1 − 2i|k|τζ2 − 5ζ−1 +− ζ−1 + |k|2τ 2ζ + 4|k|2τ 2ζ−1 + 11|k|2τ 2ζ−2 − 2i|k|τ − 5ζ−3 +−4i|k|τ − 4i|k|τζ−2 − 4ζ−1 + O(|ζ|−1) +� +∼ i(|k|τ)3 + O(|ζ|−1), +(4.51) +for |arg(ζ)|≤ π +2 − δ, +ζ → ∞, for any real number 0 < δ ≤ π +2 . +Remark 4.3. 
It is a quite remarkable property of the spectral function Γτ|k| that all the +polynomial terms (up to order four) cancel exactly with the negative-power terms in the +asymptotic expansion (A.15) to give a constant asymptotic value in the limit. This is due +to a subtle fine-tuning of the numerical coefficients of the polynomials. This property also +guarantees the existence of a critical wave number (and hence implies that there are only +finitely many discrete eigenvalues above the essential spectrum). At the outset, it is by no +means clear that the spectrum should exhibit this cancellation property. Indeed, numerical +investigations actually leave this question unanswered [27]. +Let us start with estimating Γτ|k| − i(|k|τ)3 on the real line. Because x �→ |Γτ|k|(x) − +i(|k|τ)3| is an even function for x ∈ R, we can focus on x > 0. Since Γτ|k|(x) → i(|k|τ)3 +as x → ∞, we know that x �→ |Γτ|k|(x) − i(|k|τ)3| is bounded on the real line. Since +Γτ|k|(x) − i(|k|τ)3 only contains powers of |k| up to order two, we know that there exists +a k1 > 0 such that +|Γτ|k|(x) − i(|k|τ)3|< (|k|τ)3, +(4.52) +for all x ∈ R and all |k|> k1. +By the same token, we conclude that x �→ |Γτ|k|(x + +i +|k|τ ) − i(|k|τ)3|, is bounded for x ∈ R +since (4.50) holds in cone containing the real axis. Therefore, since again Γτ|k|(x + +i +|k|τ ) − +i(|k|τ)3 is bounded for x ∈ R, there exists a k2 > 0 such that +����Γτ|k| +� +x + +i +|k|τ +� +− i(|k|τ)3 +���� < (|k|τ)3, +(4.53) +for all x ∈ R and all |k|> k2. +Clearly, an estimate of the form (4.53) for all x ∈ R, +0 ≤ y ≤ +1 +τ|k| and |k|> k3 holds true by compactness and the decay properties of Γτ|k|. +This shows that, for |k| large enough, we can bound the function Γτ|k| − i(|k|τ)3 on the +rectangle Ra for any a > 0 by the modulus of i(|k|τ)3, which has no zeros in the strip at +all (in particular, not in the strip 0 ≤ ℑζ ≤ +1 +τ|k|). 
For |k| large enough, Rouch´e’s theorem + +EXACT HYDRODYNAMICS FROM LINEAR BGK +21 +(a) On the real line +(b) For ℑζ = +1 +τ|k| +Figure 4.4. The function ζ �→ |Γτ|k|(ζ) − i(|k|τ)3| on the real line and on +the line ℑζ = +1 +τ|k| for τ = 0.5 and |k|= 1 (solid lines) compared to (|k|τ)3 +(dashed lines). +(a) On the real line +(b) For ℑζ = +1 +τ|k| +Figure 4.5. The function ζ �→ |Γτ|k|(ζ) − i(|k|τ)3| on the real line and on +the line ℑζ = +1 +τ|k| for|k|= 4 (solid lines) compared to (|k|τ)3 (dashed lines). +then implies that Γτ|k| cannot have any zeros for 0 ≤ ℑζ ≤ +1 +τ|k| either. +This proves the claim. +□ +Now, let us prove that +Γ2(λ) := +1 +(i|k|τ)3 Γτ|k| +�−τλ − 1 +i|k|τ +� +(4.54) +has exactly three zeros (one real, two complex conjugate, which we will prove later) for |k| +small enough. + +TiK +6 +5 +4 F +3 E +2 +4 +6 +8 +10[riki(x)-ik33 +0.6 +0.5 +0.4 +0.3 +0.2 +0.1 +2 +4 +6 +8 +100.14 +0.12 +0.10 +0.08 +2 +6 +8 +10[riki(x)-ik33 +5 +46 +3 E +2 +4 +6 +8 +1022 +FLORIAN KOGELBAUER AND ILYA KARLIN +Proof. 
To this end, we again use the asymptotic expansion (A.15) up to order three for the +limit |k|→ 0 together with expansion similar to those derived in (4.50) and (4.51): +Γ2(λ) ∼ +1 +6(i|k|τ)3 +� +ζ + 6i|k|3τ 3 − ζ(ζ2 + 5)|k|2τ 2 + 2i(ζ2 + 3)|k|τ ++ Z(ζ)(ζ2 − (ζ4 + 4ζ2 + 11)|k|2τ 2 + 2iζ3|k|τ − 5) +−4iZ2(ζ)((ζ2 + 1)|k|τ − iζ) +� ��� +ζ= −τλ−1 +i|k|τ +∼ +1 +6(i|k|τ)3 +� +ζ + 6i|k|3τ 3 − ζ(ζ2 + 5)|k|2τ 2 + 2i(ζ2 + 3)|k|τ ++ (−ζ−1 − ζ−3 − 3ζ−5 + O(|ζ|−7))(ζ2 − (ζ4 + 4ζ2 + 11)|k|2τ 2 + 2iζ3|k|τ − 5) +−4i(−ζ−1 − ζ−3 − 3ζ−5 + O(|ζ|−7))2((ζ2 + 1)|k|τ − iζ) +� ��� +ζ= −τλ−1 +i|k|τ +, +(4.55) +which, after plugging in the transformation (4.48), gives +Γ2(λ) ∼ +1 +6(i|k|τ)3 +� +O(|ζ|−3)(|k|τ)2 + 6i(|k|τ)3 + (|k|τ)2 � +18ζ−1 + 23ζ−3 + 33ζ−5� +−2i|k|τ +� +9ζ−2 + 18ζ−4 + 26ζ−6 + 30ζ−8 + 18ζ−10� +− +� +6ζ−3 + 13ζ−5 + 24ζ−7 + 36 +�� ��� +ζ= −τλ−1 +i|k|τ +∼ +1 +6(i|k|τ)3 +� +6i(|k|τ)3 + 18i(|k|τ)3(−τλ − 1)−1 − 18i(|k|τ|)(i|k|τ)2(−τλ − 1)−2 +−6(i|k|τ)3(−τλ − 1)−3 + O(|k|4) +� +∼ − +λ3 +(λτ + 1)3 + O(|k|), +(4.56) +i.e., in the limit |k|→ 0, the spectral function (4.43) has a triple zero at λ = 0. The cubic +scaling in |k| in front of the above expression cancels exactly with the terms inside the +bracket, leaving only the term λ3 in the limit |k|→ 0. This is consistent with the spectrum +of ˆL0 containing zero as an isolated eigenvalue, see (4.20). By continuity of the spectrum, +this implies that the there will emanate exactly three discrete eigenvalues as zeros of the +spectral function Γτ|k|. +□ +4.4. Hydrodynamic Modes and their Corresponding Critical Wave Numbers. +Now, let us take a closer look at the eigenvalues. From (4.43), it follows immediately that +there exists a sequence of real eigenvalue of algebraic multiplicity two which we call shear +mode and denote as |k|�→ λshear(|k|). +A closer look at (4.43) reveals that the function Γτ|k| maps imaginary numbers to imaginary +numbers (since also Z|iR⊆ iR by (A.6)). As a consequence, Γ2(λ) maps real numbers to +real numbers. 
This shows that, together with the above considerations, that, for each wave +number small enough, there exists exactly one real zero and two complex conjugated zeros. + +EXACT HYDRODYNAMICS FROM LINEAR BGK +23 +Consequently, apart from the shear mode, there exists a sequence of pairs of complex +conjugated eigenvalues which we call acoustic modes and denote as |k|�→ λac(|k|) and +|k|�→ λ∗ +ac(|k|). Figure 4.6 shows the distribution of acoustic modes for a given relaxation +time and varying wave number. +Furthermore, there exists another simple, real eigenvalue called diffusion mode which we +denote as |k|�→ λdiff(|k|). Each mode has its own critical wave number. In conclusion, the +spectrum is given by +σ( ˆLk) = +� +−1 +τ + iR +� +∪ {λshear(|k|), λdiff(|k|), λac(|k|), λ∗ +ac(|k|)}, +(4.57) +for |k| smaller than the respective critical wave number. +Remark 4.4. We note that the eigenvalues (and hence the spectrum) depends on wave +number only through τ|k|. This implies that, while the eigenvectors depend on the full +wave vector k, the form of the spectrum only depends on the dimensionless parameter τ|k| +and the existence of the hydrodynamic manifold (as a linear combination of eigenvectors) +is independent of the relaxation time. If the relaxation time decreases, the critical wave +number of each mode is increased, thus allowing for more eigenvalues in each family of +modes. Consequently, decreasing the relaxation time increases the (finite) dimension of +the hydrodynamic manifold. +In the limit τ → 0, the eigenvalues accumulate at the essential spectrum and we cannot +separate a hydrodynamic manifold any longer, since the corresponding spectral projection +does not exist (no closed contour can be defined that encircles the set of discrete eigenvalues, +while not intersecting the essential spectrum). +To finish the spectral analysis, let us derive some information about the critical wave +number of the four hydrodynamic modes. 
Since |Z|≤ � π +2 with equality exactly at zero +(continuously extended from both sides), we immediately conclude that +kcrit(λshear) = +�π +2 +1 +τ ≈ 1.253311 +τ . +(4.58) +from equation (4.45). This is consistent with the result obtained in [29] (equation (2.53) +in [29]). +Since the diffusion mode is real, and wanders from zero to − 1 +τ as |k| increases, we can +recover the critical wave number by taking the limit λ → − 1 +τ (on the branch Z+) in (4.43). +Since limζ→0,ℑζ>0 Z(ζ) = i�π +2 , see (A.14), we obtain the critical wave number kcrit(λdiff) +as a zero of the cubic polynomial +6(kτ)3 − 11 +�π +2 (kτ)2 + (6 + 2π) kτ − 5 +�π +2 = 0. +(4.59) +The only real solution is approximately given by +kcrit(λdiff) ≈ 1.356031 +τ . +(4.60) + +24 +FLORIAN KOGELBAUER AND ILYA KARLIN +Figure 4.6. The acoustic modes for τ = 0.001 and wave numbers up to +the critical wave number together withe the vertical line ℜλ = − 1 +τ +Now, let us turn to the acoustic mode. We know that at the critical wave number, the two +complex conjugated acoustic modes will merge into the essential spectrum. This happens +when ℜλ = − 1 +τ . So, let us assume that λ = − 1 +τ − i|k|x, which amount to setting ζ = x in +(4.43). We obtain two equations (real and imaginary part of Γτ|k|(x)): +1 +12e−x2 � +erfi +� x +√ +2 +� �√ +2π(τ|k|)2e +x2 +2 � +x4 + 4x2 + 11 +� +− 8πτ|k| +� +x2 + 1 +� +− +√ +2πe +x2 +2 � +x2 − 5 +�� +−4πxerfi +� x +√ +2 +�2 +− 2x +� +ex2 � +(τ|k|)2 � +x2 + 5 +� +− 1 +� ++ +√ +2πτ|k|e +x2 +2 x2 − 2π +�� += 0, +1 +12e−x2 +� +−4πτ|k| +� +x2 + 1 +� +erfi +� x +√ +2 +�2 ++ erfi +� x +√ +2 +� � +8πx − 2 +√ +2πτ|k|e +x2 +2 x3 +� ++ 4τ|k|ex2 � +3τ|k|2+x2 + 3 +� ++ +√ +2πe +x2 +2 � +− +� +(τ|k|)2 � +x4 + 4x2 + 11 +�� ++ x2 − 5 +� ++ 4πτ|k| +� +x2 + 1 +�� += 0, +(4.61) +for x ∈ R. The zero sets of equations (4.61) are shown in Figure 4.7. 
+Solving system (4.61) numerically gives the following approximation for the critical wave +number of the acoustic mode: +kcrit(λac) = kcrit(λ∗ +ac) ≈ 1.311761 +τ . +(4.62) +Remark 4.5. The critical wave numbers obtained before depend inversely on the (non- +dimensional) relaxation parameter. Transforming back to physical units, we see that the + +Im(>) +1500 +1000 +500 +Re(^) +-1000 +800 +600 +400 +-200 +500 +-1000 +-1500EXACT HYDRODYNAMICS FROM LINEAR BGK +25 +Figure 4.7. The zero sets of equations (4.61). +The intersection of the +solid line (ℜΓτ|k|(x)) with the dashed line (ℑΓτ|k|(x)) gives the critical wave +numbers for the acoustic modes (and the diffusion mode on the real line as +well). +critical wave number is numerically proportional to the inverse mean-free path (3.32). +Indeed, we obtain that +kcrit ∼ +� +kBT0 +m +1 +τphys +∼ +1 +lmfp +. +(4.63) +5. Linear Hydrodynamic Manifolds +In this section, we give a description of the hydrodynamic manifolds together with +their respective dynamics. We define the hydrodynamic manifold through the following +properties: +(1) It contains an appropriately scaled, spatially independent stationary distribution +(e.g. global Maxwellian) as a base solution +(2) The projection onto the hydrodynamic moments along the manifold provide a clo- +sure of the hydrodynamic moments (mass-density, velocity and temperature) +(3) It attracts all trajectories in the space of probability-density functions (which are +close enough to the base solution) exponentially fast, thus acting as a slow manifold +(4) It is unique. +From the explicit analysis in the previous section, we know that the addition of P5 only +adds finitely many discrete eigenvalues to the spectrum. Let us take a closer look at their + +.5 +0 +0.5 +0026 +FLORIAN KOGELBAUER AND ILYA KARLIN +associated eigenvectors. 
+For each wave number k ∈ Z3, the eigenvectors associated to +λN(τ|k|), N ∈ Modes, where Modes = {diff, shear, ac, ac∗}, satisfy the equation +− iv · k ˆfeig +N,j − 1 +τ +ˆfeig +N,j + 1 +τ P5 ˆfeig +N,j = λN(τ|k|) ˆfeig +N,j, +(5.1) +where 1 ≤ j ≤ µN(|k|) denotes the geometric multiplicity of λN(τ|k|). Defining +αN,j,l(k) := ⟨ ˆfeig +N,j,k(v, k), el(v)⟩v, +(5.2) +we can rewrite (5.1) as +ˆfeig +N,j(v, k) = +1 +iτk · v + 1 + τλN(τ|k|) +4 +� +l=0 +αN,j,l(k)el(v). +(5.3) +To omit cluttering in the notation, we will suppressed the dependence of αN,j,l on k. Taking +an inner product with ep(v) in (5.3) gives +αN,j,p = +4 +� +l=0 +αN,j,l +� +R3 el(v)ep(v) +e− |v|2 +2 +iτk · v + 1 + τλN(τ|k|) dv, +(5.4) +which is equivalent to the non-invertibilty of the matrix (Id − GS) in equation (4.29) and +(4.30) for z = −1 − τλN(τ|k|). Indeed, denoting αN,j = (αN,j,0, .., αN,j,4), it follows that +αN,j ∈ ker((Id − GS))|z=−1−τλN(τ|k|). +(5.5) +This defines the eigenvector (5.3) for each wave number k and each mode N completely. +To obtain the closure relation for the linearized hydrodynamic variables (nlin, ulin, Tlin), +we define a solution to the linearized dynamics (4.4) as +fhydro(x, v, t) = +� +|k|≤kcrit +� +N∈Modes +µN(k) +� +j=1 +ˆfeig +N,j(v, k)eλN(τ|k|)t+ik·x, +(5.6) +where we set +ˆfeig +N,j,k = 0, +if |k|> kcrit(λN), +(5.7) +and µN(k). +Following (3.33), let +Θ := (2π) +3 +4 v3 +thermal +n0 +� +� +n0 +01×3 +0 +03×1 +vthermalI3×3 +03×1 +−T0 +01×3 +T0 +3 +� +� , +(5.8) +denote the matrix that realized the linear coordinate change +(nlin, ulin, Tlin)T = Θe. +(5.9) + +EXACT HYDRODYNAMICS FROM LINEAR BGK +27 +On the hydrodynamic manifold defined by (5.7), the variables (nlin, ulin, Tlin) evolve ac- +cording to an explicit (non-local) system. 
Indeed, in frequency space, denoting the Fourier +coefficients of (nlin, ulin, Tlin) as (ˆnlin, ˆulin, ˆTlin) we find that +(ˆnlin, ˆulin, ˆTlin) = Θdiag(⟨ ˆfhydro, e⟩)e, +(5.10) +which, setting αN = �µN +j=1 αN,j and defining +α := [αshear, αdiff, αac, αac∗], +(5.11) +as well as Λ = diag(λshear, λdiff, λac, λac∗), λ = etΛ, can be written more explicitly as +(ˆnlin, ˆulin, ˆTlin) = Θdiag(αλ)e. +(5.12) +We can invert for e, +e = (Θdiag(αλ))−1(ˆnlin, ˆulin, ˆTlin), +(5.13) +and, finally, taking a time derivative, we arrive at +∂ +∂t(ˆnlin, ˆulin, ˆTlin) = Θdiag(αΛλ)(Θdiag(αλ))−1(ˆnlin, ˆulin, ˆTlin). +(5.14) +This defines a (non-local) closure to the linearized dynamics (3.1). +Since - up to the +conserved quantities (3.15) - any solution approaches the slow dynamics given by (5.7) +exponentially fast in time, the closure (5.14) defines the unique, global, hydrodynamic +limit of (3.1). +6. Conclusion and Further Perspectives +We have given a complete and (up to the solution of a transcendental equation) explicit +description of the spectrum of the three-dimensional BGK equation linearized around a +global Maxwellian. Further, we identified (and therefore confirmed) the existence of three +families of modes (shear, diffusion and acoustic) and we gave a description of critical wave +numbers. The analysis allowed us to infer that the discrete spectrum consists of a finite +number of eigenvalues, thus implying that the dispersion relation remains bounded also for +the acoustic modes. +Let us give an outlook on some future lines of research in this context. We expect that +the results obtained in this paper are explicit enough to carry out a comparison of viscous +dissipation versus capillarity as carried out in [37] for the three-dimensional Grad system. +Furthermore, the explicit knowledge of the spectral function (4.43) allows us to infer more +refined approximations to the exact non-local hydrodynamics. 
This will involve expansions +not in terms of relaxation time or wave number, but much rather in terms of the variable +ζ in (4.43). This could also improve present numerical methods [27]. +Finally, the spectral properties of the linear three-dimensional BGK equation will also serve +as the basis for nonlinear analysis in terms of invariant manifolds. Indeed, the fact that the +discrete spectrum is well separated from the essential spectrum allows us to define a spectral +projection for the whole set of eigenvalues, thus giving the first-order approximation (in +terms of nonlinear deformations) to the hydrodynamic manifolds. In particular, we expect + +28 +FLORIAN KOGELBAUER AND ILYA KARLIN +that the theory of thermodynamic projectors [20] may be helpful in proving the nonlinear +extension. +Acknowledgement +This work was supported by European Research Council (ERC) Advanced Grant no. +834763-PonD (F.K. and I.K.). +Data Availability Statement +All data generated or analysed during this study are included in this published article +(and its supplementary information files). +Appendix A. Some Properties of the Plasma Dispersion Function +In the following, we collect some properties of the integral expression (4.39). In partic- +ular, to evaluate the integral in (4.39) in terms of error functions, we rely on the identities +in [1, p.297]. Let +w(z) = e−z2(1 − erf(−iz)), +z ∈ C, +(A.1) +which satisfies the functional identity +w(−z) = 2e−z2 − w(z), +z ∈ C. +(A.2) +Function (A.1) is called Faddeeva function and is frequently encountered in problems re- +lated to kinetic equations [17]. We then have that +w(z) = i +π +� +R +e−s2 +z − s ds, +ℑz > 0, +(A.3) +and, by relation (A.2), we have for ℑz < 0: +i +π +� +R +e−s2 +z − s ds = − i +π +� +R +e−s2 +(−z) + s ds += − i +π +� +R +e−s2 +(−z) − s ds += −w(−z) += e−z2[−1 − erf(−iz)]. +(A.4) + +EXACT HYDRODYNAMICS FROM LINEAR BGK +29 +(a) Argument Plot of Z +(b) Modulus-Argument Plot of Z +Figure A.1. 
Complex plots of the function Z. +Consequently, we obtain +� +R +1 +s − z e− s2 +2 ds = +� +R +e−s2 +s − +z +√ +2 +ds += iπ i +π +� +R +e−s2 +z +√ +2 − s ds += +� +� +� +iπe− z2 +2 +� +1 − erf +� +−iz +√ +2 +�� +, +if ℑz > 0, +iπe− z2 +2 +� +−1 − erf +� +−iz +√ +2 +�� +, +if ℑz < 0, +(A.5) +where in the first step, we have re-scaled s �→ +√ +2s in the integral. Written more compactly, +we arrive at +Z(z) = i +�π +2 e− z2 +2 +� +sign(ℑz) − erf +�−iz +√ +2 +�� +, +ℑz ̸= 0. +(A.6) +An an argument plot together with an modulus-argument plot of Z are shown in Figure +A.1. +Clearly, Z is discontinuous across the real line (albeit that Z|R exists in the sense +of principal values as the Hilbert transform of a real Gaussian [13]). The properties +|Z(z)|≤ +�π +2 , for z ∈ C \ R, +0 < arg Z(z) < π for ℑ(z) > 0, +−π < arg Z(z) < 0 for ℑ(z) < 0, +(A.7) +are easy to show and can be read off from the plots (A.1) directly as well. +Function (A.6) satisfies an ordinary differential equation (in the sense of complex analytic + +4 +100= +T +2 +元/2 +10= +0 +- +-元/2 +-2 +0.1 = +-元 +-4 +2 +0 +2 +4Im() +0 +100 +1. +元/2 +-5/ +-0 +1.0 +Zo() +一元2 +0.5 +0.1# +5 +0 +Re() +530 +FLORIAN KOGELBAUER AND ILYA KARLIN +functions) on the upper and on the lower half-plane. Indeed, integrating (4.39) by parts +gives +1 = +1 +√ +2π +� +R +(v − z) e− v2 +2 +v − z dv = −zZ + +1 +√ +2π +� +R +v e− v2 +2 +v − z dv += −zZ − +1 +√ +2π +� +R +e− v2 +2 +(v − z)2 dv = −zZ − d +dz Z, +(A.8) +which implies that Z satisfies the differential equation +d +dz Z = −zZ − 1, +(A.9) +for z ∈ C \ R. Formula (A.9) can also be used as a recurrence relation for the higher +derivatives of Z. +Since we will be interested in function (A.6) for ℑz positive and negative as global functions, +we define +Z+(z) = i +�π +2 e− z2 +2 +� +1 − erf +�−iz +√ +2 +�� +, +Z−(z) = i +�π +2 e− z2 +2 +� +−1 − erf +�−iz +√ +2 +�� +, +(A.10) +for all z ∈ C. 
Both functions can be extended to analytic functions on the whole complex +plane via analytic continuation. +Recall that the error function has the properties that +erf(−z) = −erf(z), +erf(z∗) = erf(z)∗, +(A.11) +for all z ∈ C, which implies that for x ∈ R, +erf(ix) = −erf(−ix) = −erf(ix)∗, +(A.12) +i.e, the error function maps imaginary numbers to imaginary numbers. Defining the imag- +inary error function, +erfi(z) := −ierf(iz), +(A.13) +for z ∈ C, which, by (A.12) satisfies erfi|R⊂ R, it follows that for x ∈ R: +ℜZ+(x) = − +�π +2 e− x2 +2 erfi +� x +√ +2 +� +, +ℑZ+(x) = − +�π +2 e− x2 +2 , +(A.14) +similarly for Z−(x). +Next, let us prove the following asymptotic expansion of Z+: +Z+(z) ∼ − +∞ +� +n=0 +(2n − 1)! ! +z2n+1 +, +for |arg(z)|≤ π +2 − δ, +z → ∞, +(A.15) + +EXACT HYDRODYNAMICS FROM LINEAR BGK +31 +for any 0 < δ ≤ π +2 . The proof will be based on a generalized version of Watson’s Lemma +[41]. To this end, let us define the Laplace transform +L[f](z) = +� ∞ +0 +f(x)e−zx dx, +z ∈ C, +(A.16) +of an integrable function f : [0, ∞) → C. +Lemma A.1. [Generalized Watson’s Lemma] Assume that (A.16) exists for some z = z0 ∈ C +and assume that f admits an asymptotic expansion of the form +f(x) = +N +� +n=0 +anxβn−1 + o(xβN−1), +x > 0, +x → 0, +(A.17) +where an ∈ C and βn ∈ C with ℜβ0 > 0 and ℜβn > ℜβn−1 for 1 ≤ n ≤ N. Then L[f](z) +admits an asymptotic expansion of the form +L[f](z) = +N +� +n=0 +anΓ(βn)z−βn + o(z−βN ), +v, +z → ∞, +(A.18) +for any real number 0 < δ ≤ π +2 , where Γ is the standard Gamma function. +For a proof of the above Lemma, we refer e.g. to [16]. Classically, Lemma (A.1) is +applied to prove that the imaginary error function admits an asymptotic expansion for +x ∈ R of the form +erfi(x) ∼ ex2 +√πx +∞ +� +k=0 +(2k − 1)! ! +(2x2)k +, +for x > 0, +x → ∞, +(A.19) +see also [31], based on the classical version of Watson’s Lemma, whose assumptions are, +however, unnecessarily restrictive [43]. 
+For completeness, we recall the derivation of (A.15) based on Lemma A.1. First, let us +rewrite erfi as a Laplace transform using the change of variables t = √1 − s with dt = +ds +2√1−s +erfi(z) = +� 1 +0 +d +dterfi(tz) dt = 2z +√π +� 1 +0 +et2z2 dt = 2z +√π +� 1 +0 +ez2(1−s) +ds +2√1 − s += zez2 +√π +� 1 +0 +1 +√1 − se−sz2 ds = zez2 +√π +� ∞ +0 +χ[0,1](s) +√1 − s e−sz2 ds. +(A.20) +From the Taylor expansion of the Binomial function, we know that +1 +√1 − s = +∞ +� +n=0 +�− 1 +2 +n +� +(−s)n = +∞ +� +n=0 +4−n +�2n +n +� +sn, +(A.21) + +32 +FLORIAN KOGELBAUER AND ILYA KARLIN +which allows us to apply Lemma (A.1) with βn = n + 1 and an = 4−n�2n +n +� +, thus leading to +erfi(z) ∼ zez2 +√π +∞ +� +n=0 +4−n +�2n +n +� +Γ(n + 1)z−2(n+1) +∼ ez2 +√π +∞ +� +n=0 +(2n)! +4nn! z−2n−1 +∼ ez2 +z√π +∞ +� +n=0 +(2n − 1)! ! +(2z)n +, +(A.22) +for z → ∞ and |arg(z)|≤ π +2 − δ, 0 < δ ≤ π +2 . This is consistent with formula (A.19) for the +limit along the real line. Finally, we arrive at the following asymptotic expansion for Z: +Z+(z) ∼ i +�π +2 e− z2 +2 − +∞ +� +n=0 +(2n − 1)! ! +z2n+1 +, +for |arg(z)|≤ π +2 − δ, +z → ∞, +(A.23) +which is, of course, equivalent to +Z+(z) ∼ − +∞ +� +n=0 +(2n − 1)! ! +z2n+1 +, +for |arg(z)|≤ π +2 − δ, +z → ∞, +(A.24) +since |e−z2|2= e−2(x2−y2) → 0 for ℜz = x → ∞. +References +[1] M. Abramowitz and I. A. Stegun. Handbook of mathematical functions with formulas, graphs, and +mathematical tables, volume 55. US Government printing office, 1948. +[2] C. Bardos, F. Golse, and C. D. Levermore. Fluid dynamic limits of kinetic equations ii. Convergence +proofs for the Boltzmann equation. Communications on pure and applied mathematics, 46(5):667–753, +1993. +[3] P. L. Bhatnagar, E. P. Gross, and M. Krook. A model for collision processes in gases. I. small amplitude +processes in charged and neutral one-component systems. Physical review, 94(3):511, 1954. +[4] A. Bobylev. The Chapman-Enskog and Grad methods for solving the Boltzmann equation. 
In +Akademiia Nauk SSSR Doklady, volume 262, pages 71–75, 1982. +[5] A. Bobylev. The theory of the nonlinear spatially uniform Boltzmann equation for Maxwell molecules. +Soviet Scientific Reviews. Section C, 7, 01 1988. +[6] A. V. Bobylev. Instabilities in the Chapman–Enskog expansion and hyperbolic Burnett equations. +Journal of statistical physics, 124(2):371–399, 2006. +[7] R. E. Caflisch. The Boltzmann equation with a soft potential. Communications in Mathematical +Physics, 74(1):71–95, 1980. +[8] T. Carleman. Problemes math´ematiques dans la th´eorie cin´etique des gaz, volume 2. Almqvist & Wik- +sells boktr., 1957. +[9] T. Carty. Grossly determined solutions for a Boltzmann-like equation. Kinetic and Related Models, +10(4):957–976, 2017. +[10] T. E. Carty. Elementary solutions for a model Boltzmann equation in one dimension and the connection +to grossly determined solutions. Physica D: Nonlinear Phenomena, 347:1–11, 2017. +[11] C. W. Chang, J. Foch, G. W. Ford, and G. E. Uhlenbeck. Studies in Statistical Mechanics. North- +Holland, 1970. + +EXACT HYDRODYNAMICS FROM LINEAR BGK +33 +[12] S. Chapman and T. Cowling. The Mathematical Theory of Non-uniform Gases: An Account of the +Kinetic Theory of Viscosity, Thermal Conduction and Diffusion in Gases. Cambridge Mathematical +Library. Cambridge University Press, 1990. +[13] B. Conte and S. Conte. The Plasma Dispersion Function: The Hilbert Transform of the Gaussian. +Academic Press, 1961. +[14] L. Desvillettes, C. Mouhot, and C. Villani. Celebrating Cercignani’s conjecture for the Boltzmann +equation. Kinetic and Related Models, 4, 09 2010. +[15] R. S. Ellis and M. A. Pinsky. The first and second fluid approximations to the linearized Boltzmann +equation. J. Math. Pures Appl, 54(9):125–156, 1975. +[16] A. Erd´elyi. General asymptotic expansions of Laplace integrals. Archive for Rational Mechanics and +Analysis, 7(1):1–20, 1961. +[17] R. Fitzpatrick. Plasma Physics: An Introduction. Taylor & Francis, 2014. +[18] A. 
Gorban and I. Karlin. Hilbert’s 6th problem: Exact and approximate hydrodynamic manifolds for +kinetic equations. Bulletin of the American Mathematical Society, 51:186–246, 11 2013. +[19] A. N. Gorban and I. V. Karlin. Method of invariant manifolds and regularization of acoustic spectra. +Transport Theory and Statistical Physics, 23:559–632, 1994. +[20] A. N. Gorban and I. V. Karlin. Uniqueness of thermodynamic projector and kinetic basis of molecular +individualism. Physica A: Statistical Mechanics and its Applications, 336(3):391–432, 2004. +[21] A. N. Gorban and I. V. Karlin. Invariant Manifolds for Physical and Chemical Kinetics, volume 660 +of Lecture Notes in Physics. Springer Science & Business Media, 2005. +[22] H. Grad. On the kinetic theory of rarefied gases. Communications on pure and applied mathematics, +2(4):331–407, 1949. +[23] H. Grad. Asymptotic theory of the Boltzmann equation. The physics of Fluids, 6(2):147–181, 1963. +[24] H. Grad. Asymptotic equivalence of the Navier–Stokes and nonlinear Boltzmann equations. Magneto- +Fluid Dynamics Division, Courant Institute of Mathematical Sciences, 1964. +[25] D. Hilbert. Grundz¨uge einer allgemeinen Theorie der linearen Integralgleichungen, volume 3. BG Teub- +ner, 1912. +[26] D. Hilbert et al. Mathematical problems. Bulletin of American Mathematical Society, 37(4):407–436, +2000. +[27] I. V. Karlin, M. Colangeli, and M. Kr¨oger. Exact linear hydrodynamics from the Boltzmann equation. +Physical Review Letters, 100(21):214503, 2008. +[28] F. Kogelbauer. Slow hydrodynamic manifolds for the three-component linearized Grad system. Con- +tinuum Mechanics and Thermodynamics, Aug 2019. +[29] F. Kogelbauer. Non-local hydrodynamics as a slow manifold for the one-dimensional kinetic equation. +Continuum Mechanics and Thermodynamics, 33, 03 2021. +[30] P. Kurasov and S.-T. Kuroda. Krein’s resolvent formula and perturbation theory. Journal of Operator +Theory, pages 321–334, 2004. +[31] F. Olver. 
Asymptotics and special functions. AK Peters/CRC Press, 1997. +[32] B. Perthame. Global existence to the BGK model of Boltzmann equation. Journal of Differential +equations, 82(1):191–205, 1989. +[33] B. Perthame and M. Pulvirenti. Weighted L∞ bounds and uniqueness for the Boltzmann BGK model. +Archive for rational mechanics and analysis, 125(3):289–295, 1993. +[34] P. Rosenau. Extending hydrodynamics via the regularization of the Chapman–Enskog expansion. Phys. +Rev. A, 40:7193–7196, Dec 1989. +[35] L. Saint-Raymond. Discrete time Navier–Stokes limit for the BGK Boltzmann equation. Comm. in +Partial Differential Equations, 27(1 and 2):149–184, 08 2006. +[36] L. Saint-Raymond. A mathematical PDE perspective on the Chapman–Enskog expansion. Bulletin of +the American Mathematical Society, 51(2):247–275, 2014. +[37] M. Slemrod. Chapman–Enskog +=⇒ +viscosity-capillarity. Quarterly of Applied Mathematics, +70(3):613–624, 2012. + +34 +FLORIAN KOGELBAUER AND ILYA KARLIN +[38] G. Teschl. Jacobi operators and completely integrable nonlinear lattices. Number 72. American Mathe- +matical Soc., 2000. +[39] C. Truesdell and R. G. Muncaster. Fundamentals of Maxwel’s Kinetic Theory of a Simple Monatomic +Gas: Treated as a Branch of Rational Mechanics. Academic Press, 1980. +[40] C. Villani. Hypocoercivity. arXiv preprint math/0609050, 2006. +[41] G. N. Watson. The harmonic functions associated with the parabolic cylinder. Proceedings of the +London Mathematical Society, s2-17(1):116–148, 1918. +[42] A. Weinstein. On nonselfadjoint perturbations of finite rank. Journal of Mathematical Analysis and +Applications, 45(3):604–614, 1974. +[43] R. Wong and M. Wyman. Generalization of Watson’s lemma. Canadian Journal of Mathematics, +24(2):185–208, 1972. 
+ETH Z¨urich, Department of Mechanical and Process Engineering, Leonhardstrasse 27, +8092 Z¨urich, Switzerland +Email address: floriank@ethz.ch +ETH Z¨urich, Department of Mechanical and Process Engineering, Leonhardstrasse 27, +8092 Z¨urich, Switzerland +Email address: ikarlin@ethz.ch + diff --git a/4tE1T4oBgHgl3EQfSwM3/content/tmp_files/load_file.txt b/4tE1T4oBgHgl3EQfSwM3/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..67f032de2bdf11b3d306c1dd43c78c5f8ce83f09 --- /dev/null +++ b/4tE1T4oBgHgl3EQfSwM3/content/tmp_files/load_file.txt @@ -0,0 +1,1170 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf,len=1169 +page_content='EXACT HYDRODYNAMIC MANIFOLDS FOR THE LINEARIZED THREE-DIMENSIONAL BOLTZMANN BGK EQUATION FLORIAN KOGELBAUER AND ILYA KARLIN Abstract.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' We perform a complete spectral analysis of the linear three-dimensional Boltzmann BGK operator resulting in an explicit transcendental equation for the eigen- values.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Using the theory of finite-rank perturbations, we prove that there exists a critical wave number kcrit which limits the number of hydrodynamic modes in the frequency space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' This implies that there are only finitely many isolated eigenvalues above the es- sential spectrum, thus showing the existence of a finite-dimensional, well-separated linear hydrodynamic manifold as a combination of invariant eigenspaces.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' The obtained results can serve as a benchmark for validating approximate theories of hydrodynamic closures and moment methods.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Introduction The derivation of hydrodynamic equations from kinetic theory is a fundamental, yet not completely resolved, problem in thermodynamics and fluids, dating back at least to part (b) of Hilbert’s sixth problem [26].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Given the Boltzmann equation or an approximation of it, can the the basic equations of fluid dynamics (Euler, Navier–Stokes) be derived directly from the dynamics of the distribution function?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' One classical approach is to seek a series expansion in terms of a small parameter, such as the relaxation time τ or the Knudsen number ε [39].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' One widely used expansion is the Chapman–Enskog series [12], where it is assumed that the collision term scales with ε−1, thus indicating a (singular) Taylor expansion in ε.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Indeed, the zeroth order PDE obtained this way gives the Euler equation, while the first order PDE reproduces the Navier–Stokes equation.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' On the linear level, the Navier–Stokes equation is globally dissipative and decay of entropy on the kinetic level translates to decay of energy on the fluid level.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' For higher-order expansions, however, we are in trouble.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' In [4], it was first shown that an expansion in terms of Knudsen number can lead to nonphysical properties of the hy- drodynamic models: At order two (Burnett equation [12]), the dispersion relation shows a change of sign, thus leading to modes which grow in energy (Bobylev instability).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' In particular, the Burnett hydrodynamics are not hyperbolic and there exists no H-theorem for them [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content='03069v1 [math-ph] 8 Jan 2023 2 FLORIAN KOGELBAUER AND ILYA KARLIN From a mathematical point of view, of course, there is no guarantee that the expansion of a non-local operator in frequency space, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content='e.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=', an approximation in terms of local (dif- ferential) operators, gives a good approximation for the long-time dynamics of the overall system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Among the first to suggest a non-local closure relation was probably Rosenau [34].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' In a series of works (see, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=', [19, 18, 21] and references therein), Karlin and Gorban derived explicit non-local closures by essentially summing the Chapman–Enskog series for all orders.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Furthermore, we note that the Chapman–Enskog expansion mixes linear and nonlinear terms for the full Boltzmann equation since it only considers powers of ε, while the existence (and approximation) of a hydrodynamic manifold can be performed indepen- dently of the Knudsen number, for which it only enters as a parameter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Spectral properties of linearized kinetic equations are of basic interest in thermodynam- ics and have been performed by numerous authors.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Already Hilbert himself was concerned with the spectral properties of linear integral operators derived from the Boltzmann equation [25].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Carleman [8] proved that the essential spectrum remains the same under a compact perturbation (Weyl’s theorem) in the hard sphere case and was able to estimate the spectral gap.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' This result was generalized to a broader class of collision kernels by Grad [23] and to soft potentials in [7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' For spatially uniform Maxwell molecules, a complete spectral description was derived in [5] (together with exact special solutions and normal form calculations for the full, non-linear problem), see also [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Famously, in [15], some fundamental properties of the spectrum of a comparably broad class of kinetic operators were derived.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' In particular, the existence of eigenvalue branches and asymptotic expansion of the (small) eigenvalues for vanishing wave number were derived.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' We stress, however, that no analysis for large wave numbers or close to the essential spectrum was performed in [15].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Let us also comment on the relation to Hilbert’s sixth problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Along these lines, several results on the convergence to Navier–Stokes (and Euler) equations have been obtained.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Already Grad [24] was interested in this question.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' In [15], it is also shown that the semi-group generated by the linearized Euler equation converges - for fixed time - to the semi-group generated by the linearized Boltzmann equation (and similarly, for the linear Navier–Stokes semi-group).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' In [35], convergence of scaled solutions to the Navier–Stokes equation along the lines of [2] was proved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' We also mention the results related to convergence rates to the equilibrium (hypercoercivity) of the variants of the BGK equation [40, 14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' For an excellent review on the mathematical perspective of Hilbert’s sixth problem, we refer to [36].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' In this work, we perform a complete spectral analysis for the Bhatnagar–Gross–Krook (BGK) equation [3] linearized around a global Maxwellian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' The BGK model - despite being a comparatively simple approximation to the full Boltzmann equation - shares im- portant features such as decay of entropy and the conservation laws of mass, momentum and energy [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Global existence and estimates of the solution were proved in [32, 33] for EXACT HYDRODYNAMICS FROM LINEAR BGK 3 the full, non-linear BGK system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' The single relaxation time τ in the BGK equation will serve as the analog of the Knudsen number and fundamental parameter in our analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Previous work on the full spectrum of kinetic models together with a hydrodynamic interpretation has been performed in [28] for the three-dimensional Grad system and in [29] for the linear BGK equation with mass density only.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' A similar independent analysis for the one-dimensional linear BGK with one fluid moment was performed in [10, 9] in the context of grossly determined solutions (in the sense of [39]), where convergence to the slow manifold is also proven explicitly.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' While the results obtained in [10, 9] are proved for the real line (for which the corresponding eigen-distributions are derived), we will focus on the torus TL, for which we expect a dis- crete set of eigenvalues.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' Indeed, we will give a complete and (up to the solution of a transcendental equa- tion) explicit description of the spectrum of the BGK equation linearized around a global Maxwellian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' We will show the existence of finitely many discrete eigenvalues above the essential spectrum as well as the existence of a critical wave number for each family of modes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' More precisely, we prove the following: Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4tE1T4oBgHgl3EQfSwM3/content/2301.03069v1.pdf'} +page_content=' The spectrum of the non-dimensional linearized BGK operator L with re- laxation time τ around a global Maxwellian is given by σ(L) = � −1 τ + iR � ∪ � N∈Modes � |k| p/2 +mod p +for each x ∈ {0, 1}n−1, and prime p. +(2) +Viola introduced majmodp in [17] and showed that the distribution (X, majmodp(X)) is hard to sample from +for low-depth classical circuits with bounded fan-in. 
+Overview of Techniques +Before proving Theorem 3, we first prove an analogous result in the setting +where we allow the quantum circuit to take as input the GHZn state: |GHZn⟩ = +1 +√ +2(|0n⟩ + |1n⟩). For this +setting we consider the distribution (X, majmodp(X) ⊕ parity(X)). +Theorem 5. For each n ∈ N, and δ ∈ (0, 1), there exists a prime p such that +1. There exists a constant-depth quantum circuit that takes the GHZn state as input and produces a +distribution which has total variation distance at most 1/6+O(n−c) from (X, majmodp(X)⊕parity(X)) +for some c ∈ (0, 1). +2. Each classical circuit with bounded fan-in which takes n+nδ random bits as input and has total variation +distance at least 1 +2 − ω(1/ log n) from (X, majmodp(X) ⊕ parity(X)) has depth at least Ω(log log(n)). +We construct the corresponding quantum circuit in two steps. First, we construct a pseudo-quantum +circuit, which approximately samples from the correct distribution but includes some single-qubit non-unitary +operations. In the second step, we replace these non-unitary operations with actual unitaries and show that +the desired output statistics are preserved. +Our classical circuit lower bound techniques are inspired by, and heavily borrow from, Viola’s techniques +in [17], where he proves classical circuit lower bounds for various distributions. Rather than explicitly lower +bounding classical circuit depth, Viola proves lower bounds for the locality of functions. To illustrate the +relationship between locality and circuit depth let f : {0, 1}ℓ → {0, 1}n be a function implemented by a +classical circuit attempting to sample from (X, majmodp ⊕ parity(X)). We say that f is d-local if, for each +i ∈ [n], the i-th output bit of f(u) +depends on at most d bits of the input u. Note that any circuit with bounded fan-in and depth log(d) can +implement a function with locality at most O(d). 
And so, to prove a circuit lower bound of Ω(log log n) for +sampling from the distribution (X, majmodp ⊕ parity(X)) it suffices to prove that any function with locality +at most Ω(logk n) cannot sample from the distribution (X, majmodp ⊕ parity(X)) given access to uniformly +random bits as input. +Our proof of sampling hardness for (X, majmodp(X) ⊕ parity(X)) closely follows Viola’s original proof +of hardness for (X, majmodp(X)). Both arguments begin with the observation that for any d-local function +f : {0, 1}ℓ → {0, 1}n there exists a partition of the input u = (x, y) and a permutation of output bits of +f(x, y) such that 4: +f(x, y) = g1(x1, y) ◦ g2(x2, y) ◦ · · · ◦ gs(xs, y) ◦ h(y), +(3) +where each gi(xi, y) is a subset (or “block”) of the output bits that are completely determined by y and +a single bit of x, and s = Ω(n/d2). +Therefore, if we fix y, each of the blocks gi are independent. +Let +z ∈ {0, 1}n−1 be the first n − 1 outputs of f(x, y) and let b be the final output bit. We can assume without +loss of generality (by absorbing at most one gi into h) that the last output bit is not permuted so that b +only depends on y. In order for the function f to sample from the correct distribution the output bits z +must be uniformly distributed and, for every input (x, y), we must have majmodp(z) ⊕ parity(z) = b. We +note that, after fixing the input bits y, the Hamming weight of z is a sum of independent random variables +but b is fixed. Then (still following Viola) we show that if many of these independent variables are fixed +the output distribution of z will not have sufficiently high entropy. Alternatively, if they are unfixed, the +condition majmodp(z)⊕parity(z) = b is unlikely to be satisfied. Making these observations formal completes +the proof. +4We use “◦” to denote concatenation. 
+4 + +In order to extend the sampling separation to a distribution that can be prepared by a constant-depth +quantum circuit without a GHZ state as input, we replace the GHZ state in the quantum circuit for Theorem 5 +with a “Poor-Man’s GHZ state” (introduced in [19]) defined over a binary tree B. The resulting distribu- +tion produced by this circuit is still related to (X, majmodp(X) ⊕ parity(X)) but is more complicated. In +particular, it is still of the form (X, MMp(SX) ⊕ parity(X)) where +MMp(j) := +� +0 +if j < p/2 +mod p +1 +if j > p/2 +mod p +for j ∈ Z +(4) +and Sz is a sum of terms that depends on output bits z ∈ {0, 1}n−1 in a complicated way.5 Unfortunately, +we no longer have the nice property that the terms of the sum Sz depend on disjoint bits of z. The main +technical challenge for the classical lower bound is accounting for these dependencies within the sum, which is +accomplished by carefully fixing additional bits of the input (and therefore output) to recover independence +of the unfixed terms. +1.2 +Reader’s Guide +Both of the Theorems mentioned above, Theorem 3 and Theorem 5, consist of 2 parts. We restate each of +these parts as separate theorems, each in their own section of the paper. +The following two sections contain the proof of Theorem 5 – the sampling separation in the setting +where we allow the quantum circuit to take a GHZ state as input. Section 2 contains the proof of part 1 +of Theorem 5, the quantum circuit upper bound, as Theorem 7. Section 3 contains the proof of part 2 of +Theorem 5, the classical circuit lower bound, as Theorem 20. +In the last two sections, we prove the main result of this paper: Theorem 3, the separation in the sampling +power between low-depth quantum and classical circuits. In Section 4 we prove part 1 of Theorem 3, that +there is a quantum circuit that approximately samples from the target distribution as Theorem 33. 
Finally, +in Section 5, we prove the classical hardness of sampling from this distribution as Theorem 34. +2 +Sampling from (X, majmodp(X) ⊕ parity(X)) using a GHZ state +In this section we consider constant-depth quantum circuits with access to an n-qubit GHZ state as input. +We show these circuits can produce samples close to the distribution (X, majmodp(X)⊕parity(X)), where X +is a uniformly random bitstring of length n−1. We will prove this result in two steps – in Section 2.1 we give +a “quantum-like” circuit that samples from the correct distribution but includes non-unitary single-qubit +operations. In Section 2.2 we show how to replace those non-unitary operations with multi-qubit (but still +constant-sized) unitaries. Before beginning these proofs we review some details about GHZ states. +Review of GHZ States +An n-qubit GHZ state is defined to be the state +|GHZn⟩ = +1 +√ +2 +� +|0⟩⊗n + |1⟩⊗n� +. +(5) +It is well-known that applying a Hadamard transform to each qubit of a GHZ state produces a uniform +superposition over bitstrings with even Hamming weight: +H⊗n |GHZn⟩ = 2−n/2 � +e∈En +|e⟩ +(6) +where En is the set containing all even parity n-bit strings. We can equivalently describe this state as a +coherent superposition of n − 1 random bits and a final bit whose value equals the parity of the n − 1 other +bits, so +H⊗n |GHZn⟩ = +�n−1 +� +i=1 +CNOTi,n +� +|+⟩⊗n−1 ⊗ |0⟩ +(7) +5In particular, Sz is a sum of parities of sub-strings of z – see Definition 30 for details. +5 + +where CNOTi,j denotes a CNOT gate controlled on qubit i and applied to qubit j. Equation (7) will be our +starting point for designing circuits that use the GHZ state as a resource state. +|+⟩ +• +|+⟩ +• +... +|+⟩ +• +|0⟩ +Figure 2: A circuit constructing the state H⊗n |GHZn⟩, as described in Equation (7). +2.1 +Sampling with non-unitary operations +We now consider constant-depth quantum circuits augmented with specific single qubit non-unitary “gates” +Aθ, which we will soon define. 
We show these circuits can sample (approximately) from the distribution +(X, majority(X) ⊕ parity(X)). While this model is non-physical, introducing it allows us to isolate some key +ideas which we will reuse in the fully quantum circuit developed in the next section. +First, for each θ ∈ R, define the (non-unitary) matrix Aθ to be the two-qubit matrix which acts on the +computational basis states as +Aθ |0⟩ = |0⟩ +(8) +Aθ |1⟩ = exp(iθX) |1⟩ +(9) +When drawing circuit diagrams in this section we sometimes include Aθ gates, and understand that they +represent the matrix A acting on the qubits indicated. We also sometimes draw A† +θ gates, which represent +the adjoint of the matrix Aθ acting on the qubits indicated. +We now prove the following useful circuit identity. +Lemma 6. For any one qubit state |ψ⟩ and computational basis state |x⟩ with x ∈ {0, 1}, we have +⟨x|2 +� +A† +θ +� +2 CNOT2,1 |ψ⟩1 |+⟩2 = +1 +√ +2 exp(i(θ + π/2)xX1) |ψ⟩1 +(10) +Proof. Direct computation gives +⟨x|2 +� +A† +θ +� +2 CNOT2,1 |ψ⟩1 |+⟩2 = ⟨x|2 exp(iθxX2)CNOT2,1 |ψ⟩1 |+⟩2 +(11) += ⟨x|2 CNOT2,1 exp(iθxX1X2) |ψ⟩1 |+⟩2 +(12) += ⟨x|2 CNOT2,1 exp(iθxX1) |ψ⟩1 |+⟩2 +(13) += exp(i(θ + π/2)xX1) |ψ⟩1 ⟨x|+⟩2 +(14) += +1 +√ +2 exp(i(θ + π/2)xX1) |ψ⟩1 +(15) +where we used on the first line that +Aθ|x⟩ = exp(iθXx) |x⟩ +(16) +by definition, the commutation relation6 +X2CNOT2,1 = CNOT2,1X1X2 +(17) +=⇒ exp(iθX2)CNOT2,1 = CNOT2,1 exp(iθX1X2) +(18) +on the second line, that |+⟩ is a 1-eigenstate of the X operator on the third line, and then the definition +of the CNOT gate and the |+⟩ state on the final two lines. Figure 3 gives a diagrammatic version of this +proof. +6 + +|ψ⟩ +|ψ⟩ += +|+⟩ +• +A† +θ +⟨x| +|+⟩ +• +exp(iθxX) +⟨x| +|ψ⟩ +exp (iθxXX) += +|+⟩ +• +⟨x| +|ψ⟩ +exp (iθxX) += +|+⟩ +• +⟨x| +|ψ⟩ +exp (ix(θ + π/2)X) += +|+⟩ +⟨x| +Figure 3: A diagrammatic proof of Lemma 6. The equivalence between each line is explained in the proof +of the lemma. 
+We now prove the main result of this section and construct a constant-depth circuit with a GHZ state +as input and Aθ gates which samples approximately from the distribution (X, majmodp(X)) for any p. The +construction builds on Lemma 6 as well as the observations about the GHZ state discussed in Section 2. +Theorem 7. For each prime number p there is a constant-depth circuit consisting of one and two-qubit +unitary gates and Aθ operations which takes a GHZ state as input and produces an output which, when +measured in the computational basis, produces an output distribution (X′, Y ) with +∆((X′, Y ), (X, majmodp(X) ⊕ parity(X))) ≤ 1 +2 − 1 +π + 1 +2p + O(p3/2e−n/p2). +(19) +Proof. We first describe the circuit which, when measured in the computational basis, produces output which +correlates with (X, majmodp(X) ⊕ parity(X)). Fix θ = π/p. The circuit takes as input a GHZ state, applies +a Hadamard transform to each qubit of the state, then applies a A† +θ operation to the first n − 1 qubits in +the GHZ state and a exp(−iπX/4) rotation to the final qubit. This circuit is indicated diagrammatically in +Figure 4. +To prove this circuit samples (approximately) from the correct distribution we write the (unnormalized) +output state of the circuit conditioned on first n − 1 qubits of the circuit being measured in computational +6To prove the implication, use the standard decomposition exp(iθX) = cos(θ)+i sin(θ)X, then commute the resulting terms. +7 + +H +A† +π/p +✌✌✌ +H +A† +π/p +✌✌✌ +... +H +A† +π/p +✌✌✌ +H +exp(−iπX/4) +✌✌✌ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +|GHZn⟩ +Figure 4: Constant-depth circuit producing approximate samples from the distribution (X, majmodp(X) ⊕ +parity(X)). +basis state |x⟩ = |x1⟩ ⊗ |x2⟩ ⊗ ... 
⊗ |xn−1⟩ as: +⟨x|1...n−1 +�� +A† +π/p +�⊗n−1 +⊗ exp(−iπX/4) +� +H⊗n |GHZn⟩ += ⟨x|1...n−1 +�� +A† +π/p +�⊗n−1 +⊗ exp(−iπX/4) +� �n−1 +� +i=1 +CNOTi,n +� +|+⟩⊗n−1 ⊗ |0⟩ +(20) += +n−1 +� +i=1 +⟨xi|A† +π/p (CNOTi,n)|+⟩i ⊗ exp(−iπX/4) |0⟩n +(21) += 2−(n−1)/2 exp +� +iX +� +−π +4 + +n−1 +� +i=1 +xi +�π +p + π +2 +��� +|0⟩n +(22) +where we used Equation (7) on the first line, reordered terms on the second (noting that exp(iπX/4)n +commutes with CNOTi,n for any i ∈ [n − 1]), and then used Lemma 6 on the third. A diagrammatic version +of this analysis is given in Figure 5. +Now, tracing over the final qubit we see the probability of the first n − 1 qubits being measured in any +computational basis state |x⟩ is 2−(n−1) so the measurement of the first n − 1 bits produces a uniformly +random bit string, as desired. Additionally, conditioning on bit string x = x1x2...xn−1 being measured, we +see the state of the n-th qubit is +exp +� +iX +� +−π +4 + |x| +�π +p + π +2 +��� +|0⟩n +(23) += exp +� +iX +� +−π +4 + π +p |x| +�� +|parity(x)⟩n +(24) += cos +� +−π +4 + π +p |x| +� +|parity(x)⟩n + i sin +� +−π +4 + π +p |x| +� +|1 ⊕ parity(x)⟩n . +(25) +Where |x| = �n−1 +i=1 xi denotes the Hamming weight of x. +Now let Yx be the random variable giving the outcome of a computational basis measurement performed +on the n-th qubit, conditioned on a computational basis measurement of the first n−1 bits giving outcome x. +We bound the probability that this random variable does not equal parity(x)⊕majmodp(x). Straightforward +calculation gives that the probability that Yx equals parity(x) is given by +Pr[Yx = parity(x)] = cos2 +� +−π +4 + π +p |x| +� +. +(26) +It is then easy to see (see Figure 6) that this function is inversely correlated with majmodp(x) (meaning that +Yx more likely equals parity(x) when majmodp(x) = 0 and likely does not equal parity(x) when majmodp = +8 + +H +A† +π/p +⟨x1| +|+⟩ +• +A† +π/p +⟨x1| +H +A† +π/p +⟨x2| +|+⟩ +• +A† +π/p +⟨x2| +... += +... 
+H +A† +π/p +⟨xn−1| +|+⟩ +• +A† +π/p +⟨xn−1| +H +exp(−iπX/4) +|0⟩ +exp(−iπX/4) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +|GHZn⟩ +|+⟩ +⟨x1| +|+⟩ +⟨x2| += +... +|+⟩ +⟨xn−1| +|0⟩ +exp +� +iX +� +− π +4 + �n−1 +i=1 xi +� +2π +p + π +2 +��� +Figure 5: Diagrammatic analysis of the circuit presented in the proof of Theorem 7. The first line follows +from Equation (7), while the second follows from Lemma 6. +1). Expanding on this we can bound the average probability that Yx does not equal parity(x)⊕majmodp(x)]: +1 +2n−1 +� +x∈{0,1}n−1 +Pr +� +Yx ̸= parity(x) ⊕ majmodp(x) +� +≤ 1 +2 − 1 +π + 1 +2p + O(p3/2e−n/p2) +(27) +Details of this calculation are given after this proof, in Lemma 8. +Finally, we bound the total variation distance between the output of the quantum circuit depicted in +Figure 4 and the distribution (X, majmodp(X) ⊕ parity(X)) with uniformly random X. Let (X′, Y ) be the +random variable giving the output of the quantum circuit. Then +∆((X, majmodp(X) ⊕ parity(X)), (X′, Y )) += 1 +2 +� +x∈{0,1}n−1 +y∈{0,1} +��� Pr +� +(X, majmodp(X) ⊕ parity(X)) = (x, y) +� +− Pr[(X′, Y ) = (x, y)] +��� +(28) += 1 +2 +� +x∈{0,1}n−1 +y∈{0,1} +��� Pr[X = x] Pr +� +majmodp(x) ⊕ parity(x) = y +� +− Pr[X′ = x] Pr[Yx = y] +��� +(29) += 1 +2n +� +x∈{0,1}n−1 +y∈{0,1} +��� Pr +� +majmodp(x) ⊕ parity(x) = y +� +− Pr[Yx = y] +��� +(30) += +1 +2n−1 +� +x∈{0,1}n−1 +Pr +� +Yx ̸= majmodp(x) ⊕ parity(x) +� +≤ 1 +2 − 1 +π + 1 +2p + O(p3/2e−n/p2) +(31) +This completes the proof. +9 + +0 +p/4 +p/2 +3p/4 +p +1/2 +1 +|x| +Pr[y = parity(x)] +majmodp(|x|) +(a) +Inverse +correlation +of +Pr[Yx = parity(x)] +and majmodp(x) +0 +p/4 +p/2 +3p/4 +p +1/2 +1 +|x| +Pr +� +y ̸= majmodp(x) ⊕ parity(x) +� +(b) Probability that Yx is incorrect, f(|x|) +Figure 6: Plots displaying the correlation of Yx and majmodp(x) ⊕ parity(x) where Yx is the last bit output +by the circuit in Figure 4 conditioned on the first n − 1 measurements resulting in string x ∈ {0, 1}n−1. 
+Lemma 8. Define the random variable Yx as in the proof of Theorem 7, so Yx takes values in {0, 1} and +Pr[Yx = parity(x)] = cos2 +� +−π +4 + π +p |x| +� +. +(32) +Then +2−(n−1) +� +x∈{0,1}n−1 +Pr +� +Yx ̸= majmodp(x) ⊕ parity(x) +� +≤ 1 +2 − 1 +π + 1 +2p + O(p3/2e−n/p2). +(33) +Proof. Let X be a random variable taking value uniformly at random from {0, 1}n−1. Then we have +2−(n−1) +� +x∈{0,1}n−1 +Pr +� +Yx ̸= majmodp(x) ⊕ parity(x) +� += +p−1 +� +k=0 +Pr +� +YX ̸= majmodp(X) ⊕ parity(X) +��|X| = k +� +· Pr[|X| = k] +(34) +Let f(k) be the probability that our output measurement is incorrect given that the Hamming weight of the +first n bits have Hamming weight k. +f(k) := Pr +� +Y ̸= majmodp(X) ⊕ parity(X) +��|X| = k +� +(35) +It follows from Equation (32), that +f(k) = + + + +sin2 � +− π +4 + π +p k +� +, +k ≤ p/2 +mod p +cos2 � +− π +4 + π +p k +� +, +k > p/2 +mod p +(36) +which is plotted in Figure 6b. Let δ be the total variation distance between |X| mod p and Up, the uniform +distribution over {0, 1, . . ., p − 1}. Then Pr[|X| = k mod p] ≤ 1 +p + δ. We can upper bound Equation (34), +10 + +as +Pr +� +Y ̸= majmodp(X) ⊕ parity(X) +� +≤ +�1 +p + δ +� p−1 +� +k=0 +f(k) +(37) += +�1 +p + δ +�  +1 +2 + 2 +(p−1)/2 +� +k=1 +f(k) + + +(38) += +�1 +p + δ +� � +1 +2 + 2 +� p/2 +1/2 +f(k) +� +dk +(39) +Where in the second line we use the fact that f(k) is symmetric about p/2, so � p−1 +2 +k=1 f(k) = �p−1 +k= p+1 +2 f(k). In +the third line we used that f(k) is convex over (0, p/2), and therefore �(p−1)/2 +i=1 +f(k) is a (midpoint-Riemann +sum) over-approximation of +� p/2 +1/2 f(k). Next we evaluate the integral. 
+� p/2 +1/2 +f(k) dk = +� p/2 +0 +sin2 +� +−π +4 + π +p k +� +dk +(40) += +� p/2 +0 +1 +2 +� +1 + cos +�2π +p k + π +2 +�� +dk +(41) += 1 +2 +� +k + p +2π sin +�2π +p k + π +2 +������ +p/2 +0 +(42) += p +4 +� +1 − 2 +π +� +(43) +Combining this with Equation (39), we get the probability we measure an incorrect string is at most +Pr +� +Y ̸= majmodp(X) ⊕ parity(X) +� +≤ +�1 +p + δ +� �p +2 +� +1 − 2 +π +� ++ 1 +2 +� +(44) += 1 +2 − 1 +π + δp +2 +� +1 − 2 +π +� ++ 1 +2 +�1 +p + δ +� +(45) += 1 +2 − +� 1 +π − 1 +2p +� ++ O(pδ) +(46) +All that’s left is to upper bound δ, the total variation distance between |X| mod p and Up. For this, we use +the following Fact from [17]. +Fact 9 (special case of Fact 3.2 in [17]). Let (x1, x2, . . . , xt) ∈ {0, 1}n be sampled uniformly. Then the total +variation distance between �t +i=1 xi mod p and Up, the uniform distribution over {0, 1, . . ., p − 1} is at most +√pe−t/p2 +Using this fact, we get the upper bound δ ≤ p1/2e−n/p2. The probability the measured string is incorrect +is then +Pr +� +Y ̸= majmodp(X) ⊕ parity(X) +� +≤ 1 +2 − 1 +π + 1 +2p + O(p3/2e−n/p2). +(47) +2.2 +Removing non-unitary operations +We now construct a fully quantum circuit that takes a GHZ state as input and produces a state which, +when measured in the computational basis, samples approximately from the distribution (X, majmodp(X)⊕ +parity(X)). Our starting point is the non-unitary circuit constructed in Section 2.1. First, we modify this +11 + +circuit by replacing the non-unitary Aθ gates with a different set of non-unitary gates, and show the classical +distributions output by the two circuits after measurement are identical. Then we show these new non- +unitary gates are close to unitary gates, and hence the circuit can be made fully unitary with minimal +change to the output distribution. 
+2.2.1 +Introducing multi-qubit non-unitary operations +We start by defining the m-qubit non-unitary operation Aθ,m whose action on the m qubit basis state +|x⟩ = |x1x2...xm⟩ is given by: +Aθ,m |x1x2...xm⟩ = exp(iθxm) |x1⟩ ⊗ exp(iθx1) |x2⟩ ⊗ ... ⊗ exp(iθxm−1) |xm⟩ . +(48) +Intuitively, we can think of the Aθ,m operation as consisting of m distinct Aθ operations, just with the qubits +they act on “shifted” away from the qubits controlling the gate by 1 modulo m. +Now we observe that, in certain situations, an Aθ,m operation can replace a tensor product of m different +Aθ operations. +Lemma 10. For any m-qubit computational basis state |x⟩ = |x1x2...xm⟩ and arbitrary one qubit state |ψ⟩, +the following equivalence holds: +⟨x|1...m +� +A† +θ,m +� +1...m +� m +� +i=1 +CNOTi,m+1 +� +|+⟩⊗m ⊗ |ψ⟩ += ⟨x|1...m +� m +� +i=1 +� +A† +θ +� +i CNOTi,m+1 +� +|+⟩⊗m ⊗ |ψ⟩ +(49) +Proof. The proof is similar to the proof of Lemma 6. In what follows we identify indices mod m so, in +particular, we have x0 = xm. Then we see: +⟨x|1...m +� +A† +θ,m +� +1...m + + +m +� +j=1 +CNOTj,m+1 + + |+⟩⊗m ⊗ |ψ⟩ += ⟨x|1...m + + +m +� +j=1 +exp(iθXjxj−1)CNOTj,m+1 + + |+⟩⊗m ⊗ |ψ⟩ +(50) += ⟨x|1...m + + +m +� +j=1 +CNOTj,m+1 exp(iθXjXm+1xj−1) + + |+⟩⊗m ⊗ |ψ⟩ +(51) += ⟨x|1...m + + +m +� +j=1 +CNOTj,m+1 + + |+⟩⊗m ⊗ exp + +iθX +m +� +j=1 +xj−1 + + |ψ⟩ +(52) += ⟨x|1...m + + +m +� +j=1 +CNOTj,m+1 + + |+⟩⊗m ⊗ exp + +iθX +m +� +j=1 +xj + + |ψ⟩ +(53) += ⟨x|1...m + + +m +� +j=1 +exp(iθXjxj)CNOTj,m+1 + + |+⟩⊗m ⊗ |ψ⟩ +(54) += ⟨x|1...m + + +m +� +j=1 +� +A† +θ +� +j CNOTj,m+1 + + |+⟩⊗m ⊗ |ψ⟩ . +(55) +Here the first line follows from the definition of Aθ,m, the second line follows from commuting an exp(iθX) +gate past a CNOT gate as in the proof of Lemma 6, the third line follows because |+⟩ is a 1 eigenstate of +the X operator and the fourth line follows from a simple relabeling of indices. 
The fifth line follows from +applying the same argument as in the second and third lines, just in the reverse direction, and the sixth line +follows by definition of Aθ. Figure 7 gives a diagrammatic version of this proof. +12 + +A straightforward consequence of Lemma 10 and the arguments of Section 2.1 is that constant-depth +quantum circuits augmented with Aθ,m gates and acting on a GHZ state can also approximately sample +from the distribution (X, majmodp(X) ⊕ parity(X)). +Corollary 11. Let m and D be integers, and n = Dm + 1. Then the state +�� +A† +π/p,m +�⊗D +⊗ exp(−iπX/4) +� +H⊗n |GHZn⟩ , +(56) +when measured in the computational basis, produces an output distribution (X′, Y ) with +∆((X′, Y ), (X, majmodp(X) ⊕ parity(X))) ≤ 1 +2 − 1 +π + 1 +2p + O(p3/2e−n/p2). +(57) +Proof. By Lemma 10 and Equation (7) we have +�� +A† +π/p,m +�⊗D +⊗ exp(−iπX/4) +� +H⊗n |GHZn⟩ += +�� +A† +π/p,m +�⊗D +⊗ exp(−iπX/4) +� �n−1 +� +i=1 +CNOTi,n +� +|+⟩⊗n−1 ⊗ |0⟩ +(58) += +�� +A† +π/p +�⊗n−1 +⊗ exp(−iπX/4) +� �n−1 +� +i=1 +CNOTi,n +� +|+⟩⊗n−1 ⊗ |0⟩ +(59) += +�� +A† +π/p +�⊗n−1 +⊗ exp(−iπX/4) +� +H⊗n |GHZn⟩ +(60) +In the proof of Theorem 7 we show this state, when measured in the computational basis, is close to the +distribution (X, majmodp(X) ⊕ parity(X)). +2.2.2 +Replacing multi-qubit non-unitary operations with unitary operations +In this section, we construct a fully unitary circuit which takes a GHZ state as input and produces an output +which, when measured in the computation basis, samples for a distribution close in Total Variation Distance +to the distribution (X, majmodp(X)⊕parity(X)). We do this by proving that we can replace the non-unitary +operations Am,θ introduced in the previous section with unitary operations while causing minimal change +to a circuit using these elements. +To make these statements formal, we first recall some definitions and useful standard facts about matrix +norms. +Definition 12. 
The Frobenius norm of a matrix M, denoted ∥M∥F , is defined by +∥M∥F = +� +tr[M ∗M] +(61) +Definition 13. The infinity (or operator) norm of a matrix M, denoted ∥M∥∞, is defined by +∥M∥∞ = +max +|ψ⟩:∥|ψ⟩∥=1 ∥M |ψ⟩∥, +(62) +where ∥|ψ⟩∥ denotes the regular Euclidean norm of any vector |ψ⟩. +Fact 14. For any matrix M, the Frobenius norm upper bounds the operator norm +∥M∥∞ ≤ ∥M∥F . +(63) +Proof. For an arbitrary matrix M, let λ1, ..., λd denote the eigenvalues of M ∗M, with λ1 ≥ λ2 ≥ ...λd. Note +all λi are positive. Then we have +∥M∥2 +∞ = λ1 ≤ +d +� +i=1 +λi = ∥M∥2 +F +(64) +as desired. +13 + +|+⟩ +• +A† +θ,m +⟨x1| +|+⟩ +• +exp(iθXxm) +⟨x1| +|+⟩ +• +⟨x2| +|+⟩ +• +exp(iθXx1) +⟨x2| +... += +... +|+⟩ +• +⟨xm| +|+⟩ +• +exp(iθXxm−1) +⟨xm| +|ψ⟩ +|ψ⟩ +|+⟩ +• +⟨x1| +|+⟩ +• +⟨x2| += +... +|+⟩ +• +⟨xm| +|ψ⟩ +exp +� +iθX �m +j=1 xj +� +|+⟩ +• +exp(iθXx1) +⟨x1| +|+⟩ +• +exp(iθXx2) +⟨x2| += +... +|+⟩ +• +exp(iθXxm) +⟨xm| +|ψ⟩ +|+⟩ +• +A† +θ +⟨x1| +|+⟩ +• +A† +θ +⟨x2| += +... +|+⟩ +• +A† +θ +⟨xm| +|ψ⟩ +Figure 7: Diagrammatic proof of Lemma 10. |ψ⟩ is an arbitrary single qubit state. The equivalence between +lines is explained in the proof of the lemma. +14 + +Fact 15. Given matrices A1, A2, ...As and B1, B2, ..., Bs with +∥Ai − Bi∥∞ ≤ ǫ, +(65) +∥Ai∥ ≤ 1 +(66) +for all i ∈ [s], and +sǫ < 1, +(67) +we also have +������ +� +i∈[s] +Ai − +� +i∈[s] +Bi +������ +∞ +≤ 2sǫ. +(68) +Proof. First note that ∥M∥∞ is equal to the largest singular value of the matrix M, from which it follows +that +∥M ⊗ N∥∞ = ∥M∥∞∥N∥∞ +(69) +for any matrices M and N. Then an inductive argument gives +����� +s +� +i=1 +Ai − +s +� +i=1 +Ai +����� +∞ += +����� +� +i=1s +Ai − B1 +s +� +i=2 +Ai + B1 +s +� +i=2 +Ai − +s +� +i=1 +Bi +����� +∞ +(70) +≤ +�����(A1 − B1) +s +� +i=2 +Ai +����� + +�����B1 ⊗ +� s +� +i=2 +Ai − +s +� +i=2 +Bi +������ +(71) +≤ ǫ + (1 + ǫ) +����� +s +� +i=2 +Ai − +s +� +i=2 +Bi +����� +(72) += ǫ + (1 + ǫ)(2ǫ(s − 1)) ≤ 2sǫ +(73) +as desired. +Fact 16. 
Given two states |ρ⟩ and |σ⟩, let p(x) and q(x) denote the resulting classical distributions when +|ρ⟩ and |σ⟩ are measured in some basis {|x⟩}. Then we have +� +x +|p(x) − q(x)| ≤ 4∥|ρ⟩ − |σ⟩∥ +(74) +Proof. First, we note that for any two states |ρ⟩ and |σ⟩ and PSD matrix M ≤ I we have +2∥|ρ⟩ − |σ⟩∥ ≥ 2∥M(|ρ⟩ − |σ⟩)∥ +(75) +≥ 2 (∥M |ρ⟩∥ − ∥M |σ⟩∥) +(76) +≥ (∥M |ρ⟩∥ − ∥M |σ⟩∥) (∥M |ρ⟩∥ + ∥M |σ⟩∥) +(77) += ∥M |ρ⟩∥2 − ∥M |σ⟩∥2 +(78) +Then defining probability distributions p(x) and q(x) and the basis {|x⟩} as above, let +Px := {x : p(x) ≥ q(x)} +(79) +and +Mx = +� +x∈Px +|x⟩⟨x| . +(80) +15 + +Then note +∥Mx |ρ⟩∥2 − ∥Mx |σ⟩∥2 = +� +x∈Px +|⟨x|ρ⟩|2 − |⟨x|σ⟩|2 +(81) += +� +x∈Px +(p(x) − q(x)) +(82) += 1 +2 +� +x +|p(x) − q(x)| +(83) +with the final inequality holding because both p(x) and q(x) must sum to one. Combining the two inequalities +above proves the result. +Next, we recall the definition of the matrix Am,θ in terms of its action on computational basis states. +Am,θ |x1x2...xm⟩ := exp(iθXxm) |x1⟩ ⊗ exp(iθXx1) |x2⟩ ⊗ ... ⊗ exp(iθXxm−1) |xm⟩ . +(84) +The matrix Am,θ would be a unitary matrix iff it mapped computational basis states to some set of orthonor- +mal basis states.7 The following lemma shows that this condition is close to being satisfied. In what follows, +for any bitstring x = x1x2...xm ∈ {0, 1}m we let x denote the bitwise compliment of x. We also interpret all +subscripts in the remainder of this section mod m so, in particular, x0 = xm. +Lemma 17. For any θ ∈ R, m ∈ Z+ and x = x1x2...xm ∈ {0, 1}m the matrix Aθ,m satisfies the following +properties: +1. ⟨x|A† +θ,mAθ,m|x⟩ = 1. +2. ⟨x|A† +θ,mAθ,m|x⟩ = −im+2|x| sinm(θ). +3. ⟨y|A† +θ,mAθ,m|x⟩ = 0 for any y ∈ {0, 1}m\{x, x}. +Proof. The proof of Items 1 and 2 are purely computational. For any x = x1x2...xm ∈ {0, 1}m we have +⟨x| A† +m,θAm,θ |x⟩ = +� +j∈[m] +⟨xj| exp(−iθxj−1) exp(iθxj−1) |xj⟩ +(85) += +� +j∈[m] +⟨xj|xj⟩ = 1, +(86) +proving Item 1. 
A similar calculation gives +⟨x|A† +m,θAm,θ|x⟩ = +� +j∈[m] +⟨xj|exp(−iθXxj) exp(iθXxj)|xj⟩ +(87) += +� +j∈[m] +⟨xj|exp +� +i1+2xjθX +� +|xj⟩ +(88) += +� +j∈[m] +⟨xj|cos(θ) + i1+2xj sin(θ)X|xj⟩ +(89) += +� +j∈[m] +i1+2xj sin(θ) +(90) += im+2|x| sinm(θ) +(91) += −im+2|x| sinm(θ), +(92) +where we used that X |xj⟩ = |xj⟩ by definition of the compliment on the fourth line and that |x| + |x| = m +for any x in the final line. This proves Item 2. +7More generally it is unitary iff it maps any set of orthonormal basis states to some other orthornomal basis. +16 + +To prove Item 3 note that for any m bit strings x and y with x /∈ {y, y} there exists a k ∈ [m] with +xk−1 = yk−1 and xk ̸= yk. Fixing k to be that value we find: +⟨y|A† +m,θAm,θ|x⟩ = +m +� +j=1 +⟨xj|exp(−iθXyj−1) exp(iθXxj−1)|yj⟩ +(93) += ⟨yk|exp(iθX(xk − yk))|xk⟩ × +� +j∈[m]\{k} +⟨yj|exp(iθX(xj−1 − yj−1))|xj⟩ +(94) += ⟨yk|xk⟩ × +� +j∈[m]\{k} +⟨yj|exp(iθX(xj−1 − yj−1))|xj⟩ +(95) += 0 +(96) +since yk ̸= xk by definition. This competes the proof of Item 3. +We show that, as a consequence of Lemma 17, there exists an m qubit unitary matrix which is close +(in Frobenius norm) to the non-unitary matrix Aθ,m. We construct this unitary by applying Gram-Schmidt +orthnomalization applied to the state’s output by Am,θ acting on computational basis states. +Lemma 18. For any m, there exists unitary matrices Um,θ satisfying +∥Am,θ − Um,θ∥F ∈ O +� +θ−m� +(97) +as θ → 0. +Proof. We will define Um,θ by its action on computational basis states. First, fix Bm to be any set containing +half the bit strings of length m with the property that for any x ∈ {0, 1}m either x ∈ Bm or x ∈ Bm. (That +is, Bm contains one representative element from the equivalence classes of the set {0, 1}m induced by the +equivalence relation x ∼ y if x = y or x = y). Then define: +Um,θ |x⟩ := +� +Am,θ |x⟩ +if x ∈ Bm +C−1 � +Am,θ |x⟩ + im+2|x| sinm(θ)Am,θ |x⟩ +� +otherwise. +(98) +with C := +� +1 − sin2m(θ) a normalizing constant. 
Observe that, by Item 2 of Lemma 17, for x /∈ Bm we can +also write +Um,θ |x⟩ = C−1 � +Am,θ |x⟩ − ⟨x|A† +m,θAm,θ|x⟩ Am,θ |x⟩ +� +(99) +and +C = +� +1 − +��� ⟨x|A† +m,θAm,θ|x⟩ +��� +2�1/2 +. +(100) +We now prove that Um,θ is unitary. To do this, we prove Um,θ maps computational basis states to an +orthonormal basis. First note that Item 1 of Lemma 17 gives that for any x ∈ Bm: +⟨x|U † +m,θUm,θ|x⟩ = ⟨x|A† +m,θAm,θ|x⟩ = 1 +(101) +while a similar calculation gives for any x /∈ Bm: +⟨x|U † +m,θUm,θ|x⟩ = C−2 � +⟨x| A† +m,θ − ⟨x|A† +m,θAm,θ|x⟩† ⟨x| A† +m,θ +� � +Am,θ |x⟩ − ⟨x|A† +m,θAm,θ|x⟩ Am,θ |x⟩ +� +(102) += C−2 +� +1 − +��� ⟨x|A† +m,θAm,θ|x⟩ +��� +2� += 1. +(103) +17 + +Where we used Equations (99) and (100) on the first and second lines, respectively. Then we see the states +{Um,θ |x⟩} for x ∈ {0, 1}m acting on computational basis states are correctly normalized. +It remains to show that these states are orthogonal. First, we note that Item 3 of Lemma 17 gives that +for any x, y ∈ {0, 1}m with y /∈ {x, x} we have +⟨y|A† +θ,mAθ,m|x⟩ = ⟨y|A† +θ,mAθ,m|x⟩ = ⟨y|A† +θ,mAθ,m|x⟩ = ⟨y|A† +θ,mAθ,m|x⟩ = 0 +(104) +and then a quick proof by cases shows that ⟨y|U † +θ,mUθ,m|x⟩ = 0 for any x ∈ {0, 1}m and y /∈ {x, x}. Finally, +we consider the inner product ⟨x|U † +θ,mUθ,m|x⟩. By definition of Bm, exactly one of x or x is in Bm. Assume +for the moment that x /∈ Bm. Then using Equation (99) we have +⟨x|A† +θ,mAθ,m|x⟩ = C−1 � +⟨x| A† +m,θ +� � +Am,θ |x⟩ − ⟨x|A† +m,θAm,θ|x⟩ Am,θ |x⟩ +� +(105) += C−1 � +⟨x|A† +m,θAm,θ|x⟩ − ⟨x|A† +m,θAm,θ|x⟩ ⟨x|A† +m,θAm,θ|x⟩ +� +(106) += C−1 � +⟨x|A† +m,θAm,θ|x⟩ − ⟨x|A† +m,θAm,θ|x⟩ +� += 0 +(107) +as desired. We conclude Um,θ is unitary. 
+Finally, to show Um,θ is close to Am,θ we compute +∥Am,θ − Um,θ∥2 +F = +� +x∈{0,1}m +|(Am,θ − Um,θ) |x⟩|2 +(108) += +� +x∈Bm +��� +� +1 − C−1� +Am,θ |x⟩ − im+2|x|C−1 sinm(θ)Am,θ |x⟩ +��� +2 +(109) +≤ +� +x∈Bm +� +1 − C−1�2 + C−2 sin2m(θ) +(110) +≤ 2m/2 +�sin4m(θ) +2 ++ +sin2m(θ) +1 − sin2m(θ) +� +∈ O +� +θ2m� +(111) +where the final big O approximation holds for any fixed m as θ → 0. Taking a square root then completes +the proof. +Finally, we are in a position to describe the fully unitary (X, majmodp(X) ⊕ parity(X)) sampling circuit. +Theorem 19. For n sufficiently large and p = nc for any constant c ∈ (0, 1/2) there is a constant-depth +circuit consisting of one and two qubit unitary gates and Um′,θ′ gates with m′ = ⌈c−1+1⌉ and θ′ = π/p which +takes an n qubit GHZ state as input and produces an output which, when measured in the computational basis, +produces an output distribution (X′, Y ) with +∆((X′, Y ), (X, majmodp(X) ⊕ parity(X))) ≤ 1 +2 − 1 +π + O(1/p). +(112) +Proof. For convenience, we assume n = Dm′ + 1 for some constant D. This circuit consists of a Hadamard +gate applied to each qubit of the GHZ state, followed by U † +m′,θ′ gates applied to all qubits except the final +qubit and an exp(−iπX/4) rotation applied to the final qubit. Figure 8 illustrates this circuit. Note the +quantum state produced by this circuit pre-measurement is +�� +U † +θ′,m′ +�⊗D +⊗ exp(−iπX/4) +� +H⊗n |ψ⟩ . +(113) +To prove this circuit samples from the correct distribution first note that Lemma 18 and Fact 14 give +that +��Uπ/p,m − Aπ/p,m +�� +∞ ∈ O(θ′m) = O(n−mc) ≤ O(n−(1+c)) +(114) +18 + +H +U † +m′,θ′ +✌✌✌ +... +... +H +✌✌✌ +H +U † +m′,θ′ +✌✌✌ +... +... +H +✌✌✌ +... +H +U † +m′,θ′ +✌✌✌ +... +... 
+H +✌✌✌ +H +exp(−iπX/4) +✌✌✌ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +|GHZn⟩ +Figure 8: +Constant-depth fully unitary circuit producing approximate samples from the distribution +(majmodp(X) ⊕ parity(X), X). +Here p = nc for some constant c ∈ (0, 1], θ′ = π/p, m′ = +� +c−1 + 1 +� +and n = Dm′ + 1 for some large integer D. +Them, Fact 15 gives that +���� +�� +U † +θ′,m′ +�⊗D +⊗ exp(−iπX/4) +� +H⊗n − +�� +A† +π/p,m +�⊗D +⊗ exp(−iπX/4) +� +H⊗n +���� +∞ +∈ O(Dn−(1+c)) +(115) +≤ O(n−c). +(116) +Combining this observation with Fact 16 and the definition of the operator norm ∥∥∞ gives that the classical +distributions resulting from computation basis measurements of the states +�� +U † +θ′,m′ +�⊗D +⊗ exp(−iπX/4) +� +H⊗n |ψ⟩ . +(117) +and +�� +A† +π/p,m +�⊗D +⊗ exp(−iπX/4) +� +H⊗n |ψ⟩ +(118) +are O(n−c) in total variation distance away from each other. Then Corollary 11, together with the fact that +O(p3/2e−n/p2) ≤ O(1/p) since p = n−c for c < 1/2 completes the proof. +3 +Classical Hardness of sampling (X, majmodp(X) ⊕ parity(X)) +In this section we prove the classical hardness of sampling from the distribution (X, majmodp(X)⊕parity(X)) +for each prime number p, where X is sampled from the uniform distribution over {0, 1}n. Recall that the +total variation distance distributions D1, D2 over {0, 1}m is +∆(D1, D2) := +max +T ⊆{0,1}m +���� Pr[D1 ∈ T] − Pr[D2 ∈ T] +���� +(119) +By the definition of ∆, each set T ⊆ {0, 1}m, witnesses a lower bound on ∆(D1, D2) of +�� Pr[D1 ∈ T] − +Pr[D2 ∈ T] +��. To prove a lower bound on ∆(D1, D2), we construct a particular T ∈ {0, 1}m and refer to it as +our statistical test, and we say that Di “passes” the statistical test with probability Pr[Di ∈ T]. 
+19 + +We are interested in the total variation distance between the true distribution D = (X, majmodp(X) ⊕ +parity(X)), and the output distribution of some local function f : {0, 1}ℓ → {0, 1}n+1 that takes a uniformly +random ℓ-bit string U as input. That is, we aim to lower bound ∆(f(U), D). We prove such a lower bound +in the following theorem. +Theorem 20. For all δ < 1 there exists an ǫ > 0 such that for all sufficiently large n and prime number +p = Θ(nα) for α ∈ (δ/3, 1/3): Let f : {0, 1}ℓ → {0, 1}n+1 be an ǫ log(n)-local function, with ℓ ≤ n + nδ. +Then ∆(f(U), (X, majmodp(X) ⊕ parity(X))) ≥ 1/2 − O(1/ log n) +Proof. This proof follows closely to the analogous proof for (X, majmodp(X)) in [17], with similar notation. +Let d be the locality of f, d = ǫ log(n). We start by permuting the outputs, as shown in [17]. Note that +◦ denotes concatenation. +Lemma 21 ([17]). There exists a partition of the input u ∈ {0, 1}ℓ into u = (x, y), and permutation of the +output bits such that +f(x, y) = g1(x1, y) ◦ g2(x1, y) ◦ · · · ◦ gs(xs, y) ◦ h(y). +(120) +With gi : {0, 1} × {0, 1}ℓ−s → {0, 1}|Bi|, |Bi| ≤ O(d) and s ≥ Ω(n/d2). +We will refer to each gi(xi, y) as the ith block of the output, indexed by Bi ⊆ [n + 1] in the initial +permutation, for i ∈ [s]. Note that if we fix y, each block is independent, and block i ∈ [s] only depends on +xi. We say that gi is y-fixed for some y ∈ {0, 1}ℓ−s if gi(0, y) = gi(1, y). +Without loss of generality, and for simplicity of notation, let’s assume that the last output bit does not +get permuted, so f(x, y)n+1 is still the output bit which should (ideally) correspond to majmodp ⊕ parity of +the first n outputs, and that it only depends on y. Next we define our statistical test. 
+Statistical Test: +Let N0 := 3n3α, NF := 2n3α, we define our statistical test as T := T0 ∪ TF ∪ TS, with +T0 := {z ∈ {0, 1}n+1 : zBi = 0|Bi| for ≤ N0 blocks i ∈ [s]} +(121) +TF := {z ∈ {0, 1}n+1 : ∃(x, y) : f(x, y) = z and ≥ NF blocks gi(xi, y) are y-fixed} +(122) +TS := {(z′, b) ∈ {0, 1}n × {0, 1} : b ̸= majmodp(z′) ⊕ parity(z′)} +(“incorrect strings”) +(123) +We will show that f(U) passes the statistical test (f(U) ∈ T ) with probability at least 1/2 − O(1/ log n) +and (X, majmodp(X) ⊕ parity(X)) passes with probability at most 1/n. +Since both of the functions majmodp and parity only depend on the Hamming weight of their input, +it is useful to define MMp and PAR as functions over integers, such that majmodp(z) = MMp(|z|) and +parity(z) = PAR(|z|) for any z ∈ {0, 1}n, where we use | · | to denote Hamming weight |z| = �n +i=1 zi. +MMp(j) := +� +0 +if j < p/2 +mod p +1 +if j > p/2 +mod p , +PAR(j) := j +mod 2, +for j ∈ Z. +(124) +Upon fixing y, the Hamming weight |f(x, y)|1:n is a sum of independent random variables |gi(xi, y)| which +take on at most 2 different values. The following Fact, Corollary, and Lemma will be useful in analyzing this +independent sum of random variables in the context of the majmodp ⊕ parity function. +Fact 22 (Fact 3.2 in [17]). Let a1, a2, . . . at be nonzero integers modulo p, and let (x1, x2, . . . , xt) ∈ {0, 1}n +be sampled uniformly. Then the total variation distance between �t +i=1 aixi mod p and Up, the uniform +distribution over {0, 1, . . ., p − 1} is at most √pe−t/p2 +Corollary 23. For each prime p = Θ(nα) with α < 1, t = Ω(p3), a0, a1, . . . at nonzero integers modulo p, +and A ⊆ {0, 1, . . .p − 1} +|A| +p − O(1/n) ≤ +Pr +x∈{0,1}t +� +a0 + +t +� +i=1 +aixi ∈ A +� +≤ |A| +p + O(1/n) +(125) +20 + +Proof. By the definition of total variation distance, it is sufficient to prove that ∆(Up, a0 + �t +i=1 aixi) ≤ +O(1/n). +∆(Up, a0 + +t +� +i=1 +aixi) ≤ √pe−t/p2 = √pe−Ω(p) = Θ(nα/2)e−Ω(nα) ≤ O(1/n). +(126) +Lemma 24. 
For each α ∈ (0, 1), and prime number p = Θ(nα), define the sums S = a0 + �t +i=1 aixi and +U = u0 + �t +i=1 uixi. Also let t = Ω(p3) and a0, a1, . . . , at and u0, u1, . . . , ut be integers with 0 < ai ≤ +O(p/ log n) for each i ∈ [t]. Then +Pr +x [MMp(S) ⊕ PAR(U) = b] ≥ 1 +2 − O(1/ log n). +(127) +Proof. Let’s consider the case that at least 1/2 of the ui for i ∈ [t] are even. +Then we arbitrarily fix +all xi such that ui is odd, and let E = {i ∈ [t] : ui even}. +Note that now the parity is fixed to c := +PAR(u0 + � +i∈[t]\E uixi). Let a′ +i = aEi for each i ∈ {1, 2, . . ., |E|}, and a′ +0 = a0 + � +i/∈E ai. +Pr +xE [MMp(S) ⊕ PAR(U) = b] = +Pr +r∈{0,1}|E| + +majmodp(a′ +0 + +|E| +� +i=1 +a′ +iri) ⊕ c = b + + +(128) += Pr +r + +a′ +0 + +|E| +� +i=1 +a′ +iri ∈ Mc⊕b + + +(129) +Where M0 = {0, 1, . . ., (p − 1)/2} and M1 = {(p + 1)/2, . . ., p − 2, p − 1}. Since |M0| = (p + 1)/2, |M1| = +(p − 1)/2, and |E| = Θ(nα), it follows from Corollary 23 that +Pr +xE [MMp(S) ⊕ PAR(U) = b] ≥ (p − 1)/2p − O(1/n) = 1/2 − O(1/nα). +(130) +All that’s left is to consider the case where more than half of the ui for i ∈ [t] are odd. In this case we will +fix xi for each i ∈ [t] with ui even, setting a′ +0 := a0 + � +i∈E Si, and u′ +0 = u0 + � +i∈E ui. We denote the set +of indices of such “odd” elements as O = {i ∈ [t] : ui odd}, and set a′ +i = aOi and u′ +i = uOi for each i ∈ [|O|]. +Note that since each u′ +i is odd, we have PAR(u′ +0 + � +i≤t u′ +iri) = u′ +0 + (parity(r1, . . . , r|O|)) mod 2, which is +denoted as parity(r) ⊕ u′ +0. +Pr +xO [MMp(S) ⊕ PAR(U) = b] = +Pr +r∈{0,1}|O| + +majmodp +� +a′ +0+ +� +i≤t +a′ +iri +� +⊕ parity(r) = b ⊕ u′ +0 + + +(131) +=1 +2 Pr +r + +MMp +� +a′ +0+ +� +i≤t +a′ +iri +� += b ⊕ u′ +0 +����parity(r) = 0 + + +(132) ++ 1 +2 Pr +r + +MMp +� +a′ +0 + +� +i≤t +a′ +iri +� +̸= b ⊕ u′ +0 +����parity(r) = 1 + + +(133) +Sampling a uniformly random t bit string z1z2 . . . 
zt with even Hamming weight is equivalent to sampling +the first t − 1 bits uniformly at random, and setting the last bit to zt = parity(z1, . . . , zt−1). So the equation +above is equal to +=1 +2 +Pr +r1,...rt−1 + +majmodp +� +a′ +0+ +|O|−1 +� +i=1 +a′ +iri + a′ +t · parity(r1, . . . , rt−1) +� += b ⊕ u′ +0 + + +(134) ++ 1 +2 +Pr +r1,...rt−1 + +majmodp +� +a′ +0 + +|O|−1 +� +i=1 +a′ +iri + a′ +t · parity(1, r1, . . . , rt−1) +� +̸= b ⊕ u′ +0 + + . +(135) +21 + +For any positive integers z1, z2, l, r such that l < r and r − l − z2 ≥ 0, if Z2 is a positive random variable +such that Z2 ≤ z2, then Pr[z1 + Z2 ∈ [l, r]] ≥ Pr[z1 ∈ [s, t − z2]]. Therefore, with all addition done modulo +p, we lower bound the above expression as +≥1 +2 Pr + +a′ +0 + +|O|−1 +� +i=1 +a′ +iri ∈ [0, p/2 − a′ +|O|) + + + 1 +2 Pr + +a′ +0 + +|O|−1 +� +i=1 +a′ +iri ∈ (p/2, p − 1 − a′ +|O|] + + +(136) +≥ 1 +2p((p + 1)/2 − a′ +|O| + (p − 1)/2 − a′ +|O|) − O(1/n) +(137) +=1 +2 − +a′ +|O| +2p − O(1/n) +(138) +=1 +2 − O(p/ log n) +2p +− O(1/n) ≥ 1 +2 − O(1/ log n). +(139) +Where we used Corollary 23, and the Lemma assumption that 0 < ai ≤ p/ log n for each i ∈ [t] and +p = Θ(nα). +We are now ready to prove the following claims. +Claim 25. Pr[f(U) ∈ T] ≥ 1/2 − O(1/ log n) +Proof. We will show that for each y, Prx[f(x, y) ∈ T ] ≥ 1/2 − 1/ log n. Suppose we fix y arbitrarily. +If y fixes at least NF , blocks gi(xi, y), then Prx[f(x, y) ∈ TF ] = 1. Moreover, if there are ≤ N0 blocks +gi such that gi(xi, y) = 0|Bi| for some xi ∈ {0, 1}, then for each x, there will also be ≤ N0 blocks with +gi(xi, y) = 0|Bi|, so Prx[f(x, y) ∈ T0] = 1. +Therefore, we assume that there are < NF blocks gi that are y-fixed, and > N0 blocks with gi(xi, y) = 0|Bi| +for some x ∈ {0, 1}s. Thus, there are more than N0 − NF = n3α blocks gi such that for some xi ∈ {0, 1}, +gi(xi, y) = 0|Bi| and gi(1 − xi, y) ̸= 0|Bi|. Let J ⊆ [s] denote this subset of blocks, with |J| ≥ n3α. 
We +arbitrarily fix the xi for i ∈ [s] \ J. Now, the total Hamming weight of the first n bits of f(x, y) (denoted as +|f(x, y)1:n|) only depends on the xi for i ∈ J. +Let Si denote the Hamming weight of the ith block for each i ∈ [s]. Note that for each i ∈ J, Si = 0 +with probability 1/2, and Si is some positive integer modulo p, with probability 1/2, since |Bi| ≤ O(d) = +O(ǫ log n) < p. Moreover, for each i ∈ [s] \ J, Si is fixed. Therefore, +|f(x, y)1:n| = a + +� +j∈J +|gi(xi, y)| = a + +� +i∈J +Si +(140) +for some positive integer a that does not depend on {xi}i∈J. +Since the last bit b := f(x, y)n+1 is fixed, the correctness of the output is determined by the majmodp +and parity of f(x, y)1:n. We have that f(x, y) ∈ TS ⇐⇒ MMp(a + � +i∈J Si) ⊕ PAR(a + � +i∈J Si) ̸= b. Note +that we can write a + � +i∈J Si = a + � +i≤|J| airi for some uniformly random r ∈ {0, 1}|J|, and for each ai a +fixed positive integer mod p. Therefore, +Pr +xJ [f(x, y) ∈ TS] = +Pr +r∈{0,1}|J|[majmodp(a + +|J| +� +i=1 +airi) ⊕ PAR(a + +|J| +� +i=1 +airi) ̸= b]. +(141) +Furthermore, each ai is at most O(d) = O(ǫ log n) since |Bj| ≤ O(d) for each j ∈ [s]. By Lemma 24, +it immediately follows that PrxJ[f(x, y) ∈ TS] ≥ 1 +2 − O(1/ log n). In conclusion, we’ve showed that after +arbitrarily fixing y, Prx[f(x, y) ∈ T ] ≥ 1 +2 − O(1/ log n). Therefore, Prx,y[f(x, y) ∈ T ] ≥ 1 +2 − O(1/ log n), as +desired. +Claim 26. Pr +� +(X, majmodp(X) ⊕ parity(X)) ∈ T +� +≤ O(1/n) +22 + +Proof. This proof follows that of Claim 3.3 in [17]. Let D := (X, majmodp(X) ⊕ parity(X)). By the union +bound Pr[D ∈ T] ≤ Pr[D ∈ T0] + Pr[D ∈ TF ] + Pr[D ∈ TS]. Clearly Pr[D ∈ TS] = 0, since TS is the set of +invalid strings. Therefore, it is sufficient for us to show that Pr[D ∈ TF ], Pr[D ∈ T0] ≤ +1 +2n. +Pr[D ∈ TF ] = |TF|/2n, so it is sufficient to upper bound |TF|. Recall that z ∈ TF if z = f(x, y) for some +x, y such that at least NF blocks are y-fixed. 
Thus each z ∈ TF is characterized by y, and the bits of x that +do not belong to fixed blocks. That is, we need at most ℓ − NF bits to characterize z. Since ℓ ≤ n + nδ and +NF = 2n3α, +|TF | ≤ 2n+nδ−2n3α +(142) +≤ 2n−n3α +(143) +since δ < 3α. So +Pr[D ∈ TF] ≤ 2−n3α ≤ 1 +2n. +(144) +All that’s left is to bound Pr[D ∈ T0], the probability that at most N0 = 3n3α blocks i are all zero, DBi = 0|Bi|. +Since the first n bits of D are independently random, the probability that the block DBi is all zero is +independent of other blocks DBj for i ̸= j ∈ [s]. The probability that block i ∈ [s] is all zero is +Pr +� +DBi = 0|Bi|� += (1/2)|Bi| ≥ (1/2)O(d) = (1/2)O(ǫ log n) = +� 1 +n +�O(ǫ) +. +(145) +Now, the probability that at most N0 = 3n3α are all zero is +Pr[D ∈ T0] = Pr + + +� +T ⊆[s]: +|T |=N0 +{DBi ̸= 0|Bi| for each i ∈ [s] \ T } + + +(146) +≤ +� s +N0 +� � +1 − +1 +nO(ǫ) +�s−N0 +(147) +≤ +� s +N0 +� +e− s−N0 +nO(ǫ) . +(148) +Since s ≥ Ω(N/d2) = Ω( +n +ǫ2 log2 n), s ≤ n and N0 = 3n3α, +≤ +� n +3n3α +� +e−n−O(ǫ)( +n +ǫ2 log2 n −3n3α) +(149) +≤ +� +n +3n3α +�3n3α +e−n1−O(ǫ)/ log2 ne3n3α +(150) +≤ n3n3αe−n1−O(ǫ)/ log2 n +(151) +≤ 1 +2n +(152) +for sufficiently large n and small ǫ. In conclusion, Pr[D ∈ T] ≤ 1 +n, as desired. +Using Claims 25 and 26, we can lower bound the total variation distance between the target distribution +D = (X, majmodp(X) ⊕ parity(X)) and f(U). +∆(D, f(U)) ≥ |Pr[f(U) ∈ T] − Pr[D ∈ T]| +(153) +≥ 1 +2 − O(1/ log n), +(154) +completing the proof of Theorem 20. +23 + +4 +Removing the GHZ State from QNC0 Circuits +In this section we define sampling tasks related to the (X, majmodp(X)⊕parity(X)) sampling task considered +in Section 2.2, but which can be performed (approximately) by a constant-depth quantum circuit without +access to a GHZ input state. At a high level, the approach we use to construct these tasks mirrors the +approach used in [19] to find a relational problem which can be solved by a QNC0 circuit without access to +a GHZ state. 
First, we review “Poor Man’s GHZ States”: GHZ-like states which (unlike the GHZ state) +can be constructed by QNC0 circuits. Then we modify the circuit constructed in Section 2.2 by replacing +the GHZ input state with a circuit constructing a poor man’s GHZ state. Finally, we define a new sampling +task based on the output of these modified circuits. +4.1 +Review of Poor Man’s GHZ States +Definition 27. For any integer n let Bn be the balanced binary tree on n vertices. Label its edges e1, ..., en−1 +and vertices v0, ..., vn−1 (note the vertex labels start at 0), with v0 the root of T . For every non-root vertex +vi ∈ {v1, ..., vn−1} define P(vi) to be the set of edges contained in the (unique) path going from v0 to vi. +Finally, define the function h(d) : {0, 1}n−1 → {0, 1}n−1 by +h(d)i = +� +j: ej∈P (vi) +dj +i ∈ {1, 2, . . ., n − 1}. +(155) +That is, thinking of the bitstring d as assigning values to the edges of Bn, h(d) assigns a value to every +non-root vertex vi of Bn equal to the parity of the edge values going from v0 to vi. +Definition 28. Define the (binary tree) Poor Man’s GHZ state: +|PMn⟩ = +� +d∈{0,1}n−1 +1 +2(n−1)/2 |d⟩ ⊗ 1 +√ +2 +����h(d)0 +� ++ +���h(d)1 +�� +(156) +We call the first n − 1 qubits of |PMn⟩ “edge” qubits, and the last n qubits “vertex” qubits. Note that the n +in |PMn⟩ gives the number of vertex qubits in the state, not the total number of qubits. +Intuitively, it is occasionally helpful to think of the n vertex qubits of the state |PMn⟩ as being in an +“almost-GHZ state”, or a GHZ state with additional Pauli X type “error” terms specified by the edge qubits. +To explain this intuition, not that we can also write the state |PMn⟩ as +|PMn⟩ = +1 +2(d−1)/2 +� +d∈{0,1}n−1 +� +|d⟩ ⊗ +��n−1 +� +i=1 +Xh(d)i +� +⊗ I2 +� +|GHZn⟩ +� +(157) +We will make use of Equation (157) when working with the state |PMn⟩ later in this section. +Theorem 29. 
For any n, the state |PMn⟩ can be constructed by a depth-3 circuit consisting of 1 and 2 qubit +gates acting on 2n − 1 qubits. +Proof. This state can be constructed by following the procedure outlined in Theorem 17 of [19], but omitting +the measurement of the edge qubits. We recap this procedure here. +Begin with 2n − 1 qubits, n of which we identify with the vertices v0, ..., vn−1 of the tree Bn and n − 1 +of which we identify with edges e1, ...en−1 of the same tree. Apply a Hadamard gate to each vertex qubit. +Then, for every pair of vertices vi and vj connected by an edge ek, apply CNOT gates with controls on +vertex qubits vi and vj and target on the edge qubit ek. Order the edge qubits as in the tree Bn; these form +the first n − 1 qubits of |PMn⟩. Order the vertex qubits v1...vn−1v0 (note the qubit identified with the root +vertex comes last in this ordering); these form remaining n qubits of the state |PM(n)⟩. +To see that this circuit produces the correct state first observe that after the Hadamard gates are applied +and before the CNOT gates are applied, the vertex qubits are in a uniform superposition over all computa- +tional basis states. We order the vertex qubits as in the state |PMn⟩, so the final vertex qubit is associated +with the root vertex of the graph Bn. It is then straightforward to check that, for any n − 1 bit string +24 + +x = x1...xn−1, if the vertex qubits are in state |x0⟩ then applying the CNOT gates puts the edge qubits in +the state h−1(x). Similarly, if vertex qubits are in the state |x1⟩, applying the CNOT gates puts the edge +qubits in the state h−1(x). 
Then we can write the state produced by our circuit as +1 +2n/2 + + +� +x∈{0,1}n−1 +��h−1(x) +� +⊗ |x0⟩ + +� +x∈{0,1}n−1 +��h−1(x) +� +⊗ |x1⟩ + + +(158) += +1 +2n/2 + + +� +d∈{0,1}n−1 +|d⟩ ⊗ |h(d)0⟩ + +� +d∈{0,1}n−1 +|d⟩ ⊗ +���h(d)1 +� + + +(159) += +1 +2(n−1)/2 + + +� +d∈{0,1}n−1 +|d⟩ ⊗ +� 1 +√ +2 |h(d)0⟩ + +���h(d)1 +�� + = |PMn⟩ +(160) +where we used on the second line that the function h was one-to-one. +Finally, we show this circuit can be implemented in depth 3. Consider the 2n − 1 vertex graph obtained +from Bn by bifurcating each edge of Bn – that is, replacing each edge of Bn connecting vertices vi and vj +with a new vertex connected to both vi and vj. This graph is still a tree, hence 2-colorable, and edges of +this graph are in one-to-one correspondence with CNOT gates which need to be implemented in the circuit +described above. All CNOT gates in the same color class touch disjoint qubits and be applied simultaneously, +so we see all CNOT gates can be applied in depth 2. Adding the layer of Hadamard gates required as the +first step shows this whole circuit can be implemented in depth 3. +4.2 +Sampling with QNC0 Circuits +We begin with a description of the distribution which we will show can be sampled from (approximately) by +a QNC0 circuit. Like the distributions considered in Section 2, it will be a distribution of the form (Z, f(Z)) +where Z is a uniformly random bitstring and f(Z) : {0, 1}n → {0, 1} is some function. However, the function +f considered here is substantially more complicated than the functions considered in Section 2. We define +this function next. +Definition 30. For any prime p define the function pmmajmodp : {0, 1}2n−2 → {0, 1} to act on a 2n − 2 +bit string z via the following procedure: +1. Associate the first n − 1 bits of z with edges of the complete binary tree Bn and the next n − 1 bits with +the non-root vertices v1...vn−1, following the same ordering as in Definition 27. 
Label bits associated +with edges d and the bits associated with vertices x. +2. For any integer a define +MMp(a) := +� +0 if a < p/2 +1 otherwise. +(161) +3. Set +pmmajmodp(z) = MMp +�n−1 +� +i=1 +xi(−1)h(d)i +� � +parity(x) +(162) +Now we construct a quantum circuit which samples approximately from the distribution (Z, pmmajmodp(Z)) +without requiring a GHZ state input. As in Section 2, we begin by describing a circuit that performs the +sampling task and involves single qubit non-unitary rotations Aθ. +25 + +Theorem 31. For any p ∈ Z+ there is a constant-depth circuit consisting of one and two qubit unitary gates +and Aθ operations which takes the (2n− 1)-qubit all zeros state as input and produces an output which, when +measured in the computational basis, produces an output distribution (Z′, Y ) with +∆((Z′, Y ), (Z, pmmajmodp(Z))) ≤ 1 +2 − 1 +π + 1 +2p + O(p3/2e−n/4p2). +(163) +Proof. The first step is preparing the state |PMn⟩, which can be done in constant depth by Theorem 29. +After that, the same non-unitary circuit as described in the proof of Theorem 7 is applied to the vertex +qubits of the poor man’s GHZ state. This is illustrated in Figure 9. +✌✌✌ +... +✌✌✌ +H +A† +π/p +✌✌✌ +... +H +A† +π/p +✌✌✌ +H +exp(−iπX/4) +✌✌✌ +❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ +❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +|PMn⟩ +Figure 9: +Constant-depth non-unitary circuit producing approximate samples from the distribution +(Y, pmmajmodp(Y )). +The upper box indicates the n − 1 “edge” qubits of the state |PMn⟩. +The lower +box indicates the n “vertex” qubits of the same state. 
+To see that this circuit approximately samples from the correct distribution we write the state |PMn⟩ +as a GHZ state with additional controlled X “error” terms, then commute those through the rest of circuit. +In the following argument we will need to pay close attention to the rotation angle θ in the non-unitary +operator Aθ. For this reason, for the remainder of this section only, we change notation and write Aθ as +A (θ). +The key observation is the operator identity +A (θ)† = A (−θ)† Z +(164) +which holds for any θ and can quickly be verified by checking the action of ZA (θ) and A (−θ) Z on |0⟩ and +|1⟩ basis states. Then (using Equation (157) as a starting point) we can write the pre-measurement state +produced by the circuit above as: +1 +2(d−1)/2 +� +d∈{0,1}n−1 + +I2n−1 ⊗ + + +n−1 +� +j=1 +A +�π +p +�† +H + + ⊗ exp +�−iπX +4 +� +H + + + +|d⟩ ⊗ + + + + +n−1 +� +j=1 +Xh(d)j + + ⊗ I2 + + |GHZn⟩ + + += +1 +2(d−1)/2 +� +d∈{0,1}n−1 + +I2n−1 ⊗ + + +n−1 +� +j=1 +A +�π +p +�† +HXh(d)j + + ⊗ exp +�−iπX +4 +� +H + + (|d⟩ ⊗ |GHZn⟩) +(165) += +1 +2(d−1)/2 +� +d∈{0,1}n−1 + +|d⟩ ⊗ + + + + +n−1 +� +j=1 +Zh(d)jA +� +(−1)h(d)j π +p +�† + + ⊗ exp +�−iπX +4 +� + H⊗n |GHZn⟩ + + . (166) +Where the rearrangement on the third line used the operator identity discussed above (Equation (164)). +26 + +From this it is clear that the measurement of the first n − 1 edge qubits produces a uniformly random +bitstring. We assume that such a measurement has been carried out, producing some bitstring d. 
Then, +following the same analysis as used in the proof of Theorem 7, we consider the (unnormalized) state of the +first vertex qubit when the first n − 1 vertex qubits have been measured and bitstring x = x1x2...xn−1 is +observed: +⟨x|1...n−1 + + +n−1 +� +j=1 +Zh(d)jA +� +(−1)h(d)j π +p +�† + + ⊗ exp +�−iπX +4 +� � +H⊗n |GHZn⟩ +� += (−1)|x| ⟨x|1...n−1 + + +n−1 +� +j=1 +A +� +(−1)h(d)j π +p +�† + + ⊗ exp +�−iπX +4 +� � +H⊗n |GHZn⟩ +� +(167) += (−1)|x|2−(n−1) exp + +iX + +−π +4 + π +p +n−1 +� +j=1 +� +xj(−1)h(d)j� + + + + |parity(x)⟩ , +(168) +where the final line followed from exactly the same series of identities as used in Equations (20) to (25). The +key features of this argument are illustrated in Figure 10, where we focus just on the analysis of the vertex +qubits when the edge qubits are measured and classical bitstring d is observed. +Next (still following the analysis used in Section 2.1) we note that the vector above has norm 2n−1 for +any string x, and hence the bitstring x observed when measuring the first n − 1 vertex qubits is uniformly +random. Additionally, we let Yd,x be the random variable representing the outcome measurement applied to +the final qubit of the circuit depicted in Figure 9, conditioned on the measurement of the previous 2n − 2 +qubits giving the bitstring (d, x). Straightforward calculation applied to Equation (168) gives +Pr[Yd,x = parity(x)] = cos2 +� +−π +4 + π +p +�� +i +xi(−1)h(d)i +�� +(169) +Then, small extension of Lemma 8 (proven next, in Lemma 32) gives +1 +22n−2 +� +(d,x)∈{0,1}2n−2 +Pr +� +Yd,x ̸= pmmajmodp(d, x) +� +≤ 1 +2 − 1 +π + 1 +2p + O(p3/2e−n/4p2). +(170) +Finally, we let D′, X′ be random variables representing the output of measuring the edge qubits and +first n − 1 vertex qubits of the circuit depicted in Figure 9, respectively. 
We have already shown that the +marginal distributions of D′ and X′ are uniformly random and so we find +∆((D′, X′, YD′,X′), (Z, pmmajmodp(Z))) ≤ 1 +2 − 1 +π + 1 +2p + O(p3/2e−n/4p2) +(171) +by exactly the same argument as used to finish the proof of Theorem 7. +Lemma 32. Define the random variable Yd,x as in the proof of Theorem 31, so +Pr[Yd,x = parity(x)] = cos2 +� +−π +4 + π +p +�� +i +xi(−1)h(d)i +�� +(172) +Then +1 +22n−2 +� +(d,x)∈{0,1}2n−2 +Pr +� +Yd,x ̸= pmmajmodp(d, x) +� +≤ 1 +2 − 1 +π + 1 +2p + O(p3/2e−n/4p2). +(173) +27 + +Xh(d)1 +H +A (π/p)† +✌✌✌ +x1 +... +Xh(d)n−1 +H +A (π/p)† +✌✌✌ +xn−1 +H +exp(−iπX/4) + + + + + + + + + + + + + + + + + +|GHZn⟩ += +H +Zh(d)1 +A (π/p)† +✌✌✌ +x1 +... +H +Zh(d)n−1 +A (π/p)† +✌✌✌ +xn−1 +H +exp(−iπX/4) + + + + + + + + + + + + + + + + + +|GHZn⟩ += +H +A +� +(−1)h(d)1π/p +�† +Zh(d)1 +✌✌✌ +x1 +... +H +A +� +(−1)h(d)n−1π/p +�† +Zh(d)n−1 +✌✌✌ +xn−1 +H +exp(−iπX/4) + + + + + + + + + + + + + + + + + + + +|GHZn⟩ += +H +✌✌✌ +x1 +... +H +✌✌✌ +xn−1 +H +exp +� +−iX +� +π/4 + π/p � +j xj(−1)h(d)j +�� + + + + + + + + + + + + + + + + + +|GHZn⟩ +Figure 10: The state of the final vertex qubit of the circuit described in Figure 9 when all other vertex qubits +(and edge qubits) are measured in the computational basis. Equivalence between lines is explained in the +proof of Theorem 31. +28 + +Proof. Let D, X be random variables each taking value uniformly at random from {0, 1}n−1. Then we can +write +1 +22n−2 +� +(d,x)∈{0,1}2n−2 +Pr +� +Yd,x ̸= pmmajmodp(d, x) +� += Pr +� +YD,X ̸= parity(x) ⊕ MMp +�� +i +xi(−1)d +i +�� +(174) += +� +k +Pr +� +YD,X ̸= parity(x) ⊕ MMp (k) +��� +� +i +Xi(−1)D +i = k +� +Pr +�� +i +Xi(−1)D +i = k +� +(175) +We compare this equation to Equation (34), and note that (after rewriting majmodp(X) = MMp(|X|)) the +two probabilities are identical except that the random variable |X| has been replaced by � Xi(−1)D +i . 
Then +the proof of the bound proceeds identically to the proof of bound in Lemma 8, except that we need a bound +on the total variation distance between the distribution of the random variable � +i Xi(−1)Di (mod p) and +the uniform distribution over {0, 1, ..., p − 1}. +To do this, we write +� +i +Xi(−1)Di = +� +i +Xi − 2 +� +i:Xi=1 +Di +(176) +and note that both terms in the right-hand side equation give uniform distributions mod p by Fact 22 +(provided that close to half the bits of Xi are ones, which happens with high probability). +Formally, let ˜X be the random variable taking value uniformly at random from the set of n-bit strings +with Hamming weight at least n/4. Then we have +∆ + +� +i +Xi − 2 +� +i:Xi=1 +Di, +� +i +˜Xi − 2 +� +i: ˜ +Xi=1 +Di + + ≤ ∆(X, ˜X) ≤ exp(−n/8), +(177) +where the first inequality follows because for any distributions X and ˜X and (possibly random) function f +we have ∆(X, X′) ≥ ∆(f(X), f(X′)), and the second inequality follows from Hoeffding’s. Then, letting Up +denote the uniform distribution mod p, for any ˜x in the support of ˜X we have, by Fact 22, that +∆ +� +2 +� +i:˜xi=1 +Di +(mod p), Up +� +≤ √p exp +� +−n/4p2� +(178) +and hence +∆ +� +|˜x| − 2 +� +i:˜xi=1 +Di +(mod p), Up +� +≤ √p exp +� +−n/4p2� +(179) +since shifting a distribution doesn’t change its distance from the uniform distribution. Then summing over +all possible ˜x we see +∆ + + +��� ˜X +��� − 2 +� +i: ˜ +Xi=1 +Di +(mod p), Up + + ≤ √p exp +� +−n/4p2� +. +(180) +Combining Equations (177) and (180) gives +∆ +�� +i +Xi − 2 +� +i:Xi=1 +Di +(mod p), Up +� +≤ exp(−n/8) + √p exp +� +−n/4p2� += O(√p exp +� +−n/4p2� +). +(181) +Then, following the same proof as in Lemma 8 and plugging the above inequality in place of Fact 22 gives +the desired bound. 
+29 + +Then, following the same arguments as used in Section 2.2, we show that we can replace the non-unitary +rotation gates used in the circuit described above with actual unitary gates, while causing small disturbance +to the output distribution. The result of this procedure is a QNC0 circuit that takes the all zeros state as +input and whose output samples approximately from the distribution (Z, pmmajmodp(Z)). +Theorem 33. For n sufficiently large and p = nc for some constant c ∈ (0, 1/2) there is a constant-depth +circuit consisting of one and two qubit unitary gates and Um′,θ′ gates with m′ = ⌈c−1 + 1⌉ and θ′ = π/p +which takes the (2n − 1)-qubit all zeros state as input and produces an output which, when measured in the +computational basis, produces a distribution (Z′, Y ) with +an n-bit output which correlates approximately with the distribution (Z, pmmajmodp(Z)). +Proof. The desired circuit can be constructed from the circuit presented in Figure 9 following the same +procedure as used in Section 2.2. Specifically, we first replace blocks of m parallel Aθ gates with Aθ,m gates, +then replace those with Uθ,m gates. The only additional complication we encounter is that we must apply a +final permutation to our output bits to accommodate a “shuffling effect” caused by replacing blocks of the +Aθ gates by Aθ,m. The final circuit is presented in Figure 11, where the Cm gate denotes a permutation +whose action on the m qubit computational basis state |x1x2...xm⟩ is given by +Cm |x1x2...xm⟩ = |x2x3...xmx1⟩ . +(182) +✌✌✌ +... +✌✌✌ +H +U † +m′,θ′ +Cm +✌✌✌ +... +H +✌✌✌ +... +... +H +U † +m′,θ′ +Cm +✌✌✌ +... 
+H +✌✌✌ +H +exp(−iπX/4) +✌✌✌ +❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ +❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +✤ +❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +|PMn⟩ +Figure 11: +Constant-depth unitary circuit producing approximate samples from the distribution +(Y, pmmajmodp(Y )). +Note that m is constant, and so the unitaries acting on m qubits have constant +size. The upper box indicates the n − 1 “edge” qubits of the state |PMn⟩. The lower box indicates the n +“vertex” qubits of the same state. +As a first step towards showing this circuit samples from the desired distribution, we show that replacing +the parallel Aθ gates in the circuit of Figure 9 with Aθ,m gates followed by a Cm gates doesn’t change the +post-measurement distribution produced by the circuit. To see why, we consider the state of the final vertex +qubit in both circuits after a measurement is performed on all edge qubits, producing bitstring d, and the +first m vertex qubits, producing bitstring x1x2...xm. 
In the circuit described in Figure 9, the state of the +30 + +final qubit is given by +⟨x1x2...xm| +m +� +i=1 +AθZh(d)i � +i +CNOTi,n |+⟩⊗m ⊗ |0⟩ +(183) += ⟨x1x2...xm| +m +� +i=1 +exp(iθXxi)Zh(d)i � +i +CNOTi,n |+⟩⊗m ⊗ |0⟩ +(184) += ⟨x1x2...xm| +m +� +i=1 +Zh(d)i |+⟩⊗n ⊗ exp +� +iθX +� +i +xi(−1)h(d)i +� +|parity(x1x2...xm)⟩ +(185) +and, if the Aθ gates are replaced by a Cm gate and Aθ,m gate the state of the final qubit is given by +⟨x1x2...xm| CmAθ,m +m +� +i=1 +Zh(d)i � +i +CNOTi,n |+⟩⊗m ⊗ |+⟩n +(186) += ⟨x2...xmx1| Aθ,m +m +� +i=1 +Zh(d)i � +i +CNOTi,n |+⟩⊗m ⊗ |+⟩n +(187) += ⟨x2...xmx1| +m +� +i=1 +exp(iθXxi)Zh(d)i � +i +CNOTi,n |+⟩⊗m ⊗ |+⟩n +(188) += ⟨x2...xmx1| +m +� +i=1 +Zh(d)i |+⟩⊗n ⊗ exp +� +iθX +� +i +xi(−1)h(d)i +� +|parity(x2...xmx1)⟩ . +(189) +Since these states are the same up to an overall phase we see the change has no effect on the probability of +observing outcomes d and x1, ..., xm or the state of the unmeasured qubit. +It is straightforward to extend this analysis to the case where the same replacement is made to all D +blocks of Aθ gates in the circuit of Figure 9. +It remains to show that replacing the Aθ,m gates (in the circuit produced by the replacement discussed +above) with Uθ,m gates causes a negligible change to the distribution output by the circuit after a computa- +tional basis measurement. Following exactly the same argument as used to prove Theorem 20 we see +�����I⊗(n−1) +2 +⊗ +�� +CmU † +θ′,m′ +�⊗D +⊗ exp(−iπX/4) +� +H⊗n +− I⊗(n−1) +2 +⊗ +�� +CmA† +π/p,m +�⊗D +⊗ exp(−iπX/4) +� +H⊗n +����� +∞ +∈ O(Dn−(1+c)) ≤ O(n−c). +(190) +and so the classical distributions produced by computational basis measurements of the states +I⊗n−1 +2 +⊗ +�� +CmU † +θ′,m′ +�⊗D +⊗ exp(−iπX/4) +� +H⊗n |PMn⟩ +(191) +and +I⊗n−1 +2 +⊗ +�� +CmA† +π/p,m +�⊗D +⊗ exp(−iπX/4) +� +H⊗n |PMn⟩ +(192) +also differ by at most O(n−c) in total variation distance. +Combining Theorem 31 with the fact that +O(p3/2en/4p2) ≤ O(1/p) for p = n−c with c < 1/2 completes the proof. 
+5 +Classical hardness of Sampling (Z, pmmajmodp(Z)) +This section concerns the hardness of classically sampling from the distribution (Z, pmmajmodp(Z)), where +Z is a random variable Z ∼ Unif({0, 1}N) and the function pmmajmodp is defined in Definition 30, and +recalled below. +31 + +pmmajmodp +The input to pmmajmodp is a N = 2n−2 bit string, (x1, x2, . . . xn−1, d1, d2, . . . , dn−1). Each +xi corresponds to the vertex vi of the balanced binary tree Bn, and each di corresponds to the edge ei of Bn +per the ordering in Definition 27. +pmmajmodp(x, d) = MMp +�n−1 +� +i=1 +xi(−1)h(d)i +� +⊕ parity(x) +x, d ∈ {0, 1}n−1. +(193) +Where MMp is defined in Definition 28 and h(d) is defined in Definition 27. +In Section 3 we proved the classical hardness of sampling from the slighly different distribution (X, majmodp(X)⊕ +parity(X)) where X ∼ Unif({0, 1}n). For the sake of comparing with pmmajmodp we list this function below. +majmodp ⊕ parity +majmodp(x) ⊕ parity(x) = MMp +� n +� +i=1 +xi +� +⊕ parity(x) +x ∈ {0, 1}n +(194) +Both of these distributions have the form (Y, MMp(SY ) ⊕ parity(Y )) for a uniformly random bitstring Y , +and SY a sum that depends on Y . For the majmodp(Sx) ⊕ parity(x) function, the relevant sum is simply +the Hamming weight of the input x ∈ {0, 1}n, denoted as |x|. A nice property of the Hamming weight, +|x| = � +i xi is that each of the terms in the sum depends on a different bit of the input, and thus if many of +the bits of xi are sampled independently, then so are their corresponding terms in the sum. The key challenge +in applying the framework from the proof of Theorem 20 is that the terms in S = � +i xi(−1)h(d)i no longer +depend on disjoint variables. In particular, flipping the bit dj corresponding to edge ej flips the sign of all +terms xi(−1)h(d)i for vi downstream from ej in the balanced binary tree Bn. 
In order to accommodate for +this dependence, we will partition the tree Bn into subtrees, then identify subtrees corresponding to output +variables which are independent when a large chunk of the input variables are fixed. +We show that for some choice of p, any function f which takes as input a uniformly random (N +N δ)-bit +string and is (1/2 − Ω(1))-close in total variation distance with (Z, pmmajmodp(Z)), must have locality +d ≥ Ω(log1/2 N). If we consider f as a classical circuit with fan-in 2, this corresponds to a circuit depth +lower bound of Ω(log log N). +Theorem 34. For each δ < 1, there exists an ǫ > 0 such that for all sufficiently large even integer N and +prime number p = Θ(N α) for α ∈ (δ/3, 1/3): Let f : {0, 1}ℓ → {0, 1}N+1 be an (ǫ log N)1/2-local function, +with ℓ ≤ N + N δ. Then ∆(f(U), (Z, pmmajmodp(Z))) ≥ 1/2 − O(1/ log N). +Proof. The function f takes input an ℓ-bit string we label as (u1, u2, . . . , uℓ) and outputs (N + 1)-bit output +string we label as (z1, . . . , zN, b). Let n be the integer such that N = 2n − 1. Just as in the definition +of pmmajmodp in Definition 30, we consider the partition of z = (x, d) ∈ {0, 1}n−1 × {0, 1}n−1, where +x1, . . . , xn−1 are the first n − 1 bits of z, and d1, . . . dn−1 are the next n − 1 bits of z, and b ∈ {0, 1} is the +last bit which is considered “correct” if b = pmmajmodp(z). +The output variables x1, . . . , xn−1 are associated with v1, . . . , vn−1, the non-root vertices of the balanced +binary tree Bn. The output variables d1, . . . , dn−1 are associated with the edges e1, . . . , en−1, where we use +the ordering as defined in Definition 27. As is standard in graph theory, for any graph G we use V (G) and +E(G) to denote G’s vertices and edges respectively. To understand the correlations between each of the +output bits zi, it is useful to partition Bn as follows. +Definition 35 (Bn partition (T0, T1, . . . , Tk)). 
Let D := log(2d), we partition the vertices of the balanced +binary tree Bn into the bottom D layers and the top log n − D layers as shown in Figure 12. Let the top +tree T0 be the tree induced by the top log(n) − log(2d) layers of vertices in Bn. The subgraph induced by +the bottom D layers is a forest of trees which we denote as T = {T1, T2, . . . , Tk} and refer to as the small +trees. In order to make sure that each edge and vertex of Bn is accounted for in {T0} ∪ T , for each i ∈ [k] +we consider the edge which connects the root of Ti to a leaf of T0 as in the small tree Ti. Thus, each small +tree T ∈ T has an edge with the root of T as its only endpoint as shown in Figure 12. +Although a subtree T of Bn consists of vertices and edges labeled as {vi}i and {ei}i, we slightly abuse +notation and say that the output variable zi is “in” T (denoted zi ∈ T ) if the edge or vertex which is +32 + +... +... +... +... +T0 +... +... +... +... +T1 +... +... +... +... +T2 +... +... +... +... +T3 +... +... +... +... +Tk +. . . +log n − D +D +T +Figure 12: Partition of the balanced binary tree Bn into k + 1 subtrees. The top tree T0 consists of the +subtree induced by the first log n − D layers of Bn. The k bottom trees T = {T1, T2, . . . , Tk} include all +vertices in the bottom D layers of Bn and all incident edges. Note that for each i ∈ [h], Ti contains a single +edge that only has one endpoint, this edge corresponds to the edge in Bn that connects the root of Ti with +its parent in T0. +33 + +associated with zi is in E(T ) ∪ V (T ). And will sometimes use T to denote the subset of variables {zi} which +are associated with the tree T . Moreover, we define the size of a subtree T of Bn be |T | = |V (T )| + |E(T )|. +Note that since each T ∈ T has an extra edge at the root, with no other endpoint, |E(T )| = |V (T )| ≤ 2d. +The top tree T0 has |V (T0)| = 2log n−D − 1 = +n +2d − 1 vertices, and |E(T0)| = |V (T0)| − 1 = n +2d − 2 edges. 
+For each i ∈ [k] the small tree Ti has at most 2D − 1 = 2d − 1 vertices V (Ti), and the same number of edges +|E(Ti)| = |V (Ti)| = 2d − 1. In total, the top tree has size |T0| ≤ n/d − 3 and each bottom tree Ti ∈ T has +size at most |Ti| ≤ 4d. Since the root vertex of each small tree is at the (log n − D + 1)-level of the balanced +binary tree Bn, there are k = 2log n−D = n/d small trees. +For each output variable zi in the string z, we consider the other output variables which are in the same +tree as zi as the tree neighborhood of zi. +Definition 36 (Tree Neighbors, NT ). For each variable zi for i ∈ [N], let NT (zi) ⊆ {zi}i∈[N], be the subset +of outputs in the same tree T ∈ T ∪ {T0} as zi. Moreover, for any subset of outputs S ⊆ {zi}i∈[N], let +NT (S) := � +zi∈S NT (zi). +Recall that the variables {zi}i∈[N] only correspond to the non-root vertices of Bn, but the root vertex v0 +is in the top tree T0. Thus for vertices vj, vk corresponding to the left and right children of root v0, we have +that zj ∈ NT (zk), despite there being no variable in NT (zk) associated with the root. Note that for any +output in a small tree zi ∈ � +T ∈T T , NT (zi) has size at most 2d since |T | ≤ 2d for each T ∈ T . Moreover, +for any subset of small tree outputs S ⊆ � +T ∈T T , |NT (S)| ≤ 2d|S|. +Definition 37 (Forest Partition). F0, F1, . . . , Fs ⊆ {zi}i∈[N] is a forest partition if both of the following +hold. +1. F0, . . . , Fs is a partition of all variables {zi}i∈[N] +F0 ⊎ · · · ⊎ F1 = {zi}i∈[N] +(195) +2. Each Fi contains a union over a subset of trees from T ∪ {T0}. +NT (Fi) = Fi +for each i ∈ [s] +(196) +The next lemma shows that we can construct a forest partition with the property that, after a large +fraction of the input bits to our (ǫ log N)1/2 local function have been fixed, each of the remaining unfixed +bits controls a single (independent) subset of trees in the partition. +Lemma 38. There exists a forest partition F0, F1, . . . 
, Fs for some s ≥ Ω(N/d3), with |Fi| ≤ O(d2) for each +i ∈ [s]; and a partition of the input u ∈ {0, 1}ℓ into u = (w, y), with w ∈ {0, 1}s such that +f(w, y) +�� +F0 = h(y), +(197) +f(w, y) +�� +{N+1} = b(y), +(198) +f(w, y) +�� +Fi = gi(wi, y) +for each i ≥ 1, +(199) +and +T0 ⊆ F0. +(200) +For some functions h : {0, 1}ℓ−s → {0, 1}|F0|, b : {0, 1}ℓ−s → {0, 1}, and gi : {0, 1} × {0, 1}ℓ−s → {0, 1}|Fi| +for each i ∈ [s]. +We refer to gi(wi, y) as the ith block of the output, assigning values to the variables in Fi, for i ∈ [s]. +Note that if we fix the input y, each block gi(wi, y) is a function only of the input bit wi. Since the input +w ∈ {0, 1}s is uniformly random, the value of each of the blocks is independent conditioned on y. +Proof of Lemma 38. Consider the bipartite graph with the ℓ input variables to f as the left vertices, and the +N + 1 output variables as the right vertices, where each input j ∈ [ℓ] and output i ∈ [N + 1] vertex share an +edge iff the ith output bit of f, denoted as fi is a function of the jth input bit. We refer to this graph as the +input-output dependency graph of f. For each vertex v in the dependency graph, let the neighborhood of v, +Nf(v), be the set of vertices adjacent to v. Similarly, for any subset S of vertices, let Nf(S) := � +v∈S Nf(v). +Since by assumption, f is d-local, the degree of the output vertices is at most d. +34 + +Let L be the set of input vertices of the dependency graph for f which are adjacent to the output vertices +in T0 or b, that is L := Nf(T0 ∪ {b}) (or we could associate b with the root v0 in T0). If we fix the inputs +in L, then b, and the outputs in T0 are also fixed. For this reason we refer to L as the fixed inputs, and the +remaining inputs U = {ui}i∈[ℓ] \ L as the unfixed inputs. +|L| ≤ d(|T0|) ≤ d (|V (T0)| + |E(T0)|) ≤ n − 3d. +(201) +Therefore, there are at least N − |L| ≥ 2n − 1 − (n − 2d) ≥ n unfixed inputs U. Since |V (T0)| = +n +2d − 1, and +|E(T0)| = |V (T0)| − 1. 
+As mentioned above, the locality of f implies that the degree of the output vertices in the dependency +graph is at most d. Using the following claim, we can also bound the degree of half of the input vertices in +U. +Claim 39. There is a subset of inputs ˜U ⊆ U with size | ˜U| ≥ |U|/2 ≥ n/4 such that the degree of the vertices +in ˜U in the dependency graph of f is at most O(d). +Proof. Since there are at most N ≤ 2n output vertices, each of degree at most d, there are at most 2nd +edges in the input/output dependency graph. Therefore, at least half of the vertices in U have degree at +most 4d since otherwise there would be |U|/2 vertices with degree greater than 4d, and the total number of +edges would be strictly greater than |U| +2 · 4d ≥ n +2 · 4d = 2dn edges. +Within these bounded degree input vertices ˜U, we next find a subset W such that each pair of vertices +in W are adjacent to disjoint trees. +Claim 40. There exists a subset of inputs W ⊆ ˜U of size |W| ≥ Ω(N/d3) such that for each pair ui ̸= +uj ∈ W, the neighborhoods Nf(ui), Nf(uj) intersect with disjoint trees. That is, for each ui ̸= uj ∈ W, +NT (Nf(ui)) ∩ NT (Nf(uj)) = ∅. +Proof. We greedily build W as follows: Initialize the set V as the inputs ˜U. While V is non-empty, choose +any v ∈ V , add it to W and remove Nf(NT (Nf(v))) from V . +Note that the size of V decreases by at most O(d3) in each iteration since for any subset of outputs +S, |Nf(S)| ≤ d|S|, and |NT (S)| ≤ 2d|S|, and for any subset of inputs Sin, |Nf(Sin)| ≤ O(d). Therefore, +|W| = | ˜U|/O(d3) ≥ Ω(n/d3) = Ω(N/d3). +We set w as the input bits of u which are indexed by W from Claim 40, and let y be the remaining bits of u. +For each i ∈ [s], let Fi = NT (Nf(wi)) and let F0 be the remaining {zi} variables: F0 = {zi}i∈[n] \ (� +i∈[s] Fi). +We first show that F0, . . . , Fs is a forest partition as defined in Definition 37. By the definition of F0 +it is clear that �s +i=1 Fi = {zi}i∈[N]. 
+Furthermore, these forests are disjoint since for each i ̸= j ∈ [s], +Fi ∩ Fj = NT (Nf(wi)) ∩ NT (Nf(wj)) = ∅ by Claim 40, and since F0 ∩ (� +i∈[s] Fi) = ∅ by definition. All +that’s left to show that this is a forest partition is that NT (Fi) = Fi for each i ∈ {0, . . ., s}. This is clearly +true for each i ∈ [s] since NT (Fi) = NT (NT (Nf(wi))) = NT (Nf(wi)) = Fi. To show that NT (F0) = F0, +suppose for the sake of contradition that this is not the case, that there exists some a ∈ NT (F0) \ F0. Since +�s +j=0 Fj = {zi}i∈[N], a is in some other forest Fj with j ̸= 0. But this implies that NT (Fj) ∩ F0 ̸= ∅, and so +Fj ∩ F0 ̸= ∅, a contradiction. Therefore, F0, F1, . . . , Fs is a forest partition as defined in Definition 37. +Next, we show that for each i ∈ [s], f(w, y) +�� +Fi is a function of only wi and y. This is because for each +j ∈ [s], such that j ̸= i, we have Nf(wj) ∩ Fi ⊆ Fj ∩ Fi = ∅. Similarly, the outputs F0 do not depend on any +bits of w since for each i ∈ [s], Nf(wi) ∩ F0 ⊆ Fi ∩ F0 = ∅. +Since we initialized our set of fized variables L with Nf(T0 ∪ {b}), and we chose W such that W ∩ L = ∅, +it follows that both b and the outputs in T0 can be written as functions of y. Furthermore, this implies that +T0 ⊆ F0. +All that’s left to prove Lemma 38 is to show |Fi| ≤ O(d2) for each i ∈ [s]. Note that for each i ∈ [s], +|Fi| = |NT (Nf(wi))|. Since wi was chosen from the subset of input variables that are not adjacent to T0 in +f’s dependency graph (those indexed by U), and have degree at most O(d) (indexed by ˜U ⊆ U), it follows +that |NT (Nf(wi))| ≤ 2d|Nf(wi)| and |Nf(wi)| ≤ O(d). Therefore, |Fi| ≤ O(d2) for each i ∈ [s]. +35 + +Next we consider how the pmmajmodp function evaluates on (x, d). We partition the terms of the sum +S = �n−1 +i=1 xi(−1)h(d)i into s + 1 according to the forest partition F0, F1, . . . , Fs from Lemma 38. +Si = +� +vj∈V (Fi) +xj(−1)h(d)i +for each i ∈ {0, 1, . . ., s}. 
+(202) +Where V (Fi) denotes the set of vertices vj ∈ V (Bn) such that xj ∈ Fi and E(Fi) denotes the set of edges +ej ∈ E(Bn) such that dj ∈ Fi for i ∈ {0, 1, . . ., s}. Again, note that v0 /∈ V (F0). We can rewrite the sum as +S = �s +i=0 Si. +Let’s consider the sum S for a particular assignment z = (x, d) ∈ {0, 1}N, where for each i ∈ {0, 1, . . ., s}, +zFi denotes the assignment to Fi. Note that S0 depends only on zF0, and each term Si for i ≥ 1 depends +only on zF0 and zFi. +S(z) = S0(zF0) + +s +� +i=1 +Si(zFi, zF0) +(203) +This is because xj(−1)h(d)i depends on xj as well as each dj′ where ej′ is along the path from v0 to vj in +Bn. +Definition 41 (Minimal Block). For some assignment z ∈ {0, 1}N, we say that the ith block is minimal if +Si(zFi, zF0) = +min +z′ +Fi∈{0,1}|Fi| Si(z′ +Fi, zF0). +(204) +Claim 42. For each fixed assignment to zF0, and any i ∈ [s], there is a unique minimal assignment to zFi. +That is, for each zF0 ∈ {0, 1}|F0|, there exists a z∗ +Fi ∈ {0, 1}|Fi| such that +Si(z∗ +Fi, zF0) < Si(zFi, zF0) +for each zFi ∈ {0, 1}|Fi| \ {z∗ +Fi}. +(205) +Proof. For each i ∈ [s], the sum Si can be broken into terms for each of the small trees Tj ∈ T in the forest +Fi. +Si = +� +j∈[k]:Tj⊆Fi +STj +(206) +Where STj := � +vi∈V (Tj) xi(−1)h(d)i. Note that the value each of STj for j ∈ [s] depends on zF0 and the +variables in Tj. Since each Tj for j ∈ [s] are disjoint, it is sufficient for us to show that for a fixed zF0, there +is a unique minimal assignment to the variables of Tj for each j ∈ [s]. +For any two vertices vj ̸= vk ∈ V (Bn), let Pj,k ⊆ E(Bn) be the subset of edges {e1, . . . , en−1} along +the path from vj to vk. Note that for any vertex vi, P(vi) as defined in Definition 27 is equivalent to P0,i. +Consider some T ∈ T with root vr, and single-endpoint root edge er. We can rewrite ST as +ST = +� +vi∈V (T ) +xi +� +ej∈P0,i +(−1)dj +(207) += (−1)h(d)r + +xr + +� +vi∈V (T )\{vr} +xi +� +ej∈Pr,i +(−1)dj + + . 
+(208) +Note that h(d)r is a function of zF0 and dr, and for a fixed zF0, we can fix dr such that h(s)r = −1. Consider +that we set dr in this way. +ST = −xr + +� +vi∈V (T )\{vr} +−xi +� +ej∈Pr,i +(−1)dj +(209) +Now, ST is minimized if each of the V (T ) terms are minimized (value −1). This is achieved by setting xi = 1 +for each vi ∈ V (T ) and dj = 0 for each ej ∈ E(T ) \ {er}. Note that any other assignment to the variables +will result in one of the terms being either 0 or 1, therefore this is the unique minimal assignment to the tree +T . +36 + +Next, we design a statistical test similar to that in the proof of classical hardness of (X, majmodp ⊕ +parity(X)) (Theorem 20) in Section 3 with the additional set TM consisting of strings with a limited number +of minimal blocks. We define the statistical test as follows. +Statistical Test: +Let N0, NM := 3N 3α and NF := 2N 3α. The statistical test is T := TM ⊎ T0 ⊎ TF ⊎ TS, +where +TM := {z′ ∈ {0, 1}N+1 : ≤ NM blocks i ∈ [s] of z′ are minimal} +(210) +T0 := {z′ ∈ {0, 1}N+1 : z′ +Fi = 0|Fi| for ≤ N0 blocks i ∈ [s]} +(211) +TF := {z′ ∈ {0, 1}N+1 : ∃(w, y) : f(w, y) = z′ and ≥ NF blocks gi(wi, y) are y-fixed} +(212) +TS := {(z, b) ∈ {0, 1}N × {0, 1} : b ̸= pmmajmodp(z)} +(“incorrect strings”) +(213) +We will show that the function f(U) passes the statistical test with probability at least 1 +2 − O(1/ log N) +whereas the true distribution D = (Z, pmmajmodp(Z)) passes with probability at most 1/N for sufficiently +large N. +Claim 43. Pr[f(U) ∈ T] ≥ 1 +2 − O(1/ log N). +Proof. Using our partition of random input u into (x, y), our goal is to upper bound Prx,y[f(x, y) ∈ T ], where +the probability is taken over the randomness of (x, y) chosen uniformly at random from {0, 1}s × {0, 1}ℓ−s. +Since Prx,y[f(x, y) ∈ T ] ≥ miny Prx[f(x, y) ∈ T ], it is sufficient for us to upper bound Prx[f(x, y) ∈ T ] for +arbitrarily chosen y ∈ {0, 1}ℓ−s. +Suppose we arbitrarily fix y ∈ {0, 1}ℓ−s. 
If ≥ NF blocks of f(w, y) are y-fixed, then f(w, y) ∈ TF for +each w ∈ {0, 1}s. Moreover, if at most NM blocks gi(wi, y) are minimal for some choice of wi ∈ {0, 1}, then +for each w ∈ {0, 1}s, f(w, y) ∈ TM. Similarly, if at most N0 blocks evaluate to zero gi(wi, y) = 0|Fi| for some +choice of wi ∈ {0, 1}, then for each w ∈ {0, 1}s, f(w, y) ∈ T0. Therefore, we assume that less than NF blocks +of f are y-fixed, greater than NF of the forests of f(w, y) take on their minimal value for some choice of w, +and greater than N0 blocks are all zeros for some choice of w. Therefore, the following two hold: +1. There are at least NM − NF = N 3α blocks i ∈ [s] such that Si(0, y) ̸= Si(1, y). +2. There are at least N0 − NF = N 3α blocks i ∈ [s] such that |gi(0, y)| ̸= |gi(1, y)|. +Let J ⊆ [s] be the indices of the blocks that change their respective terms of S, and let K ⊆ [s] be the +indices of the blocks with Hamming weight that changes. +J := {i ∈ [s] : Si(0, y) ̸= Si(1, y)} +K := {i ∈ [s] : |gi(0, y)| ̸= |gi(1, y)|} +(214) +We denote |x, d| as the Hamming weight of the first N output bits of f(w, y), and recall that b is the last bit +of f(w, y). Note that |x, d| = |h(y)| + �s +i=1 |gi(wi, y)|. +Claim 44. Over the randomness of x ∈ {0, 1}s, the random variables S and |x, d| can be written as +S = a + +� +i∈J +airi, +|x, d| = e + +� +i∈K +eiri +where r ∼ Unif({0, 1}|J∪K|). +(215) +For some integers a, e, positive integers a1, . . . , a|J| ≤ O(d2) = O(ǫ log N), and nonzero integers e1, . . . , e|K|. +Proof. Note that over the randomness of x ∈ {0, 1}s, for each j′ /∈ J and k′ /∈ K, Sj′ and |gk′(w′ +k, y)| are +fixed. Therefore, there exists some integers α, β such that +S = α + +� +j∈J +Sj +|x, d| = β + +� +k∈K +|gk(wk, y)|. +(216) +Moreover, each Sj for j ∈ J are independent random variables which take on two different integer values +with equal probability. 
Likewise the |gk(wk, y)| for k ∈ K are independent random variables which take on +37 + +two distinct values with equal probability. Although for i ∈ J ∩ K, Si and |gi(wk, y)| are not independent. +Thus for each j ∈ J and k ∈ K, there exists integers α0, α1, β0, β1 such that α0 ̸= α1, β0 ̸= β1, and +Sj = +� +α0 +if xj = 0 +α1 +if xj = 1 +|gk(wk, y)| = +� +β0 +if xj = 0 +β1 +if xj = 1 +x ∼ Unif({0, 1})|J∪K|. +(217) +For each i ∈ J ∪ K, we will assign ri to either xi or 1 − xi. Since each xi is independently uniformly random +over {0, 1}, so is each ri. +Note that we can write the term Sj as either Sj = α0 + (α1 − α0)xj, or Sj = α1 + (α0 − α1)(1 − xj). +Thus, it is possible to rewrite Sj as c + ajrj for some integer c and positive integer aj, by setting rj = xj +and ai = (α1 − α0) if α1 > α0 and setting rj = 1 − xj and ai = (α0 − α1) if α0 > α1. Furthermore, the value +of aj = |α0 − α1|, and is at most 2 · |V (Fj)| ≤ d · 2D = 2d2 since the value of |Sj| is at most the number of +vertices in Fj. Therefore, we can write S = a + � +i∈J airi for some integer a and positive integers ai ≤ 2d2 +for i ∈ J. +For each k ∈ K, we can also write the term |gk(wk, y)| as either β0 +(β1 −β0)x0 or β1 +(β0 −β1)(1−x0). +Therefore, regardless of whether rk was assigned as xk or 1 − xk, the term can be written as c + ekrk for +some (not necessarily positive) integers c and ek. And, as desired, the entire Hamming weight sum can be +written as |x, d| = b + � +i∈K eiri for some integers b and ei for i ∈ K. +Next, we consider how much the sums in Equation (215) depend on the same bits of r. Suppose that +|J ∩K| ≤ 1 +2N 3α. Then |J \K| ≥ 1 +2N 3α. If we fix rK arbitrarily, the value of |x, d| is fixed, and therefore so is +parity(x, d). Letting c = parity(x, d), a′ = a + � +i∈J∩K airi, and J′ = J \ K, we can simplify the probability +that the output is “incorrect” over the randomness of rJ′ as follows. 
+Pr +rJ′ [f(w, y) ∈ TS] = Pr +rJ′[MMp(S) ⊕ parity(x, d) ̸= b] +(218) += Pr +rJ′ +� +MMp +� +a′ + +� +i∈J′ +airi +� +̸= c ⊕ b +� +(219) += Pr +rJ′ +� +a′ + +� +i∈J′ +airi ∈ Mc⊕b⊕1 +mod p +� +(220) +Where M0 = {0, 1, . . ., (p − 1)/2} and M1 = {(p + 1)/2, . . ., p − 1}. Since |M0|, |M1| ≥ (p − 1)/2, and ai is +nonzero modulo p (since ai ≤ O(ǫ log N) for i ∈ J, and p = Θ(N α))) it follows from Corollary 23 that +Pr +rJ′ [f(w, y) ∈ TS] ≥ p − 1 +2p +− O(1/N) ≥ 1/2 − O(1/p). +(221) +Where we used that |J′| ≥ +1 +2N 3α ≥ Ω(p3). +Since the bits of rK were fixed arbitrarily, it holds that +Prw[f(w, y) ∈ TS] = Prr[MMp(S) ⊕ parity(x, d) ̸= b] ≥ 1/2 − O(1/p). Therefore we assume that |J ∩ K| > +1 +2N 3α. +If we fix all ri for i /∈ J ∩ K, the remaining non-fixed blocks i ∈ J ∩ K have possible assignments which +give different values to both |gi(wi, y)| and Si. Letting a′ = a + � +i/∈J∩K a + airi, and e′ = � +i/∈J∩K eiri, we +simplify the probability that f(w, y) is “incorrect” over the randomness of rJ∩K as follows. +Pr +rJ∩K [f(w, y) ∈ TS] = Pr +rJ∩K +� +MMp +� +a′ + +� +i∈J∩K +airi +� +⊕ PAR +� +e′ + +� +i∈J∩K +eiri +�� +(222) +Since ai ≤ O(d2) ≤ O(ǫ log N) for each i ∈ [s] (by Claim 44) and |J ∩K| ≥ 1 +2N 3α = Ω(p3), it directly follows +from Lemma 24 that +Pr +rJ∩K [f(w, y) ∈ TS] ≥ 1 +2 − O(1/ log N) +(223) +Therefore, Prw[f(w, y) ∈ TS] ≥ 1 +2 − O(1/ log N). +38 + +Claim 45. Pr +� +(Z, pmmajmodp(Z)) ∈ T +� +≤ 1/N for sufficiently large N. +Proof. This proof is almost identical to that of Claim 26, which follows closely to the proof of Claim 3.3 +in [17]. The main difference in this proof accounts for the additional term TM in the statisitcal test – so +in addition to upper bounding the probability that D = (Z, pmmajmodp(Z)) is in T0, TS, or TF , we will +also upper bound the probability that D ∈ TM. Since D always outputs a “correct” string, Pr[D ∈ TS] = 0. +Thus, by the union bound it is sufficient for us to prove that Pr[D ∈ T0], Pr[D ∈ TF ], Pr[D ∈ TM] ≤ +1 +3N . 
+We start by showing that Pr[D ∈ TM] ≤ +1 +3N . To this end, we consider the probability that D ∈ TM +conditioned on the value of ZF0. Since ZF0 ∈ {0, 1}|F0| is uniformly random, +Pr[D ∈ TM] = +1 +2|F0| +� +t0∈{0,1}|F0| +Pr[D ∈ TM|ZF0 = t0]. +(224) +Thus it is sufficient for us to show that Pr[D ∈ TM|ZF0 = t0] ≤ +1 +3N for each t0 ∈ {0, 1}|F0|. +As shown in Claim 42, for each forest Fi for i ∈ [s], and some fixed zF0 ∈ {0, 1}|F0|, there is a unique +assignment for zFi to minimize Si(zFi, zF0). +Additionally, the minimality of each block is independent +conditioned on the value of ZF0 since for each i ∈ [s], Si(Z) is a function of only ZFi and ZF0. +We lower bound the probability that any given forest is minimal conditioned on the value of ZF0. For +any i ∈ [s] and t0 ∈ {0, 1}|F0|, we have +Pr +D [block i is minimal |ZF0 = t0] = +1 +2|Fi| ≥ 2−O(d2) = 2−O(ǫ log N) ≥ N −O(ǫ). +(225) +Where we used that |Fi| ≤ O(d2) ≤ O(ǫ log n) for i ∈ [s]. +Since the minimality of each forest are independent conditioned on the value of ZF0, for any subset of +forests U ⊆ [s], the probability that none of them are minimal conditioned on ZF0 is +Pr +D [all forests of U are not minimal|ZF0 = t0] = +� +i∈U +Pr[forest i is not minimal|ZF0 = t0]. +(226) +Therefore, for each t0 ∈ {0, 1}|F0|, +Pr +D [D ∈ TM|ZF0 = t0] = Pr +D + + +� +U⊆[s]: +|U|=s−NM +{all forests of U are not minimal } +�����ZF0 = t0 + + +(227) +≤ +� +U⊆[s]: +|U|=s−NM +Pr +� +all forests of U are not minimal +���ZF0 = t0 +� +(228) += +� +U⊆[s]: +|U|=s−NM +� +i∈U +Pr +� +forest i is not minimal +���ZF0 = t0 +� +(229) +≤ +� s +NM +� � +1 − N −O(ǫ)�s−NM +(230) +(231) +In the second line we used the union bound, the third line we used the independence of the block’s minimality +conditioned on ZF0 (Equation (226)), the fourth line we used Equation (225). We can further simplify, using +39 + +Ω(N/d3) ≤ s ≤ N, d ≤ (ǫ log N)1/2, and NM = 3N 3α. 
+≤ +� s +NM +�NM +exp +� +−N −O(ǫ)(s − NM) +� +(232) += sNM exp +� +−N −O(ǫ)s +� � +exp +� +N −O(ǫ)� +NM +�NM +(233) +≤ N 3N 3α exp +� n1−O(ǫ) +log3/2 N +� +(234) +≤ +1 +3N +(235) +for sufficienly large N and small ǫ (such that 3α < 1 − O(ǫ)). Therefore Pr[D ∈ TM] ≤ +1 +3N . +Next, we show using similar calculations that Pr[D ∈ T0] ≤ +1 +3N . Since each of the blocks i ∈ [s], ZFi is +uniformly random, whether each of them is all zeros is independent. Therefore the probability that block +i ∈ [s] is all zeros is. +Pr +� +ZFi = 0|Fi|� += 2−|Fi| ≤ 2−O(d2) = N −O(ǫ) +for each i ∈ [s] +(236) +Since N0 = 3N 3α, we can use the calculations from Equations (230) to (235) to bound Pr[D ∈ T0]. +Pr[D ∈ T0] ≤ +� +T ⊆[s]: +|T |=s−NM +� +i∈T +Pr +� +ZFi ̸= 0|Fi|� +(237) +≤ +� s +N0 +� � +1 − N −O(ǫ)�s−N0 +(238) +≤ +1 +3N +(239) +For sufficiently large N and small ǫ. +All that’s left is to show Pr[D ∈ TF ] ≤ +1 +3N . For this we use the same exact calculations from the proof +of Claim 26, but in this scenario we have ℓ ≤ N + N 3α, and the size of the support of D is 2N. +Pr[D ∈ TF] ≤ |TF | +2N ≤ 2ℓ−NF +2N +≤ 2N 3α−2N 3α ≤ 2−N 3α ≤ +1 +3N . +(240) +Where we used ℓ ≤ N + N δ, δ ≥ 3α, and NF = 2N 3α. Therefore, applying the union bound we get +Pr[D ∈ T] ≤ Pr[D ∈ TS] + Pr[D ∈ TM] + Pr[D ∈ T0] + Pr[D ∈ TF ] +(241) +≤ 0 + +1 +3N + +1 +3N + +1 +3N = 1 +N +(242) +6 +Discussion and Open Problems +Our results show that QNC0 circuits can sample from distributions that NC0 circuits cannot. Below we list +a few ways in which we think these results could potentially be extended. +• The constant-sized unitary Um,θ used in the construction of of constant depth quantum circuits +(Sections 2 and 4) is not constructed directly. Instead we show it exists indirectly by modifying a +non-unitary operation. An explicit construction of this unitary would be required for an experimental +implementation of this circuit, and may also lead to further insights. 
+40 + +• In an experiment with the goal of demonstrating quantum advantage, one would like to not just +construct a QNC0 circuit which samples from a distribution which NC0 circuits cannot, but also verify +that the distribution sampled from is indeed hard to sample from classically. How many samples are +needed for this verification? Can the circuit be modified to make the verification easier? We point +out here that the constant total variation distance in Corollary 4 means that only a few samples are +needed to verify that the distribution produced by the described quantum circuit is not produced by +a fixed NC0 circuit, for any specific choice of circuit. However ruling out all distributions producible +by NC0 circuits is a harder task. +• Can we prove an input-independent sampling separation between QNC0 and AC0 circuits? Notably, +in [18], Viola proves certain distributions cannot be produced by AC0 circuits. Can these techniques +be extended to QNC0 circuits? If so, we would have a novel technique for lower bounded the circuit +complexity of quantum states. If not, we should be able to find a QNC0 circuit which samples from +one of these distributions, producing the desired sampling separation. +7 +Acknowledgements +We would like to thank David Gosset for helpful discussions, and Ansis Rosmanis for sharing an insightful +note. +References +[1] S. Aaronson. Quantum computing, postselection, and probabilistic polynomial-time. Proceedings of the +Royal Society A: Mathematical, Physical and Engineering Sciences, 461(2063):3473–3482, 2005. [pp. 1, +3] +[2] S. Aaronson and L. Chen. Complexity-theoretic foundations of quantum supremacy experiments. arXiv +preprint arXiv:1612.05903, 2016. [pp. 1, 3] +[3] A. Anshu, N. Breuckmann, and C. Nirkhe. Nlts hamiltonians from good quantum codes. arXiv preprint +arXiv:2206.13228, 2022. [p. 3] +[4] S. Boixo, S. V. Isakov, V. N. Smelyanskiy, R. Babbush, N. Ding, Z. Jiang, M. J. Bremner, J. M. Martinis, +and H. Neven. 
Characterizing quantum supremacy in near-term devices. Nature Physics, 14(6):595–600, +2018. [pp. 1, 3] +[5] A. Bouland, B. Fefferman, C. Nirkhe, and U. Vazirani. Quantum supremacy and the complexity of +random circuit sampling. arXiv preprint arXiv:1803.04402, 2018. [pp. 1, 3] +[6] S. Bravyi, D. Gosset, and R. K¨onig. Quantum advantage with shallow circuits. Science, 362(6412):308– +311, 2018. [pp. 2, 3] +[7] D. Browne, E. Kashefi, and S. Perdrix. Computational depth complexity of measurement-based quantum +computation. In Conference on Quantum Computation, Communication, and Cryptography, pages 35– +46. Springer, 2010. [p. 3] +[8] R. Cleve and J. Watrous. Fast parallel circuits for the quantum fourier transform. In Proceedings 41st +Annual Symposium on Foundations of Computer Science, pages 526–536. IEEE, 2000. [p. 3] +[9] D. Grier and L. Schaeffer. Interactive shallow clifford circuits: Quantum advantage against nc1 and +beyond. In Proceedings of the 52nd Annual ACM SIGACT Symposium on Theory of Computing, pages +875–888, 2020. [p. 2] +[10] J. T. H˚astad. Computational limitations for small-depth circuits. MIT press, 1987. [p. 2] +[11] P. Høyer and R. ˇSpalek. Quantum fan-out is powerful. Theory of computing, 1(1):81–103, 2005. [p. 3] +41 + +[12] J. Preskill. Quantum computing in the nisq era and beyond. Quantum, 2:79, 2018. [p. 2] +[13] A. A. Razborov. Lower bounds on the size of bounded depth circuits over a complete basis with logical +addition. Mathematical Notes of the Academy of Sciences of the USSR, 41(4):333–338, 1987. [p. 2] +[14] P. W. Shor. Polynomial-time algorithms for prime factorization and discrete logarithms on a quantum +computer. SIAM review, 41(2):303–332, 1999. [p. 3] +[15] R. Smolensky. Algebraic methods in the theory of lower bounds for boolean circuit complexity. In +Proceedings of the nineteenth annual ACM symposium on Theory of computing, pages 77–82, 1987. [p. +2] +[16] B. M. Terhal and D. P. DiVincenzo. 
Adaptive quantum computation, constant depth quantum circuits +and arthur-merlin games, 2002. [pp. 1, 3] +[17] E. Viola. The complexity of distributions. SIAM Journal on Computing, 41(1):191–218, 2012. [pp. 2, +4, 11, 20, 23, 39] +[18] E. Viola. Extractors for circuit sources. SIAM Journal on Computing, 43(2):655–672, 2014. [pp. 2, 41] +[19] A. B. Watts, R. Kothari, L. Schaeffer, and A. Tal. Exponential separation between shallow quantum +circuits and unbounded fan-in shallow classical circuits. In Proceedings of the 51st Annual ACM SIGACT +Symposium on Theory of Computing, pages 515–526, 2019. [pp. 2, 3, 5, 24] +42 + diff --git a/6NAzT4oBgHgl3EQfEfqj/content/tmp_files/load_file.txt b/6NAzT4oBgHgl3EQfEfqj/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..852f8113a5a2c69c05b602921c8c9a0f547ebb72 --- /dev/null +++ b/6NAzT4oBgHgl3EQfEfqj/content/tmp_files/load_file.txt @@ -0,0 +1,1546 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf,len=1545 +page_content='arXiv:2301.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='00995v1 [quant-ph] 3 Jan 2023 Unconditional Quantum Advantage for Sampling with Shallow Circuits Adam Bene Watts1 and Natalie Parham2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 3 1Institute for Quantum Computing,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' University of Waterloo,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Canada 2Department of Computer Science,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Columbia University 3Perimeter Institute for Theoretical Physics,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Canada January 4,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2023 Abstract Recent work by Bravyi,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Gosset,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' and Koenig showed that there exists a search problem that a constant- depth quantum circuit can solve,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' but that any constant-depth classical circuit with 
bounded fan-in cannot.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' They also pose the question: can we achieve a similar proof of separation for an input-independent sampling task?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In this paper, we show that the answer to this question is yes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We introduce a distribution Dn and give a constant-depth, n qubit, quantum circuit that samples from a distribution close to Dn in total variation distance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For any δ < 1 we also prove, unconditionally, that any classical circuit with bounded fan-in gates that takes as input n+nδ uniformly random bits and produces output close to Dn in total variation distance has depth Ω(log log n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This gives an unconditional proof that constant-depth quantum circuits can sample from distributions which can’t be reproduced by constant-depth bounded fan-in classical circuits, even up to additive error.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The distribution Dn and classical circuit lower bounds are based on work of Viola, in which he shows a different (but related) distribution cannot be sampled from approximately by constant-depth bounded fan-in classical circuits.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1 Introduction What problems can quantum computers solve more efficiently than classical computers?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This question guides much of modern research in both the theory as well as the implementation of quantum computation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Perhaps the most well-known answer comes from Shor’s factoring algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Using Shor’s algorithm, a quantum computer can factor an integer efficiently, whereas it is widely believed that factoring is not possible in polynomial classical time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' However, witnessing this speedup requires large-scale reliable (fault tolerant) quantum computers which are unlikely to be available in the near-term.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Low-depth Circuits The difficulty of constructing a large scale quantum computer motives the study of constant-depth (shallow) quantum circuits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' These circuits describe the operations that can be implemented on quantum computers which are only able to run for a constant amount of time but can make small (constant-sized) operations in parallel.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Remarkably, it appears that even these relatively simple circuits can perform tasks which classical circuits cannot.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In 2004, Terhal and Divincenzo provided evidence, later strengthened by Aaronson [1], that there is no polynomial time classical algorithm which takes as input a description of a depth-3 quantum circuit and produces samples from the output distribution of that circuit [16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' More recently, a series of works [5, 2, 4] studied the complexity of sampling from the output distribution of a randomly generated shallow quantum circuit (again given a description of the circuit as input) and gave evidence this task couldn’t be performed by classical computers in polynomial time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' While these examples are striking, they do have some limitations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' As is standard in complexity theory, the proofs of classical hardness in the results discussed above are not unconditional, but instead rely on (natural) complexity theoretic conjectures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' More subtly, the presence of noise in real world experiments means that 1 even quantum computers will not sample from the ideal output distribution of quantum circuits exactly.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Near term (NISQ [12]) devices will likely only sample from the output distribution of the idealized quantum circuits up to (likely large) additive error.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Strengthening hardness-of-sampling results of the form described above to this more real-word scenario requires much more tenuous complexity theoretic conjectures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In [6], Bravyi, Gosset, and Koenig followed an alternate approach to demonstrating quantum advantage with shallow circuits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Rather than comparing the computational power of constant-depth quantum circuits to that of general classical circuits, they compared them against similarly restricted (constant-depth) classical circuits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This allowed for an unconditional separation: in [6] they showed that constant-depth quantum (QNC0) circuits could solve a relational (search) problem that constant-depth, bounded fan-in, classical (NC0) circuits could not.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Later work [19, 9] improved on their result to give separations between QNC0 circuits and more powerful classes of constant-depth classical circuits.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Input-Independent Sampling Problems A notable feature of all the problems discussed so far is that they are input-dependent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' That is, they are either search (relational) problems: given x ∈ {0, 1}n output a y ∈ {0, 1}m such that (x, y) ∈ R;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' or sampling problems parameterized by some input: given x ∈ {0, 1}n, provide a sample from the distribution D(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' But it is possible to study hardness of a different type of problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In contrast to the problems discussed above, input-independent sampling problems are problems in which the goal is to sample from a fixed n-bit distribution Dn (given access to uniformly random bits in the classical case, or qubits in the |0⟩ state in the quantum case).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1 Existing techniques used in the results for shallow circuit separations, miserably fail in the context of input-independent problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The lack of input-dependent structure requires completely different techniques than those used in [6, 19].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' At first glance, it may appear that there is a close connection between relational problems and input- independent sampling problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' If it is hard to map input x to output f(x) in constant-depth, is it also hard to sample from the distribution (X, f(X)) where X is uniform?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Perhaps surprisingly, the answer to this question is no!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' To illustrate, consider the parity function, which requires Ω(log n) depth to implement with a classical circuit with unbounded fan-in [10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Despite this fact, there is a depth 2 bounded fan-in classical circuit which maps a random string r ∈ {0, 1}n−1 to output (X, parity(X)) for uniformly random X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This circuit is easy to describe: simply map input r to output (r1, r1 ⊕ r2, r2 ⊕ r3, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , rn−2 ⊕ rn−1, rn−1) and check that the output distribution has the desired statistics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' A similar trick can be used to sample from the distribution (X, PHPn(X)) where PHPn is the Parity Halving Problem, a search problem introduced in [19] which separates QNC0 circuits from constant-depth classical circuits with unbounded fan-in.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Indeed, in contrast to search problems, where lower bounds against constant-depth circuits have a long history [10, 13, 15], lower bounds for input-independent search problems have only been developed recently.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Particularly relevant to this paper is a breakthrough result of Viola [17] in which he gave the first example of a distribution which could not be sampled by constant-depth classical circuits with bounded fan-in, even up to additive error.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (In a follow up work [18], Viola also gave a distribution which can not be sampled by constant-depth classical circuits with unbounded fan-in.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' While this result is stronger, the techniques used in [18] are less useful in the situation studied here.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=') A natural question is whether constant-depth quantum circuits can sample from distributions that clas- sical circuits cannot.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Indeed, the authors of [6] asked exactly this question: Question 1 (From [6]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Does there exist a family of quantum circuits {Cn}n∈N such that, for each n ∈ N, any constant-depth classical circuit with bounded fan-in (NC0) with access to uniformly random bits produces a distribution far from the output distribution produced by Cn run on the all zero state?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In the question above we understand close and far in the sense of additive error (or total variation distance).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We quickly review the definition of this distance below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1More formally, the goal, given a family of distributions {Dn} that depend only on n, is to produce a family of circuits {Cn}, each of which samples from the appropriate distribution given random bits as input.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2 Problem classical constant unconditional input- hardness depth independent Factoring [14] Poly-time X2 X X Sampling depth-3 quantum circuits [16, 1] Poly-time ✓ X X Random Circuit Sampling [5, 2, 4] Poly-time ✓ X X 2D-HLF [6] NC0 ✓ ✓ X This work NC0 ✓ ✓ ✓ Figure 1: Table comparing a few different computational problems with either conditional or unconditional proof of quantum advantage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Definition 2 (Total Variation Distance, ∆).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The Total Variation Distance (or Statistical Distance) between two distributions D1, D2 over {0, 1}m is ∆(D1, D2) := max T ⊆{0,1}m ���� Pr[D1 ∈ T] − Pr[D2 ∈ T] ���� = 1 2 � a∈{0,1}m ���� Pr[D1 = a] − Pr[D2 = a] ���� (1) Complexity of Quantum States Another motivation for studying input-independent sampling problems comes from questions concerning the circuit complexity of quantum states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The recently solved NLTS conjecture [3] concerned quantum states which cannot be produced by constant depth quantum circuits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Viola’s work can be seen as studying a classical analog of these states: identifying distributions which cannot be produced by constant depth classical circuits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Can arguments like Viola’s be extended to the quantum setting?' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='3 Indeed, a negative answer to Question 1 (showing certain distributions cannot be produced by constant depth quantum circuits) would immediately also describe a class of quantum states which also cannot be produced by constant depth quantum circuits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' On the other hand, a positive answer to Question 1 (like the result we will describe shortly) implies that quantum circuits can produce a different class of distributions than constant depth classical circuits, which suggests different techniques are needed to characterize the states these circuits can produce.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1 Results The main result of this paper is the following Theorem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For each δ ∈ (0, 1), there exists a family of distributions {Dn} such that for each n ∈ N, Dn is a distribution over {0, 1}n and 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' There exists a constant-depth quantum circuit which takes state |0n⟩ as input and produces a distribution which has total variation distance at most 1/6 + O(n−c) from Dn for some c ∈ (0, 1).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Each classical circuit with fan-in 2 which takes n + nδ random bits as input and has total variation distance at least 1 2 − ω(1/ log n) from Dn has depth Ω(log log n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The distributions Dn constructed are of the form (X, f(X)) for a uniformly random bitstring X and function f : {0, 1}n−1 → {0, 1}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then a uniformly random bitstring has total variation distance 1/2 from the distribution Dn and the classical lower bound on total variation distance is near-optimal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Considering the family of constant-depth quantum circuits that approximately produce the distributions {Dn}, we get the following Corollary, showing the answer to Question 1 is YES.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Corollary 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' There exists a family of constant-depth quantum circuits {Cn} such that any classical circuit with fan-in 2 which samples from the n-bit output distribution of Cn to within 1/3−ω(1/ log n) additive error has depth Ω(log log n).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2Factoring can accomplished in logarithmic depth [8] on a quantum computer or in constant depth on quantum computer with unbounded fanout gates [11] or intermediate measurements [7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 3Basic arguments involving lightcones have been used to show certain states cannot be produced by constant depth circuits (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Theorem 16 in [19]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' But Viola’s arguments go well beyond basic lightcone bounds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 3 The distribution used in Theorem 3, is a variation of the distribution (X, majmodp(X)), where the function majmodp (“Majority mod p”) is defined as majmodp(x) = � 0 if |x| < p/2 mod p 1 if |x| > p/2 mod p for each x ∈ {0, 1}n−1, and prime p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (2) Viola introduced majmodp in [17] and showed that the distribution (X, majmodp(X)) is hard to sample from for low-depth classical circuits with bounded fan-in.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Overview of Techniques Before proving Theorem 3, we first prove an analogous result in the setting where we allow the quantum circuit to take as input the GHZn state: |GHZn⟩ = 1 √ 2(|0n⟩ + |1n⟩).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For this setting we consider the distribution (X, majmodp(X) ⊕ parity(X)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Theorem 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For each n ∈ N, and δ ∈ (0, 1), there exists a prime p such that 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' There exists a constant-depth quantum circuit that takes the GHZn state as input and produces a distribution which has total variation distance at most 1/6+O(n−c) from (X, majmodp(X)⊕parity(X)) for some c ∈ (0, 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Each classical circuit with bounded fan-in which takes n+nδ random bits as input and has total variation distance at least 1 2 − ω(1/ log n) from (X, majmodp(X) ⊕ parity(X)) has depth at least Ω(log log(n)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We construct the corresponding quantum circuit in two steps.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' First, we construct a pseudo-quantum circuit, which approximately samples from the correct distribution but includes some single-qubit non-unitary operations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In the second step, we replace these non-unitary operations with actual unitaries and show that the desired output statistics are preserved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Our classical circuit lower bound techniques are inspired by, and heavily borrow from, Viola’s techniques in [17], where he proves classical circuit lower bounds for various distributions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Rather than explicitly lower bounding classical circuit depth, Viola proves lower bounds for the locality of functions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' To illustrate the relationship between locality and circuit depth let f : {0, 1}ℓ → {0, 1}n be a function implemented by a classical circuit attempting to sample from (X, majmodp ⊕ parity(X)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We say that f is d-local if, for each i ∈ [n], the i-th output bit of f(u) depends on at most d bits of the input u.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that any circuit with bounded fan-in and depth log(d) can implement a function with locality at most O(d).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' And so, to prove a circuit lower bound of Ω(log log n) for sampling from the distribution (X, majmodp ⊕ parity(X)) it suffices to prove that any function with locality at most Ω(logk n) cannot sample from the distribution (X, majmodp ⊕ parity(X)) given access to uniformly random bits as input.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Our proof of sampling hardness for (X, majmodp(X) ⊕ parity(X)) closely follows Viola’s original proof of hardness for (X, majmodp(X)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Both arguments begin with the observation that for any d-local function f : {0, 1}ℓ → {0, 1}n there exists a partition of the input u = (x, y) and a permutation of output bits of f(x, y) such that 4: f(x, y) = g1(x1, y) ◦ g2(x2, y) ◦ · · · ◦ gs(xs, y) ◦ h(y), (3) where each gi(xi, y) is a subset (or “block”) of the output bits that are completely determined by y and a single bit of x, and s = Ω(n/d2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, if we fix y, each of the blocks gi are independent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let z ∈ {0, 1}n−1 be the first n − 1 outputs of f(x, y) and let b be the final output bit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We can assume without loss of generality (by absorbing at most one gi into h) that the last output bit is not permuted so that b only depends on y.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In order for the function f to sample from the correct distribution the output bits z must be uniformly distributed and, for every input (x, y), we must have majmodp(z) ⊕ parity(z) = b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We note that, after fixing the input bits y, the Hamming weight of z is a sum of independent random variables but b is fixed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then (still following Viola) we show that if many of these independent variables are fixed the output distribution of z will not have sufficiently high entropy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Alternatively, if they are unfixed, the condition majmodp(z)⊕parity(z) = b is unlikely to be satisfied.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Making these observations formal completes the proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 4We use “◦” to denote concatenation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 4 In order to extend the sampling separation to a distribution that can be prepared by a constant-depth quantum circuit without a GHZ state as input, we replace the GHZ state in the quantum circuit for Theorem 5 with a “Poor-Man’s GHZ state” (introduced in [19]) defined over a binary tree B.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The resulting distribu- tion produced by this circuit is still related to (X, majmodp(X) ⊕ parity(X)) but is more complicated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In particular, it is still of the form (X, MMp(SX) ⊕ parity(X)) where MMp(j) := � 0 if j < p/2 mod p 1 if j > p/2 mod p for j ∈ Z (4) and Sz is a sum of terms that depends on output bits z ∈ {0, 1}n−1 in a complicated way.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='5 Unfortunately, we no longer have the nice property that the terms of the sum Sz depend on disjoint bits of z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The main technical challenge for the classical lower bound is accounting for these dependencies within the sum, which is accomplished by carefully fixing additional bits of the input (and therefore output) to recover independence of the unfixed terms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='2 Reader’s Guide Both of the Theorems mentioned above, Theorem 3 and Theorem 5, consist of 2 parts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We restate each of these parts as separate theorems, each in their own section of the paper.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The following two sections contain the proof of Theorem 5 – the sampling separation in the setting where we allow the quantum circuit to take a GHZ state as input.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Section 2 contains the proof of part 1 of Theorem 5, the quantum circuit upper bound, as Theorem 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Section 3 contains the proof of part 2 of Theorem 5, the classical circuit lower bound, as Theorem 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In the last two sections, we prove the main result of this paper: Theorem 3, the separation in the sampling power between low-depth quantum and classical circuits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In Section 4 we prove part 1 of Theorem 3, that there is a quantum circuit that approximately samples from the target distribution as Theorem 33.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Finally, in Section 5, we prove the classical hardness of sampling from this distribution as Theorem 34.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2 Sampling from (X, majmodp(X) ⊕ parity(X)) using a GHZ state In this section we consider constant-depth quantum circuits with access to an n-qubit GHZ state as input.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We show these circuits can produce samples close to the distribution (X, majmodp(X)⊕parity(X)), where X is a uniformly random bitstring of length n−1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We will prove this result in two steps – in Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1 we give a “quantum-like” circuit that samples from the correct distribution but includes non-unitary single-qubit operations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='2 we show how to replace those non-unitary operations with multi-qubit (but still constant-sized) unitaries.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Before beginning these proofs we review some details about GHZ states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Review of GHZ States An n-qubit GHZ state is defined to be the state |GHZn⟩ = 1 √ 2 � |0⟩⊗n + |1⟩⊗n� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (5) It is well-known that applying a Hadamard transform to each qubit of a GHZ state produces a uniform superposition over bitstrings with even Hamming weight: H⊗n |GHZn⟩ = 2−n/2 � e∈En |e⟩ (6) where En is the set containing all even parity n-bit strings.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We can equivalently describe this state as a coherent superposition of n − 1 random bits and a final bit whose value equals the parity of the n − 1 other bits, so H⊗n |GHZn⟩ = �n−1 � i=1 CNOTi,n � |+⟩⊗n−1 ⊗ |0⟩ (7) 5In particular, Sz is a sum of parities of sub-strings of z – see Definition 30 for details.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 5 where CNOTi,j denotes a CNOT gate controlled on qubit i and applied to qubit j.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Equation (7) will be our starting point for designing circuits that use the GHZ state as a resource state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' |+⟩ |+⟩ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' |+⟩ |0⟩ Figure 2: A circuit constructing the state H⊗n |GHZn⟩, as described in Equation (7).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1 Sampling with non-unitary operations We now consider constant-depth quantum circuits augmented with specific single qubit non-unitary “gates” Aθ, which we will soon define.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We show these circuits can sample (approximately) from the distribution (X, majority(X) ⊕ parity(X)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' While this model is non-physical, introducing it allows us to isolate some key ideas which we will reuse in the fully quantum circuit developed in the next section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' First, for each θ ∈ R, define the (non-unitary) matrix Aθ to be the two-qubit matrix which acts on the computational basis states as Aθ |0⟩ = |0⟩ (8) Aθ |1⟩ = exp(iθX) |1⟩ (9) When drawing circuit diagrams in this section we sometimes include Aθ gates, and understand that they represent the matrix A acting on the qubits indicated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We also sometimes draw A† θ gates, which represent the adjoint of the matrix Aθ acting on the qubits indicated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We now prove the following useful circuit identity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Lemma 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For any one qubit state |ψ⟩ and computational basis state |x⟩ with x ∈ {0, 1}, we have ⟨x|2 � A† θ � 2 CNOT2,1 |ψ⟩1 |+⟩2 = 1 √ 2 exp(i(θ + π/2)xX1) |ψ⟩1 (10) Proof.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Direct computation gives ⟨x|2 � A† θ � 2 CNOT2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1 |ψ⟩1 |+⟩2 = ⟨x|2 exp(iθxX2)CNOT2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1 |ψ⟩1 |+⟩2 (11) = ⟨x|2 CNOT2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1 exp(iθxX1X2) |ψ⟩1 |+⟩2 (12) = ⟨x|2 CNOT2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1 exp(iθxX1) |ψ⟩1 |+⟩2 (13) = exp(i(θ + π/2)xX1) |ψ⟩1 ⟨x|+⟩2 (14) = 1 √ 2 exp(i(θ + π/2)xX1) |ψ⟩1 (15) where we used on the first line that Aθ|x⟩ = exp(iθXx) |x⟩ (16) by definition,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' the commutation relation6 X2CNOT2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1 = CNOT2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1X1X2 (17) =⇒ exp(iθX2)CNOT2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1 = CNOT2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1 exp(iθX1X2) (18) on the second line,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' that |+⟩ is a 1-eigenstate of the X operator on the third line,' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' and then the definition of the CNOT gate and the |+⟩ state on the final two lines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Figure 3 gives a diagrammatic version of this proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 6 |ψ⟩ |ψ⟩ = |+⟩ A† θ ⟨x| |+⟩ exp(iθxX) ⟨x| |ψ⟩ exp (iθxXX) = |+⟩ ⟨x| |ψ⟩ exp (iθxX) = |+⟩ ⟨x| |ψ⟩ exp (ix(θ + π/2)X) = |+⟩ ⟨x| Figure 3: A diagrammatic proof of Lemma 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The equivalence between each line is explained in the proof of the lemma.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We now prove the main result of this section and construct a constant-depth circuit with a GHZ state as input and Aθ gates which samples approximately from the distribution (X, majmodp(X)) for any p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The construction builds on Lemma 6 as well as the observations about the GHZ state discussed in Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Theorem 7.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For each prime number p there is a constant-depth circuit consisting of one and two-qubit unitary gates and Aθ operations which takes a GHZ state as input and produces an output which, when measured in the computational basis, produces an output distribution (X′, Y ) with ∆((X′, Y ), (X, majmodp(X) ⊕ parity(X))) ≤ 1 2 − 1 π + 1 2p + O(p3/2e−n/p2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (19) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We first describe the circuit which, when measured in the computational basis, produces output which correlates with (X, majmodp(X) ⊕ parity(X)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Fix θ = π/p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The circuit takes as input a GHZ state, applies a Hadamard transform to each qubit of the state, then applies a A† θ operation to the first n − 1 qubits in the GHZ state and a exp(−iπX/4) rotation to the final qubit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This circuit is indicated diagrammatically in Figure 4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' To prove this circuit samples (approximately) from the correct distribution we write the (unnormalized) output state of the circuit conditioned on first n − 1 qubits of the circuit being measured in computational 6To prove the implication, use the standard decomposition exp(iθX) = cos(θ)+i sin(θ)X, then commute the resulting terms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 7 H A† π/p ✌✌✌ H A† π/p ✌✌✌ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' H A† π/p ✌✌✌ H exp(−iπX/4) ✌✌✌ \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 |GHZn⟩ Figure 4: Constant-depth circuit producing approximate samples from the distribution (X, majmodp(X) ⊕ parity(X)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' basis state |x⟩ = |x1⟩ ⊗ |x2⟩ ⊗ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ⊗ |xn−1⟩ as: ⟨x|1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='n−1 �� A† π/p �⊗n−1 ⊗ exp(−iπX/4) � H⊗n |GHZn⟩ = ⟨x|1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='n−1 �� A† π/p �⊗n−1 ⊗ exp(−iπX/4) � �n−1 � i=1 CNOTi,n � |+⟩⊗n−1 ⊗ |0⟩ (20) = n−1 � i=1 ⟨xi|A† π/p (CNOTi,n)|+⟩i ⊗ exp(−iπX/4) |0⟩n (21) = 2−(n−1)/2 exp � iX � −π 4 + n−1 � i=1 xi �π p + π 2 ��� |0⟩n (22) where we used Equation (7) on the first line, reordered terms on the second (noting that exp(iπX/4)n commutes with CNOTi,n for any i ∈ [n − 1]), and then used Lemma 6 on the third.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' A diagrammatic version of this analysis is given in Figure 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Now, tracing over the final qubit we see the probability of the first n − 1 qubits being measured in any computational basis state |x⟩ is 2−(n−1) so the measurement of the first n − 1 bits produces a uniformly random bit string, as desired.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Additionally, conditioning on bit string x = x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xn−1 being measured, we see the state of the n-th qubit is exp � iX � −π 4 + |x| �π p + π 2 ��� |0⟩n (23) = exp � iX � −π 4 + π p |x| �� |parity(x)⟩n (24) = cos � −π 4 + π p |x| � |parity(x)⟩n + i sin � −π 4 + π p |x| � |1 ⊕ parity(x)⟩n .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (25) Where |x| = �n−1 i=1 xi denotes the Hamming weight of x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Now let Yx be the random variable giving the outcome of a computational basis measurement performed on the n-th qubit, conditioned on a computational basis measurement of the first n−1 bits giving outcome x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We bound the probability that this random variable does not equal parity(x)⊕majmodp(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Straightforward calculation gives that the probability that Yx equals parity(x) is given by Pr[Yx = parity(x)] = cos2 � −π 4 + π p |x| � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (26) It is then easy to see (see Figure 6) that this function is inversely correlated with majmodp(x) (meaning that Yx more likely equals parity(x) when majmodp(x) = 0 and likely does not equal parity(x) when majmodp = 8 H A† π/p ⟨x1| |+⟩ A† π/p ⟨x1| H A† π/p ⟨x2| |+⟩ A† π/p ⟨x2| .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' = .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' H A† π/p ⟨xn−1| |+⟩ A† π/p ⟨xn−1| H exp(−iπX/4) |0⟩ exp(−iπX/4) \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 |GHZn⟩ |+⟩ ⟨x1| |+⟩ ⟨x2| = .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' |+⟩ ⟨xn−1| |0⟩ exp � iX � − π 4 + �n−1 i=1 xi � 2π p + π 2 ��� Figure 5: Diagrammatic analysis of the circuit presented in the proof of Theorem 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The first line follows from Equation (7), while the second follows from Lemma 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Expanding on this we can bound the average probability that Yx does not equal parity(x)⊕majmodp(x)]: 1 2n−1 � x∈{0,1}n−1 Pr � Yx ̸= parity(x) ⊕ majmodp(x) � ≤ 1 2 − 1 π + 1 2p + O(p3/2e−n/p2) (27) Details of this calculation are given after this proof, in Lemma 8.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Finally, we bound the total variation distance between the output of the quantum circuit depicted in Figure 4 and the distribution (X, majmodp(X) ⊕ parity(X)) with uniformly random X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let (X′, Y ) be the random variable giving the output of the quantum circuit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then ∆((X,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' majmodp(X) ⊕ parity(X)),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (X′,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Y )) = 1 2 � x∈{0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1}n−1 y∈{0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1} ��� Pr � (X,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' majmodp(X) ⊕ parity(X)) = (x,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' y) � − Pr[(X′,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Y ) = (x,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} 
+page_content=' y)] ��� (28) = 1 2 � x∈{0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1}n−1 y∈{0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1} ��� Pr[X = x] Pr � majmodp(x) ⊕ parity(x) = y � − Pr[X′ = x] Pr[Yx = y] ��� (29) = 1 2n � x∈{0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1}n−1 y∈{0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1} ��� Pr � majmodp(x) ⊕ parity(x) = y � − Pr[Yx = y] ��� (30) = 1 2n−1 � x∈{0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1}n−1 Pr � Yx ̸= majmodp(x) ⊕ parity(x) � ≤ 1 2 − 1 π + 1 2p + O(p3/2e−n/p2) (31) This completes the proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 9 0 p/4 p/2 3p/4 p 1/2 1 |x| Pr[y = parity(x)] majmodp(|x|) (a) Inverse correlation of Pr[Yx = parity(x)] and majmodp(x) 0 p/4 p/2 3p/4 p 1/2 1 |x| Pr � y ̸= majmodp(x) ⊕ parity(x) � (b) Probability that Yx is incorrect, f(|x|) Figure 6: Plots displaying the correlation of Yx and majmodp(x) ⊕ parity(x) where Yx is the last bit output by the circuit in Figure 4 conditioned on the first n − 1 measurements resulting in string x ∈ {0, 1}n−1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Lemma 8.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Define the random variable Yx as in the proof of Theorem 7, so Yx takes values in {0, 1} and Pr[Yx = parity(x)] = cos2 � −π 4 + π p |x| � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (32) Then 2−(n−1) � x∈{0,1}n−1 Pr � Yx ̸= majmodp(x) ⊕ parity(x) � ≤ 1 2 − 1 π + 1 2p + O(p3/2e−n/p2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (33) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let X be a random variable taking value uniformly at random from {0, 1}n−1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then we have 2−(n−1) � x∈{0,1}n−1 Pr � Yx ̸= majmodp(x) ⊕ parity(x) � = p−1 � k=0 Pr � YX ̸= majmodp(X) ⊕ parity(X) ��|X| = k � Pr[|X| = k] (34) Let f(k) be the probability that our output measurement is incorrect given that the Hamming weight of the first n bits have Hamming weight k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' f(k) := Pr � Y ̸= majmodp(X) ⊕ parity(X) ��|X| = k � (35) It follows from Equation (32), that f(k) = \uf8f1 \uf8f2 \uf8f3 sin2 � − π 4 + π p k � , k ≤ p/2 mod p cos2 � − π 4 + π p k � , k > p/2 mod p (36) which is plotted in Figure 6b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let δ be the total variation distance between |X| mod p and Up, the uniform distribution over {0, 1, .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', p − 1}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then Pr[|X| = k mod p] ≤ 1 p + δ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We can upper bound Equation (34), 10 as Pr � Y ̸= majmodp(X) ⊕ parity(X) � ≤ �1 p + δ � p−1 � k=0 f(k) (37) = �1 p + δ � \uf8eb \uf8ed1 2 + 2 (p−1)/2 � k=1 f(k) \uf8f6 \uf8f8 (38) = �1 p + δ � � 1 2 + 2 � p/2 1/2 f(k) � dk (39) Where in the second line we use the fact that f(k) is symmetric about p/2, so � p−1 2 k=1 f(k) = �p−1 k= p+1 2 f(k).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In the third line we used that f(k) is convex over (0, p/2), and therefore �(p−1)/2 i=1 f(k) is a (midpoint-Riemann sum) over-approximation of � p/2 1/2 f(k).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Next we evaluate the integral.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' � p/2 1/2 f(k) dk = � p/2 0 sin2 � −π 4 + π p k � dk (40) = � p/2 0 1 2 � 1 + cos �2π p k + π 2 �� dk (41) = 1 2 � k + p 2π sin �2π p k + π 2 ������ p/2 0 (42) = p 4 � 1 − 2 π � (43) Combining this with Equation (39),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' we get the probability we measure an incorrect string is at most Pr � Y ̸= majmodp(X) ⊕ parity(X) � ≤ �1 p + δ � �p 2 � 1 − 2 π � + 1 2 � (44) = 1 2 − 1 π + δp 2 � 1 − 2 π � + 1 2 �1 p + δ � (45) = 1 2 − � 1 π − 1 2p � + O(pδ) (46) All that’s left is to upper bound δ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' the total variation distance between |X| mod p and Up.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For this, we use the following Fact from [17].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Fact 9 (special case of Fact 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='2 in [17]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let (x1, x2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , xt) ∈ {0, 1}n be sampled uniformly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then the total variation distance between �t i=1 xi mod p and Up, the uniform distribution over {0, 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', p − 1} is at most √pe−t/p2 Using this fact, we get the upper bound δ ≤ p1/2e−n/p2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The probability the measured string is incorrect is then Pr � Y ̸= majmodp(X) ⊕ parity(X) � ≤ 1 2 − 1 π + 1 2p + O(p3/2e−n/p2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (47) 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='2 Removing non-unitary operations We now construct a fully quantum circuit that takes a GHZ state as input and produces a state which, when measured in the computational basis, samples approximately from the distribution (X, majmodp(X)⊕ parity(X)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Our starting point is the non-unitary circuit constructed in Section 2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' First, we modify this 11 circuit by replacing the non-unitary Aθ gates with a different set of non-unitary gates, and show the classical distributions output by the two circuits after measurement are identical.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then we show these new non- unitary gates are close to unitary gates, and hence the circuit can be made fully unitary with minimal change to the output distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1 Introducing multi-qubit non-unitary operations We start by defining the m-qubit non-unitary operation Aθ,m whose action on the m qubit basis state |x⟩ = |x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm⟩ is given by: Aθ,m |x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm⟩ = exp(iθxm) |x1⟩ ⊗ exp(iθx1) |x2⟩ ⊗ .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ⊗ exp(iθxm−1) |xm⟩ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (48) Intuitively, we can think of the Aθ,m operation as consisting of m distinct Aθ operations, just with the qubits they act on “shifted” away from the qubits controlling the gate by 1 modulo m.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Now we observe that, in certain situations, an Aθ,m operation can replace a tensor product of m different Aθ operations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Lemma 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For any m-qubit computational basis state |x⟩ = |x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm⟩ and arbitrary one qubit state |ψ⟩, the following equivalence holds: ⟨x|1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='m � A† θ,m � 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='m � m � i=1 CNOTi,m+1 � |+⟩⊗m ⊗ |ψ⟩ = ⟨x|1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='m � m � i=1 � A† θ � i CNOTi,m+1 � |+⟩⊗m ⊗ |ψ⟩ (49) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The proof is similar to the proof of Lemma 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In what follows we identify indices mod m so, in particular, we have x0 = xm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then we see: ⟨x|1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='m � A† θ,m � 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='m \uf8eb \uf8ed m � j=1 CNOTj,m+1 \uf8f6 \uf8f8 |+⟩⊗m ⊗ |ψ⟩ = ⟨x|1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='m \uf8eb \uf8ed m � j=1 exp(iθXjxj−1)CNOTj,m+1 \uf8f6 \uf8f8 |+⟩⊗m ⊗ |ψ⟩ (50) = ⟨x|1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='m \uf8eb \uf8ed m � j=1 CNOTj,m+1 exp(iθXjXm+1xj−1) \uf8f6 \uf8f8 |+⟩⊗m ⊗ |ψ⟩ (51) = ⟨x|1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='m \uf8eb \uf8ed m � j=1 CNOTj,m+1 \uf8f6 \uf8f8 |+⟩⊗m ⊗ exp \uf8eb \uf8ediθX m � j=1 xj−1 \uf8f6 \uf8f8 |ψ⟩ (52) = ⟨x|1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='m \uf8eb \uf8ed m � j=1 CNOTj,m+1 \uf8f6 \uf8f8 |+⟩⊗m ⊗ exp \uf8eb \uf8ediθX m � j=1 xj \uf8f6 \uf8f8 |ψ⟩ (53) = ⟨x|1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='m \uf8eb \uf8ed m � j=1 exp(iθXjxj)CNOTj,m+1 \uf8f6 \uf8f8 |+⟩⊗m ⊗ |ψ⟩ (54) = ⟨x|1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='m \uf8eb \uf8ed m � j=1 � A† θ � j CNOTj,m+1 \uf8f6 \uf8f8 |+⟩⊗m ⊗ |ψ⟩ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (55) Here the first line follows from the definition of Aθ,m, the second line follows from commuting an exp(iθX) gate past a CNOT gate as in the proof of Lemma 6, the third line follows because |+⟩ is a 1 eigenstate of the X operator and the fourth line follows from a simple relabeling of indices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The fifth line follows from applying the same argument as in the second and third lines, just in the reverse direction, and the sixth line follows by definition of Aθ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Figure 7 gives a diagrammatic version of this proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 12 A straightforward consequence of Lemma 10 and the arguments of Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1 is that constant-depth quantum circuits augmented with Aθ,m gates and acting on a GHZ state can also approximately sample from the distribution (X, majmodp(X) ⊕ parity(X)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Corollary 11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let m and D be integers, and n = Dm + 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then the state �� A† π/p,m �⊗D ⊗ exp(−iπX/4) � H⊗n |GHZn⟩ , (56) when measured in the computational basis, produces an output distribution (X′, Y ) with ∆((X′, Y ), (X, majmodp(X) ⊕ parity(X))) ≤ 1 2 − 1 π + 1 2p + O(p3/2e−n/p2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (57) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' By Lemma 10 and Equation (7) we have �� A† π/p,m �⊗D ⊗ exp(−iπX/4) � H⊗n |GHZn⟩ = �� A† π/p,m �⊗D ⊗ exp(−iπX/4) � �n−1 � i=1 CNOTi,n � |+⟩⊗n−1 ⊗ |0⟩ (58) = �� A† π/p �⊗n−1 ⊗ exp(−iπX/4) � �n−1 � i=1 CNOTi,n � |+⟩⊗n−1 ⊗ |0⟩ (59) = �� A† π/p �⊗n−1 ⊗ exp(−iπX/4) � H⊗n |GHZn⟩ (60) In the proof of Theorem 7 we show this state, when measured in the computational basis, is close to the distribution (X, majmodp(X) ⊕ parity(X)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='2 Replacing multi-qubit non-unitary operations with unitary operations In this section, we construct a fully unitary circuit which takes a GHZ state as input and produces an output which, when measured in the computation basis, samples for a distribution close in Total Variation Distance to the distribution (X, majmodp(X)⊕parity(X)).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We do this by proving that we can replace the non-unitary operations Am,θ introduced in the previous section with unitary operations while causing minimal change to a circuit using these elements.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' To make these statements formal, we first recall some definitions and useful standard facts about matrix norms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Definition 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The Frobenius norm of a matrix M, denoted ∥M∥F , is defined by ∥M∥F = � tr[M ∗M] (61) Definition 13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The infinity (or operator) norm of a matrix M, denoted ∥M∥∞, is defined by ∥M∥∞ = max |ψ⟩:∥|ψ⟩∥=1 ∥M |ψ⟩∥, (62) where ∥|ψ⟩∥ denotes the regular Euclidean norm of any vector |ψ⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Fact 14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For any matrix M, the Frobenius norm upper bounds the operator norm ∥M∥∞ ≤ ∥M∥F .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (63) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For an arbitrary matrix M, let λ1, .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', λd denote the eigenvalues of M ∗M, with λ1 ≥ λ2 ≥ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='λd.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note all λi are positive.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then we have ∥M∥2 ∞ = λ1 ≤ d � i=1 λi = ∥M∥2 F (64) as desired.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 13 |+⟩ A† θ,m ⟨x1| |+⟩ exp(iθXxm) ⟨x1| |+⟩ ⟨x2| |+⟩ exp(iθXx1) ⟨x2| .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' = .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' |+⟩ ⟨xm| |+⟩ exp(iθXxm−1) ⟨xm| |ψ⟩ |ψ⟩ |+⟩ ⟨x1| |+⟩ ⟨x2| = .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' |+⟩ ⟨xm| |ψ⟩ exp � iθX �m j=1 xj � |+⟩ exp(iθXx1) ⟨x1| |+⟩ exp(iθXx2) ⟨x2| = .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' |+⟩ exp(iθXxm) ⟨xm| |ψ⟩ |+⟩ A† θ ⟨x1| |+⟩ A† θ ⟨x2| = .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' |+⟩ A† θ ⟨xm| |ψ⟩ Figure 7: Diagrammatic proof of Lemma 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' |ψ⟩ is an arbitrary single qubit state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The equivalence between lines is explained in the proof of the lemma.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 14 Fact 15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Given matrices A1, A2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='As and B1, B2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', Bs with ∥Ai − Bi∥∞ ≤ ǫ, (65) ∥Ai∥ ≤ 1 (66) for all i ∈ [s], and sǫ < 1, (67) we also have ������ � i∈[s] Ai − � i∈[s] Bi ������ ∞ ≤ 2sǫ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (68) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' First note that ∥M∥∞ is equal to the largest singular value of the matrix M, from which it follows that ∥M ⊗ N∥∞ = ∥M∥∞∥N∥∞ (69) for any matrices M and N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then an inductive argument gives ����� s � i=1 Ai − s � i=1 Ai ����� ∞ = ����� � i=1s Ai − B1 s � i=2 Ai + B1 s � i=2 Ai − s � i=1 Bi ����� ∞ (70) ≤ �����(A1 − B1) s � i=2 Ai ����� + �����B1 ⊗ � s � i=2 Ai − s � i=2 Bi ������ (71) ≤ ǫ + (1 + ǫ) ����� s � i=2 Ai − s � i=2 Bi ����� (72) = ǫ + (1 + ǫ)(2ǫ(s − 1)) ≤ 2sǫ (73) as desired.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Fact 16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Given two states |ρ⟩ and |σ⟩, let p(x) and q(x) denote the resulting classical distributions when |ρ⟩ and |σ⟩ are measured in some basis {|x⟩}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then we have � x |p(x) − q(x)| ≤ 4∥|ρ⟩ − |σ⟩∥ (74) Proof.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' First, we note that for any two states |ρ⟩ and |σ⟩ and PSD matrix M ≤ I we have 2∥|ρ⟩ − |σ⟩∥ ≥ 2∥M(|ρ⟩ − |σ⟩)∥ (75) ≥ 2 (∥M |ρ⟩∥ − ∥M |σ⟩∥) (76) ≥ (∥M |ρ⟩∥ − ∥M |σ⟩∥) (∥M |ρ⟩∥ + ∥M |σ⟩∥) (77) = ∥M |ρ⟩∥2 − ∥M |σ⟩∥2 (78) Then defining probability distributions p(x) and q(x) and the basis {|x⟩} as above, let Px := {x : p(x) ≥ q(x)} (79) and Mx = � x∈Px |x⟩⟨x| .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (80) 15 Then note ∥Mx |ρ⟩∥2 − ∥Mx |σ⟩∥2 = � x∈Px |⟨x|ρ⟩|2 − |⟨x|σ⟩|2 (81) = � x∈Px (p(x) − q(x)) (82) = 1 2 � x |p(x) − q(x)| (83) with the final inequality holding because both p(x) and q(x) must sum to one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Combining the two inequalities above proves the result.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Next, we recall the definition of the matrix Am,θ in terms of its action on computational basis states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Am,θ |x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm⟩ := exp(iθXxm) |x1⟩ ⊗ exp(iθXx1) |x2⟩ ⊗ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ⊗ exp(iθXxm−1) |xm⟩ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (84) The matrix Am,θ would be a unitary matrix iff it mapped computational basis states to some set of orthonor- mal basis states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='7 The following lemma shows that this condition is close to being satisfied.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In what follows, for any bitstring x = x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm ∈ {0, 1}m we let x denote the bitwise compliment of x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We also interpret all subscripts in the remainder of this section mod m so, in particular, x0 = xm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Lemma 17.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For any θ ∈ R, m ∈ Z+ and x = x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm ∈ {0, 1}m the matrix Aθ,m satisfies the following properties: 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ⟨x|A† θ,mAθ,m|x⟩ = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ⟨x|A† θ,mAθ,m|x⟩ = −im+2|x| sinm(θ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ⟨y|A† θ,mAθ,m|x⟩ = 0 for any y ∈ {0, 1}m\\{x, x}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The proof of Items 1 and 2 are purely computational.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For any x = x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm ∈ {0, 1}m we have ⟨x| A† m,θAm,θ |x⟩ = � j∈[m] ⟨xj| exp(−iθxj−1) exp(iθxj−1) |xj⟩ (85) = � j∈[m] ⟨xj|xj⟩ = 1, (86) proving Item 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' A similar calculation gives ⟨x|A† m,θAm,θ|x⟩ = � j∈[m] ⟨xj|exp(−iθXxj) exp(iθXxj)|xj⟩ (87) = � j∈[m] ⟨xj|exp � i1+2xjθX � |xj⟩ (88) = � j∈[m] ⟨xj|cos(θ) + i1+2xj sin(θ)X|xj⟩ (89) = � j∈[m] i1+2xj sin(θ) (90) = im+2|x| sinm(θ) (91) = −im+2|x| sinm(θ), (92) where we used that X |xj⟩ = |xj⟩ by definition of the compliment on the fourth line and that |x| + |x| = m for any x in the final line.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This proves Item 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 7More generally it is unitary iff it maps any set of orthonormal basis states to some other orthornomal basis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 16 To prove Item 3 note that for any m bit strings x and y with x /∈ {y, y} there exists a k ∈ [m] with xk−1 = yk−1 and xk ̸= yk.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Fixing k to be that value we find: ⟨y|A† m,θAm,θ|x⟩ = m � j=1 ⟨xj|exp(−iθXyj−1) exp(iθXxj−1)|yj⟩ (93) = ⟨yk|exp(iθX(xk − yk))|xk⟩ × � j∈[m]\\{k} ⟨yj|exp(iθX(xj−1 − yj−1))|xj⟩ (94) = ⟨yk|xk⟩ × � j∈[m]\\{k} ⟨yj|exp(iθX(xj−1 − yj−1))|xj⟩ (95) = 0 (96) since yk ̸= xk by definition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This competes the proof of Item 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We show that, as a consequence of Lemma 17, there exists an m qubit unitary matrix which is close (in Frobenius norm) to the non-unitary matrix Aθ,m.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We construct this unitary by applying Gram-Schmidt orthnomalization applied to the state’s output by Am,θ acting on computational basis states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Lemma 18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For any m, there exists unitary matrices Um,θ satisfying ∥Am,θ − Um,θ∥F ∈ O � θ−m� (97) as θ → 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We will define Um,θ by its action on computational basis states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' First, fix Bm to be any set containing half the bit strings of length m with the property that for any x ∈ {0, 1}m either x ∈ Bm or x ∈ Bm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (That is, Bm contains one representative element from the equivalence classes of the set {0, 1}m induced by the equivalence relation x ∼ y if x = y or x = y).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then define: Um,θ |x⟩ := � Am,θ |x⟩ if x ∈ Bm C−1 � Am,θ |x⟩ + im+2|x| sinm(θ)Am,θ |x⟩ � otherwise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (98) with C := � 1 − sin2m(θ) a normalizing constant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Observe that, by Item 2 of Lemma 17, for x /∈ Bm we can also write Um,θ |x⟩ = C−1 � Am,θ |x⟩ − ⟨x|A† m,θAm,θ|x⟩ Am,θ |x⟩ � (99) and C = � 1 − ��� ⟨x|A† m,θAm,θ|x⟩ ��� 2�1/2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (100) We now prove that Um,θ is unitary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' To do this, we prove Um,θ maps computational basis states to an orthonormal basis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' First note that Item 1 of Lemma 17 gives that for any x ∈ Bm: ⟨x|U † m,θUm,θ|x⟩ = ⟨x|A† m,θAm,θ|x⟩ = 1 (101) while a similar calculation gives for any x /∈ Bm: ⟨x|U † m,θUm,θ|x⟩ = C−2 � ⟨x| A† m,θ − ⟨x|A† m,θAm,θ|x⟩† ⟨x| A† m,θ � � Am,θ |x⟩ − ⟨x|A† m,θAm,θ|x⟩ Am,θ |x⟩ � (102) = C−2 � 1 − ��� ⟨x|A† m,θAm,θ|x⟩ ��� 2� = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (103) 17 Where we used Equations (99) and (100) on the first and second lines, respectively.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then we see the states {Um,θ |x⟩} for x ∈ {0, 1}m acting on computational basis states are correctly normalized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' It remains to show that these states are orthogonal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' First, we note that Item 3 of Lemma 17 gives that for any x, y ∈ {0, 1}m with y /∈ {x, x} we have ⟨y|A† θ,mAθ,m|x⟩ = ⟨y|A† θ,mAθ,m|x⟩ = ⟨y|A† θ,mAθ,m|x⟩ = ⟨y|A† θ,mAθ,m|x⟩ = 0 (104) and then a quick proof by cases shows that ⟨y|U † θ,mUθ,m|x⟩ = 0 for any x ∈ {0, 1}m and y /∈ {x, x}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Finally, we consider the inner product ⟨x|U † θ,mUθ,m|x⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' By definition of Bm, exactly one of x or x is in Bm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Assume for the moment that x /∈ Bm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then using Equation (99) we have ⟨x|A† θ,mAθ,m|x⟩ = C−1 � ⟨x| A† m,θ � � Am,θ |x⟩ − ⟨x|A† m,θAm,θ|x⟩ Am,θ |x⟩ � (105) = C−1 � ⟨x|A† m,θAm,θ|x⟩ − ⟨x|A† m,θAm,θ|x⟩ ⟨x|A† m,θAm,θ|x⟩ � (106) = C−1 � ⟨x|A† m,θAm,θ|x⟩ − ⟨x|A† m,θAm,θ|x⟩ � = 0 (107) as desired.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We conclude Um,θ is unitary.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Finally, to show Um,θ is close to Am,θ we compute ∥Am,θ − Um,θ∥2 F = � x∈{0,1}m |(Am,θ − Um,θ) |x⟩|2 (108) = � x∈Bm ��� � 1 − C−1� Am,θ |x⟩ − im+2|x|C−1 sinm(θ)Am,θ |x⟩ ��� 2 (109) ≤ � x∈Bm � 1 − C−1�2 + C−2 sin2m(θ) (110) ≤ 2m/2 �sin4m(θ) 2 + sin2m(θ) 1 − sin2m(θ) � ∈ O � θ2m� (111) where the final big O approximation holds for any fixed m as θ → 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Taking a square root then completes the proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Finally, we are in a position to describe the fully unitary (X, majmodp(X) ⊕ parity(X)) sampling circuit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Theorem 19.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For n sufficiently large and p = nc for any constant c ∈ (0, 1/2) there is a constant-depth circuit consisting of one and two qubit unitary gates and Um′,θ′ gates with m′ = ⌈c−1+1⌉ and θ′ = π/p which takes an n qubit GHZ state as input and produces an output which, when measured in the computational basis, produces an output distribution (X′, Y ) with ∆((X′, Y ), (X, majmodp(X) ⊕ parity(X))) ≤ 1 2 − 1 π + O(1/p).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (112) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For convenience, we assume n = Dm′ + 1 for some constant D.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This circuit consists of a Hadamard gate applied to each qubit of the GHZ state, followed by U † m′,θ′ gates applied to all qubits except the final qubit and an exp(−iπX/4) rotation applied to the final qubit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Figure 8 illustrates this circuit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note the quantum state produced by this circuit pre-measurement is �� U † θ′,m′ �⊗D ⊗ exp(−iπX/4) � H⊗n |ψ⟩ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (113) To prove this circuit samples from the correct distribution first note that Lemma 18 and Fact 14 give that ��Uπ/p,m − Aπ/p,m �� ∞ ∈ O(θ′m) = O(n−mc) ≤ O(n−(1+c)) (114) 18 H U † m′,θ′ ✌✌✌ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' H ✌✌✌ H U † m′,θ′ ✌✌✌ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' H ✌✌✌ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' H U † m′,θ′ ✌✌✌ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' H ✌✌✌ H exp(−iπX/4) ✌✌✌ \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 |GHZn⟩ Figure 8: Constant-depth fully unitary circuit producing approximate samples from the distribution (majmodp(X) ⊕ parity(X), X).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Here p = nc for some constant c ∈ (0, 1], θ′ = π/p, m′ = � c−1 + 1 � and n = Dm′ + 1 for some large integer D.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Them, Fact 15 gives that ���� �� U † θ′,m′ �⊗D ⊗ exp(−iπX/4) � H⊗n − �� A† π/p,m �⊗D ⊗ exp(−iπX/4) � H⊗n ���� ∞ ∈ O(Dn−(1+c)) (115) ≤ O(n−c).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (116) Combining this observation with Fact 16 and the definition of the operator norm ∥∥∞ gives that the classical distributions resulting from computation basis measurements of the states �� U † θ′,m′ �⊗D ⊗ exp(−iπX/4) � H⊗n |ψ⟩ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (117) and �� A† π/p,m �⊗D ⊗ exp(−iπX/4) � H⊗n |ψ⟩ (118) are O(n−c) in total variation distance away from each other.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then Corollary 11, together with the fact that O(p3/2e−n/p2) ≤ O(1/p) since p = n−c for c < 1/2 completes the proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 3 Classical Hardness of sampling (X, majmodp(X) ⊕ parity(X)) In this section we prove the classical hardness of sampling from the distribution (X, majmodp(X)⊕parity(X)) for each prime number p, where X is sampled from the uniform distribution over {0, 1}n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Recall that the total variation distance distributions D1, D2 over {0, 1}m is ∆(D1, D2) := max T ⊆{0,1}m ���� Pr[D1 ∈ T] − Pr[D2 ∈ T] ���� (119) By the definition of ∆, each set T ⊆ {0, 1}m, witnesses a lower bound on ∆(D1, D2) of �� Pr[D1 ∈ T] − Pr[D2 ∈ T] ��.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' To prove a lower bound on ∆(D1, D2), we construct a particular T ∈ {0, 1}m and refer to it as our statistical test, and we say that Di “passes” the statistical test with probability Pr[Di ∈ T].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 19 We are interested in the total variation distance between the true distribution D = (X, majmodp(X) ⊕ parity(X)), and the output distribution of some local function f : {0, 1}ℓ → {0, 1}n+1 that takes a uniformly random ℓ-bit string U as input.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' That is, we aim to lower bound ∆(f(U), D).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We prove such a lower bound in the following theorem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Theorem 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For all δ < 1 there exists an ǫ > 0 such that for all sufficiently large n and prime number p = Θ(nα) for α ∈ (δ/3, 1/3): Let f : {0, 1}ℓ → {0, 1}n+1 be an ǫ log(n)-local function, with ℓ ≤ n + nδ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then ∆(f(U), (X, majmodp(X) ⊕ parity(X))) ≥ 1/2 − O(1/ log n) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This proof follows closely to the analogous proof for (X, majmodp(X)) in [17], with similar notation.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let d be the locality of f, d = ǫ log(n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We start by permuting the outputs, as shown in [17].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that denotes concatenation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Lemma 21 ([17]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' There exists a partition of the input u ∈ {0, 1}ℓ into u = (x, y), and permutation of the output bits such that f(x, y) = g1(x1, y) ◦ g2(x1, y) ◦ · · · ◦ gs(xs, y) ◦ h(y).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (120) With gi : {0, 1} × {0, 1}ℓ−s → {0, 1}|Bi|, |Bi| ≤ O(d) and s ≥ Ω(n/d2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We will refer to each gi(xi, y) as the ith block of the output, indexed by Bi ⊆ [n + 1] in the initial permutation, for i ∈ [s].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that if we fix y, each block is independent, and block i ∈ [s] only depends on xi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We say that gi is y-fixed for some y ∈ {0, 1}ℓ−s if gi(0, y) = gi(1, y).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Without loss of generality, and for simplicity of notation, let’s assume that the last output bit does not get permuted, so f(x, y)n+1 is still the output bit which should (ideally) correspond to majmodp ⊕ parity of the first n outputs, and that it only depends on y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Next we define our statistical test.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Statistical Test: Let N0 := 3n3α,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' NF := 2n3α,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' we define our statistical test as T := T0 ∪ TF ∪ TS,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' with T0 := {z ∈ {0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1}n+1 : zBi = 0|Bi| for ≤ N0 blocks i ∈ [s]} (121) TF := {z ∈ {0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1}n+1 : ∃(x,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' y) : f(x,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' y) = z and ≥ NF blocks gi(xi,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} 
+page_content=' y) are y-fixed} (122) TS := {(z′,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' b) ∈ {0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1}n × {0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1} : b ̸= majmodp(z′) ⊕ parity(z′)} (“incorrect strings”) (123) We will show that f(U) passes the statistical test (f(U) ∈ T ) with probability at least 1/2 − O(1/ log n) and (X,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' majmodp(X) ⊕ parity(X)) passes with probability at most 1/n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since both of the functions majmodp and parity only depend on the Hamming weight of their input, it is useful to define MMp and PAR as functions over integers, such that majmodp(z) = MMp(|z|) and parity(z) = PAR(|z|) for any z ∈ {0, 1}n, where we use | · | to denote Hamming weight |z| = �n i=1 zi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' MMp(j) := � 0 if j < p/2 mod p 1 if j > p/2 mod p , PAR(j) := j mod 2, for j ∈ Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (124) Upon fixing y, the Hamming weight |f(x, y)|1:n is a sum of independent random variables |gi(xi, y)| which take on at most 2 different values.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The following Fact, Corollary, and Lemma will be useful in analyzing this independent sum of random variables in the context of the majmodp ⊕ parity function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Fact 22 (Fact 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='2 in [17]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let a1, a2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' at be nonzero integers modulo p, and let (x1, x2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , xt) ∈ {0, 1}n be sampled uniformly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then the total variation distance between �t i=1 aixi mod p and Up, the uniform distribution over {0, 1, .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', p − 1} is at most √pe−t/p2 Corollary 23.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For each prime p = Θ(nα) with α < 1, t = Ω(p3), a0, a1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' at nonzero integers modulo p, and A ⊆ {0, 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='p − 1} |A| p − O(1/n) ≤ Pr x∈{0,1}t � a0 + t � i=1 aixi ∈ A � ≤ |A| p + O(1/n) (125) 20 Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' By the definition of total variation distance, it is sufficient to prove that ∆(Up, a0 + �t i=1 aixi) ≤ O(1/n).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ∆(Up, a0 + t � i=1 aixi) ≤ √pe−t/p2 = √pe−Ω(p) = Θ(nα/2)e−Ω(nα) ≤ O(1/n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (126) Lemma 24.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For each α ∈ (0, 1), and prime number p = Θ(nα), define the sums S = a0 + �t i=1 aixi and U = u0 + �t i=1 uixi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Also let t = Ω(p3) and a0, a1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , at and u0, u1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , ut be integers with 0 < ai ≤ O(p/ log n) for each i ∈ [t].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then Pr x [MMp(S) ⊕ PAR(U) = b] ≥ 1 2 − O(1/ log n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (127) Proof.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let’s consider the case that at least 1/2 of the ui for i ∈ [t] are even.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then we arbitrarily fix all xi such that ui is odd, and let E = {i ∈ [t] : ui even}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that now the parity is fixed to c := PAR(u0 + � i∈[t]\\E uixi).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let a′ i = aEi for each i ∈ {1, 2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', |E|}, and a′ 0 = a0 + � i/∈E ai.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Pr xE [MMp(S) ⊕ PAR(U) = b] = Pr r∈{0,1}|E| \uf8ee \uf8f0majmodp(a′ 0 + |E| � i=1 a′ iri) ⊕ c = b \uf8f9 \uf8fb (128) = Pr r \uf8ee \uf8f0a′ 0 + |E| � i=1 a′ iri ∈ Mc⊕b \uf8f9 \uf8fb (129) Where M0 = {0, 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', (p − 1)/2} and M1 = {(p + 1)/2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', p − 2, p − 1}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since |M0| = (p + 1)/2, |M1| = (p − 1)/2, and |E| = Θ(nα), it follows from Corollary 23 that Pr xE [MMp(S) ⊕ PAR(U) = b] ≥ (p − 1)/2p − O(1/n) = 1/2 − O(1/nα).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (130) All that’s left is to consider the case where more than half of the ui for i ∈ [t] are odd.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In this case we will fix xi for each i ∈ [t] with ui even, setting a′ 0 := a0 + � i∈E Si, and u′ 0 = u0 + � i∈E ui.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We denote the set of indices of such “odd” elements as O = {i ∈ [t] : ui odd}, and set a′ i = aOi and u′ i = uOi for each i ∈ [|O|].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that since each u′ i is odd, we have PAR(u′ 0 + � i≤t u′ iri) = u′ 0 + (parity(r1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , r|O|)) mod 2, which is denoted as parity(r) ⊕ u′ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Pr xO [MMp(S) ⊕ PAR(U) = b] = Pr r∈{0,1}|O| \uf8ee \uf8f0majmodp � a′ 0+ � i≤t a′ iri � ⊕ parity(r) = b ⊕ u′ 0 \uf8f9 \uf8fb (131) =1 2 Pr r \uf8ee \uf8f0MMp � a′ 0+ � i≤t a′ iri � = b ⊕ u′ 0 ����parity(r) = 0 \uf8f9 \uf8fb (132) + 1 2 Pr r \uf8ee \uf8f0MMp � a′ 0 + � i≤t a′ iri � ̸= b ⊕ u′ 0 ����parity(r) = 1 \uf8f9 \uf8fb (133) Sampling a uniformly random t bit string z1z2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' zt with even Hamming weight is equivalent to sampling the first t − 1 bits uniformly at random, and setting the last bit to zt = parity(z1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , zt−1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' So the equation above is equal to =1 2 Pr r1,.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='rt−1 \uf8ee \uf8f0majmodp � a′ 0+ |O|−1 � i=1 a′ iri + a′ t · parity(r1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , rt−1) � = b ⊕ u′ 0 \uf8f9 \uf8fb (134) + 1 2 Pr r1,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='rt−1 \uf8ee \uf8f0majmodp � a′ 0 + |O|−1 � i=1 a′ iri + a′ t · parity(1, r1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , rt−1) � ̸= b ⊕ u′ 0 \uf8f9 \uf8fb .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (135) 21 For any positive integers z1, z2, l, r such that l < r and r − l − z2 ≥ 0, if Z2 is a positive random variable such that Z2 ≤ z2, then Pr[z1 + Z2 ∈ [l, r]] ≥ Pr[z1 ∈ [s, t − z2]].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, with all addition done modulo p, we lower bound the above expression as ≥1 2 Pr \uf8ee \uf8f0a′ 0 + |O|−1 � i=1 a′ iri ∈ [0, p/2 − a′ |O|) \uf8f9 \uf8fb + 1 2 Pr \uf8ee \uf8f0a′ 0 + |O|−1 � i=1 a′ iri ∈ (p/2, p − 1 − a′ |O|] \uf8f9 \uf8fb (136) ≥ 1 2p((p + 1)/2 − a′ |O| + (p − 1)/2 − a′ |O|) − O(1/n) (137) =1 2 − a′ |O| 2p − O(1/n) (138) =1 2 − O(p/ log n) 2p − O(1/n) ≥ 1 2 − O(1/ log n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (139) Where we used Corollary 23, and the Lemma assumption that 0 < ai ≤ p/ log n for each i ∈ [t] and p = Θ(nα).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We are now ready to prove the following claims.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Claim 25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Pr[f(U) ∈ T] ≥ 1/2 − O(1/ log n) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We will show that for each y, Prx[f(x, y) ∈ T ] ≥ 1/2 − 1/ log n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Suppose we fix y arbitrarily.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' If y fixes at least NF , blocks gi(xi, y), then Prx[f(x, y) ∈ TF ] = 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Moreover, if there are ≤ N0 blocks gi such that gi(xi, y) = 0|Bi| for some xi ∈ {0, 1}, then for each x, there will also be ≤ N0 blocks with gi(xi, y) = 0|Bi|, so Prx[f(x, y) ∈ T0] = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, we assume that there are < NF blocks gi that are y-fixed, and > N0 blocks with gi(xi, y) = 0|Bi| for some x ∈ {0, 1}s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Thus, there are more than N0 − NF = n3α blocks gi such that for some xi ∈ {0, 1}, gi(xi, y) = 0|Bi| and gi(1 − xi, y) ̸= 0|Bi|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let J ⊆ [s] denote this subset of blocks, with |J| ≥ n3α.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We arbitrarily fix the xi for i ∈ [s] \\ J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Now, the total Hamming weight of the first n bits of f(x, y) (denoted as |f(x, y)1:n|) only depends on the xi for i ∈ J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let Si denote the Hamming weight of the ith block for each i ∈ [s].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that for each i ∈ J, Si = 0 with probability 1/2, and Si is some positive integer modulo p, with probability 1/2, since |Bi| ≤ O(d) = O(ǫ log n) < p.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Moreover, for each i ∈ [s] \\ J, Si is fixed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, |f(x, y)1:n| = a + � j∈J |gi(xi, y)| = a + � i∈J Si (140) for some positive integer a that does not depend on {xi}i∈J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since the last bit b := f(x, y)n+1 is fixed, the correctness of the output is determined by the majmodp and parity of f(x, y)1:n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We have that f(x, y) ∈ TS ⇐⇒ MMp(a + � i∈J Si) ⊕ PAR(a + � i∈J Si) ̸= b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that we can write a + � i∈J Si = a + � i≤|J| airi for some uniformly random r ∈ {0, 1}|J|, and for each ai a fixed positive integer mod p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, Pr xJ [f(x, y) ∈ TS] = Pr r∈{0,1}|J|[majmodp(a + |J| � i=1 airi) ⊕ PAR(a + |J| � i=1 airi) ̸= b].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (141) Furthermore, each ai is at most O(d) = O(ǫ log n) since |Bj| ≤ O(d) for each j ∈ [s].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' By Lemma 24, it immediately follows that PrxJ[f(x, y) ∈ TS] ≥ 1 2 − O(1/ log n).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In conclusion, we’ve showed that after arbitrarily fixing y, Prx[f(x, y) ∈ T ] ≥ 1 2 − O(1/ log n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, Prx,y[f(x, y) ∈ T ] ≥ 1 2 − O(1/ log n), as desired.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Claim 26.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Pr � (X, majmodp(X) ⊕ parity(X)) ∈ T � ≤ O(1/n) 22 Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This proof follows that of Claim 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='3 in [17].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let D := (X, majmodp(X) ⊕ parity(X)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' By the union bound Pr[D ∈ T] ≤ Pr[D ∈ T0] + Pr[D ∈ TF ] + Pr[D ∈ TS].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Clearly Pr[D ∈ TS] = 0, since TS is the set of invalid strings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, it is sufficient for us to show that Pr[D ∈ TF ], Pr[D ∈ T0] ≤ 1 2n.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Pr[D ∈ TF ] = |TF|/2n, so it is sufficient to upper bound |TF|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Recall that z ∈ TF if z = f(x, y) for some x, y such that at least NF blocks are y-fixed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Thus each z ∈ TF is characterized by y, and the bits of x that do not belong to fixed blocks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' That is, we need at most ℓ − NF bits to characterize z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since ℓ ≤ n + nδ and NF = 2n3α, |TF | ≤ 2n+nδ−2n3α (142) ≤ 2n−n3α (143) since δ < 3α.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' So Pr[D ∈ TF] ≤ 2−n3α ≤ 1 2n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (144) All that’s left is to bound Pr[D ∈ T0], the probability that at most N0 = 3n3α blocks i are all zero, DBi = 0|Bi|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since the first n bits of D are independently random, the probability that the block DBi is all zero is independent of other blocks DBj for i ̸= j ∈ [s].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The probability that block i ∈ [s] is all zero is Pr � DBi = 0|Bi|� = (1/2)|Bi| ≥ (1/2)O(d) = (1/2)O(ǫ log n) = � 1 n �O(ǫ) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (145) Now, the probability that at most N0 = 3n3α are all zero is Pr[D ∈ T0] = Pr \uf8ee \uf8ef\uf8ef\uf8f0 � T ⊆[s]: |T |=N0 {DBi ̸= 0|Bi| for each i ∈ [s] \\ T } \uf8f9 \uf8fa\uf8fa\uf8fb (146) ≤ � s N0 � � 1 − 1 nO(ǫ) �s−N0 (147) ≤ � s N0 � e− s−N0 nO(ǫ) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (148) Since s ≥ Ω(N/d2) = Ω( n ǫ2 log2 n), s ≤ n and N0 = 3n3α, ≤ � n 3n3α � e−n−O(ǫ)( n ǫ2 log2 n −3n3α) (149) ≤ � n 3n3α �3n3α e−n1−O(ǫ)/ log2 ne3n3α (150) ≤ n3n3αe−n1−O(ǫ)/ log2 n (151) ≤ 1 2n (152) for sufficiently large n and small ǫ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In conclusion, Pr[D ∈ T] ≤ 1 n, as desired.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Using Claims 25 and 26, we can lower bound the total variation distance between the target distribution D = (X, majmodp(X) ⊕ parity(X)) and f(U).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ∆(D, f(U)) ≥ |Pr[f(U) ∈ T] − Pr[D ∈ T]| (153) ≥ 1 2 − O(1/ log n), (154) completing the proof of Theorem 20.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 23 4 Removing the GHZ State from QNC0 Circuits In this section we define sampling tasks related to the (X, majmodp(X)⊕parity(X)) sampling task considered in Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='2, but which can be performed (approximately) by a constant-depth quantum circuit without access to a GHZ input state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' At a high level, the approach we use to construct these tasks mirrors the approach used in [19] to find a relational problem which can be solved by a QNC0 circuit without access to a GHZ state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' First, we review “Poor Man’s GHZ States”: GHZ-like states which (unlike the GHZ state) can be constructed by QNC0 circuits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then we modify the circuit constructed in Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='2 by replacing the GHZ input state with a circuit constructing a poor man’s GHZ state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Finally, we define a new sampling task based on the output of these modified circuits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1 Review of Poor Man’s GHZ States Definition 27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For any integer n let Bn be the balanced binary tree on n vertices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Label its edges e1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', en−1 and vertices v0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', vn−1 (note the vertex labels start at 0), with v0 the root of T .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For every non-root vertex vi ∈ {v1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', vn−1} define P(vi) to be the set of edges contained in the (unique) path going from v0 to vi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Finally, define the function h(d) : {0, 1}n−1 → {0, 1}n−1 by h(d)i = � j: ej∈P (vi) dj i ∈ {1, 2, .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', n − 1}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (155) That is, thinking of the bitstring d as assigning values to the edges of Bn, h(d) assigns a value to every non-root vertex vi of Bn equal to the parity of the edge values going from v0 to vi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Definition 28.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Define the (binary tree) Poor Man’s GHZ state: |PMn⟩ = � d∈{0,1}n−1 1 2(n−1)/2 |d⟩ ⊗ 1 √ 2 ����h(d)0 � + ���h(d)1 �� (156) We call the first n − 1 qubits of |PMn⟩ “edge” qubits, and the last n qubits “vertex” qubits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that the n in |PMn⟩ gives the number of vertex qubits in the state, not the total number of qubits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Intuitively, it is occasionally helpful to think of the n vertex qubits of the state |PMn⟩ as being in an “almost-GHZ state”, or a GHZ state with additional Pauli X type “error” terms specified by the edge qubits.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' To explain this intuition, not that we can also write the state |PMn⟩ as |PMn⟩ = 1 2(d−1)/2 � d∈{0,1}n−1 � |d⟩ ⊗ ��n−1 � i=1 Xh(d)i � ⊗ I2 � |GHZn⟩ � (157) We will make use of Equation (157) when working with the state |PMn⟩ later in this section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Theorem 29.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For any n, the state |PMn⟩ can be constructed by a depth-3 circuit consisting of 1 and 2 qubit gates acting on 2n − 1 qubits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This state can be constructed by following the procedure outlined in Theorem 17 of [19], but omitting the measurement of the edge qubits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We recap this procedure here.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Begin with 2n − 1 qubits, n of which we identify with the vertices v0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', vn−1 of the tree Bn and n − 1 of which we identify with edges e1, .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='en−1 of the same tree.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Apply a Hadamard gate to each vertex qubit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then, for every pair of vertices vi and vj connected by an edge ek, apply CNOT gates with controls on vertex qubits vi and vj and target on the edge qubit ek.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Order the edge qubits as in the tree Bn;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' these form the first n − 1 qubits of |PMn⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Order the vertex qubits v1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='vn−1v0 (note the qubit identified with the root vertex comes last in this ordering);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' these form remaining n qubits of the state |PM(n)⟩.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' To see that this circuit produces the correct state first observe that after the Hadamard gates are applied and before the CNOT gates are applied, the vertex qubits are in a uniform superposition over all computa- tional basis states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We order the vertex qubits as in the state |PMn⟩, so the final vertex qubit is associated with the root vertex of the graph Bn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' It is then straightforward to check that, for any n − 1 bit string 24 x = x1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xn−1, if the vertex qubits are in state |x0⟩ then applying the CNOT gates puts the edge qubits in the state h−1(x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Similarly, if vertex qubits are in the state |x1⟩, applying the CNOT gates puts the edge qubits in the state h−1(x).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then we can write the state produced by our circuit as 1 2n/2 \uf8eb \uf8ed � x∈{0,1}n−1 ��h−1(x) � ⊗ |x0⟩ + � x∈{0,1}n−1 ��h−1(x) � ⊗ |x1⟩ \uf8f6 \uf8f8 (158) = 1 2n/2 \uf8eb \uf8ed � d∈{0,1}n−1 |d⟩ ⊗ |h(d)0⟩ + � d∈{0,1}n−1 |d⟩ ⊗ ���h(d)1 � \uf8f6 \uf8f8 (159) = 1 2(n−1)/2 \uf8eb \uf8ed � d∈{0,1}n−1 |d⟩ ⊗ � 1 √ 2 |h(d)0⟩ + ���h(d)1 ��\uf8f6 \uf8f8 = |PMn⟩ (160) where we used on the second line that the function h was one-to-one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Finally, we show this circuit can be implemented in depth 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Consider the 2n − 1 vertex graph obtained from Bn by bifurcating each edge of Bn – that is, replacing each edge of Bn connecting vertices vi and vj with a new vertex connected to both vi and vj.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This graph is still a tree, hence 2-colorable, and edges of this graph are in one-to-one correspondence with CNOT gates which need to be implemented in the circuit described above.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' All CNOT gates in the same color class touch disjoint qubits and be applied simultaneously, so we see all CNOT gates can be applied in depth 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Adding the layer of Hadamard gates required as the first step shows this whole circuit can be implemented in depth 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='2 Sampling with QNC0 Circuits We begin with a description of the distribution which we will show can be sampled from (approximately) by a QNC0 circuit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Like the distributions considered in Section 2, it will be a distribution of the form (Z, f(Z)) where Z is a uniformly random bitstring and f(Z) : {0, 1}n → {0, 1} is some function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' However, the function f considered here is substantially more complicated than the functions considered in Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We define this function next.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Definition 30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For any prime p define the function pmmajmodp : {0, 1}2n−2 → {0, 1} to act on a 2n − 2 bit string z via the following procedure: 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Associate the first n − 1 bits of z with edges of the complete binary tree Bn and the next n − 1 bits with the non-root vertices v1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='vn−1, following the same ordering as in Definition 27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Label bits associated with edges d and the bits associated with vertices x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For any integer a define MMp(a) := � 0 if a < p/2 1 otherwise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (161) 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Set pmmajmodp(z) = MMp �n−1 � i=1 xi(−1)h(d)i � � parity(x) (162) Now we construct a quantum circuit which samples approximately from the distribution (Z, pmmajmodp(Z)) without requiring a GHZ state input.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' As in Section 2, we begin by describing a circuit that performs the sampling task and involves single qubit non-unitary rotations Aθ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 25 Theorem 31.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For any p ∈ Z+ there is a constant-depth circuit consisting of one and two qubit unitary gates and Aθ operations which takes the (2n− 1)-qubit all zeros state as input and produces an output which, when measured in the computational basis, produces an output distribution (Z′, Y ) with ∆((Z′, Y ), (Z, pmmajmodp(Z))) ≤ 1 2 − 1 π + 1 2p + O(p3/2e−n/4p2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (163) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The first step is preparing the state |PMn⟩, which can be done in constant depth by Theorem 29.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' After that, the same non-unitary circuit as described in the proof of Theorem 7 is applied to the vertex qubits of the poor man’s GHZ state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This is illustrated in Figure 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ✌✌✌ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ✌✌✌ H A† π/p ✌✌✌ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='H ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='A† ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='π/p ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✌✌✌ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='H ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='exp(−iπX/4) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✌✌✌ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='|PMn⟩ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='Figure 9: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='Constant-depth non-unitary circuit producing approximate samples from the distribution ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='(Y,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' pmmajmodp(Y )).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The upper box indicates the n − 1 “edge” qubits of the state |PMn⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The lower box indicates the n “vertex” qubits of the same state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' To see that this circuit approximately samples from the correct distribution we write the state |PMn⟩ as a GHZ state with additional controlled X “error” terms, then commute those through the rest of circuit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In the following argument we will need to pay close attention to the rotation angle θ in the non-unitary operator Aθ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For this reason, for the remainder of this section only, we change notation and write Aθ as A (θ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The key observation is the operator identity A (θ)† = A (−θ)† Z (164) which holds for any θ and can quickly be verified by checking the action of ZA (θ) and A (−θ) Z on |0⟩ and |1⟩ basis states.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then (using Equation (157) as a starting point) we can write the pre-measurement state produced by the circuit above as: 1 2(d−1)/2 � d∈{0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1}n−1 \uf8eb \uf8edI2n−1 ⊗ \uf8eb \uf8ed n−1 � j=1 A �π p �† H \uf8f6 \uf8f8 ⊗ exp �−iπX 4 � H \uf8f6 \uf8f8 \uf8eb \uf8ed|d⟩ ⊗ \uf8eb \uf8ed \uf8eb \uf8ed n−1 � j=1 Xh(d)j \uf8f6 \uf8f8 ⊗ I2 \uf8f6 \uf8f8 |GHZn⟩ \uf8f6 \uf8f8 = 1 2(d−1)/2 � d∈{0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1}n−1 \uf8eb \uf8edI2n−1 ⊗ \uf8eb \uf8ed n−1 � j=1 A �π p �† HXh(d)j \uf8f6 \uf8f8 ⊗ exp �−iπX 4 � H \uf8f6 \uf8f8 (|d⟩ ⊗ |GHZn⟩) (165) = 1 2(d−1)/2 � d∈{0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1}n−1 \uf8eb \uf8ed|d⟩ ⊗ \uf8eb \uf8ed \uf8eb \uf8ed n−1 � j=1 Zh(d)jA � (−1)h(d)j π p �† \uf8f6 \uf8f8 ⊗ exp �−iπX 4 �\uf8f6 \uf8f8 H⊗n |GHZn⟩ \uf8f6 \uf8f8 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (166) Where the rearrangement on the third line used the operator identity discussed above (Equation (164)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 26 From this it is clear that the measurement of the first n − 1 edge qubits produces a uniformly random bitstring.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We assume that such a measurement has been carried out, producing some bitstring d.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then, following the same analysis as used in the proof of Theorem 7, we consider the (unnormalized) state of the first vertex qubit when the first n − 1 vertex qubits have been measured and bitstring x = x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xn−1 is observed: ⟨x|1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='n−1 \uf8eb \uf8ed n−1 � j=1 Zh(d)jA � (−1)h(d)j π p �† \uf8f6 \uf8f8 ⊗ exp �−iπX 4 � � H⊗n |GHZn⟩ � = (−1)|x| ⟨x|1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='n−1 \uf8eb \uf8ed n−1 � j=1 A � (−1)h(d)j π p �† \uf8f6 \uf8f8 ⊗ exp �−iπX 4 � � H⊗n |GHZn⟩ � (167) = (−1)|x|2−(n−1) exp \uf8eb \uf8ediX \uf8eb \uf8ed−π 4 + π p n−1 � j=1 � xj(−1)h(d)j� \uf8f6 \uf8f8 \uf8f6 \uf8f8 |parity(x)⟩ , (168) where the final line followed from exactly the same series of identities as used in Equations (20) to (25).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The key features of this argument are illustrated in Figure 10, where we focus just on the analysis of the vertex qubits when the edge qubits are measured and classical bitstring d is observed.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Next (still following the analysis used in Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='1) we note that the vector above has norm 2n−1 for any string x, and hence the bitstring x observed when measuring the first n − 1 vertex qubits is uniformly random.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Additionally, we let Yd,x be the random variable representing the outcome measurement applied to the final qubit of the circuit depicted in Figure 9, conditioned on the measurement of the previous 2n − 2 qubits giving the bitstring (d, x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Straightforward calculation applied to Equation (168) gives Pr[Yd,x = parity(x)] = cos2 � −π 4 + π p �� i xi(−1)h(d)i �� (169) Then, small extension of Lemma 8 (proven next, in Lemma 32) gives 1 22n−2 � (d,x)∈{0,1}2n−2 Pr � Yd,x ̸= pmmajmodp(d, x) � ≤ 1 2 − 1 π + 1 2p + O(p3/2e−n/4p2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (170) Finally, we let D′, X′ be random variables representing the output of measuring the edge qubits and first n − 1 vertex qubits of the circuit depicted in Figure 9, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We have already shown that the marginal distributions of D′ and X′ are uniformly random and so we find ∆((D′, X′, YD′,X′), (Z, pmmajmodp(Z))) ≤ 1 2 − 1 π + 1 2p + O(p3/2e−n/4p2) (171) by exactly the same argument as used to finish the proof of Theorem 7.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Lemma 32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Define the random variable Yd,x as in the proof of Theorem 31, so Pr[Yd,x = parity(x)] = cos2 � −π 4 + π p �� i xi(−1)h(d)i �� (172) Then 1 22n−2 � (d,x)∈{0,1}2n−2 Pr � Yd,x ̸= pmmajmodp(d, x) � ≤ 1 2 − 1 π + 1 2p + O(p3/2e−n/4p2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (173) 27 Xh(d)1 H A (π/p)† ✌✌✌ x1 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Xh(d)n−1 H A (π/p)† ✌✌✌ xn−1 H exp(−iπX/4) \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 |GHZn⟩ = H Zh(d)1 A (π/p)† ✌✌✌ x1 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' H Zh(d)n−1 A (π/p)† ✌✌✌ xn−1 H exp(−iπX/4) \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 |GHZn⟩ = H A � (−1)h(d)1π/p �† Zh(d)1 ✌✌✌ x1 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' H A � (−1)h(d)n−1π/p �† Zh(d)n−1 ✌✌✌ xn−1 H exp(−iπX/4) \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 |GHZn⟩ = H ✌✌✌ x1 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' H ✌✌✌ xn−1 H exp � −iX � π/4 + π/p � j xj(−1)h(d)j �� \uf8f1 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f4 \uf8f3 |GHZn⟩ Figure 10: The state of the final vertex qubit of the circuit described in Figure 9 when all other vertex qubits (and edge qubits) are measured in the computational basis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Equivalence between lines is explained in the proof of Theorem 31.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 28 Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let D, X be random variables each taking value uniformly at random from {0, 1}n−1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then we can write 1 22n−2 � (d,x)∈{0,1}2n−2 Pr � Yd,x ̸= pmmajmodp(d, x) � = Pr � YD,X ̸= parity(x) ⊕ MMp �� i xi(−1)d i �� (174) = � k Pr � YD,X ̸= parity(x) ⊕ MMp (k) ��� � i Xi(−1)D i = k � Pr �� i Xi(−1)D i = k � (175) We compare this equation to Equation (34), and note that (after rewriting majmodp(X) = MMp(|X|)) the two probabilities are identical except that the random variable |X| has been replaced by � Xi(−1)D i .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then the proof of the bound proceeds identically to the proof of bound in Lemma 8, except that we need a bound on the total variation distance between the distribution of the random variable � i Xi(−1)Di (mod p) and the uniform distribution over {0, 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', p − 1}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' To do this, we write � i Xi(−1)Di = � i Xi − 2 � i:Xi=1 Di (176) and note that both terms in the right-hand side equation give uniform distributions mod p by Fact 22 (provided that close to half the bits of Xi are ones, which happens with high probability).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Formally, let ˜X be the random variable taking value uniformly at random from the set of n-bit strings with Hamming weight at least n/4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then we have ∆ \uf8eb \uf8ed� i Xi − 2 � i:Xi=1 Di, � i ˜Xi − 2 � i: ˜ Xi=1 Di \uf8f6 \uf8f8 ≤ ∆(X, ˜X) ≤ exp(−n/8), (177) where the first inequality follows because for any distributions X and ˜X and (possibly random) function f we have ∆(X, X′) ≥ ∆(f(X), f(X′)), and the second inequality follows from Hoeffding’s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then, letting Up denote the uniform distribution mod p, for any ˜x in the support of ˜X we have, by Fact 22, that ∆ � 2 � i:˜xi=1 Di (mod p), Up � ≤ √p exp � −n/4p2� (178) and hence ∆ � |˜x| − 2 � i:˜xi=1 Di (mod p), Up � ≤ √p exp � −n/4p2� (179) since shifting a distribution doesn’t change its distance from the uniform distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then summing over all possible ˜x we see ∆ \uf8eb \uf8ed ��� ˜X ��� − 2 � i: ˜ Xi=1 Di (mod p), Up \uf8f6 \uf8f8 ≤ √p exp � −n/4p2� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (180) Combining Equations (177) and (180) gives ∆ �� i Xi − 2 � i:Xi=1 Di (mod p), Up � ≤ exp(−n/8) + √p exp � −n/4p2� = O(√p exp � −n/4p2� ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (181) Then, following the same proof as in Lemma 8 and plugging the above inequality in place of Fact 22 gives the desired bound.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 29 Then, following the same arguments as used in Section 2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='2, we show that we can replace the non-unitary rotation gates used in the circuit described above with actual unitary gates, while causing small disturbance to the output distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The result of this procedure is a QNC0 circuit that takes the all zeros state as input and whose output samples approximately from the distribution (Z, pmmajmodp(Z)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Theorem 33.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For n sufficiently large and p = nc for some constant c ∈ (0, 1/2) there is a constant-depth circuit consisting of one and two qubit unitary gates and Um′,θ′ gates with m′ = ⌈c−1 + 1⌉ and θ′ = π/p which takes the (2n − 1)-qubit all zeros state as input and produces an output which, when measured in the computational basis, produces a distribution (Z′, Y ) with an n-bit output which correlates approximately with the distribution (Z, pmmajmodp(Z)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The desired circuit can be constructed from the circuit presented in Figure 9 following the same procedure as used in Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Specifically, we first replace blocks of m parallel Aθ gates with Aθ,m gates, then replace those with Uθ,m gates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The only additional complication we encounter is that we must apply a final permutation to our output bits to accommodate a “shuffling effect” caused by replacing blocks of the Aθ gates by Aθ,m.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The final circuit is presented in Figure 11, where the Cm gate denotes a permutation whose action on the m qubit computational basis state |x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm⟩ is given by Cm |x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm⟩ = |x2x3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xmx1⟩ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (182) ✌✌✌ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ✌✌✌ H U † m′,θ′ Cm ✌✌✌ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' H ✌✌✌ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' H U † m′,θ′ Cm ✌✌✌ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='H ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✌✌✌ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='H ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='exp(−iπX/4) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✌✌✌ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='✤ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ❴ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f4 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='\uf8f3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='|PMn⟩ ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='Figure 11: ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='Constant-depth unitary circuit producing approximate samples from the distribution ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='(Y,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' pmmajmodp(Y )).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that m is constant, and so the unitaries acting on m qubits have constant size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The upper box indicates the n − 1 “edge” qubits of the state |PMn⟩.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The lower box indicates the n “vertex” qubits of the same state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' As a first step towards showing this circuit samples from the desired distribution, we show that replacing the parallel Aθ gates in the circuit of Figure 9 with Aθ,m gates followed by a Cm gates doesn’t change the post-measurement distribution produced by the circuit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' To see why, we consider the state of the final vertex qubit in both circuits after a measurement is performed on all edge qubits, producing bitstring d, and the first m vertex qubits, producing bitstring x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In the circuit described in Figure 9, the state of the 30 final qubit is given by ⟨x1x2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm| m � i=1 AθZh(d)i � i CNOTi,n |+⟩⊗m ⊗ |0⟩ (183) = ⟨x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm| m � i=1 exp(iθXxi)Zh(d)i � i CNOTi,n |+⟩⊗m ⊗ |0⟩ (184) = ⟨x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm| m � i=1 Zh(d)i |+⟩⊗n ⊗ exp � iθX � i xi(−1)h(d)i � |parity(x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm)⟩ (185) and, if the Aθ gates are replaced by a Cm gate and Aθ,m gate the state of the final qubit is given by ⟨x1x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xm| CmAθ,m m � i=1 Zh(d)i � i CNOTi,n |+⟩⊗m ⊗ |+⟩n (186) = ⟨x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xmx1| Aθ,m m � i=1 Zh(d)i � i CNOTi,n |+⟩⊗m ⊗ |+⟩n (187) = ⟨x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xmx1| m � i=1 exp(iθXxi)Zh(d)i � i CNOTi,n |+⟩⊗m ⊗ |+⟩n (188) = ⟨x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xmx1| m � i=1 Zh(d)i |+⟩⊗n ⊗ exp � iθX � i xi(−1)h(d)i � |parity(x2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='xmx1)⟩ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (189) Since these states are the same up to an overall phase we see the change has no effect on the probability of observing outcomes d and x1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', xm or the state of the unmeasured qubit.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' It is straightforward to extend this analysis to the case where the same replacement is made to all D blocks of Aθ gates in the circuit of Figure 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' It remains to show that replacing the Aθ,m gates (in the circuit produced by the replacement discussed above) with Uθ,m gates causes a negligible change to the distribution output by the circuit after a computa- tional basis measurement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Following exactly the same argument as used to prove Theorem 20 we see �����I⊗(n−1) 2 ⊗ �� CmU † θ′,m′ �⊗D ⊗ exp(−iπX/4) � H⊗n − I⊗(n−1) 2 ⊗ �� CmA† π/p,m �⊗D ⊗ exp(−iπX/4) � H⊗n ����� ∞ ∈ O(Dn−(1+c)) ≤ O(n−c).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (190) and so the classical distributions produced by computational basis measurements of the states I⊗n−1 2 ⊗ �� CmU † θ′,m′ �⊗D ⊗ exp(−iπX/4) � H⊗n |PMn⟩ (191) and I⊗n−1 2 ⊗ �� CmA† π/p,m �⊗D ⊗ exp(−iπX/4) � H⊗n |PMn⟩ (192) also differ by at most O(n−c) in total variation distance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Combining Theorem 31 with the fact that O(p3/2en/4p2) ≤ O(1/p) for p = n−c with c < 1/2 completes the proof.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 5 Classical hardness of Sampling (Z, pmmajmodp(Z)) This section concerns the hardness of classically sampling from the distribution (Z, pmmajmodp(Z)), where Z is a random variable Z ∼ Unif({0, 1}N) and the function pmmajmodp is defined in Definition 30, and recalled below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 31 pmmajmodp The input to pmmajmodp is a N = 2n−2 bit string, (x1, x2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' xn−1, d1, d2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , dn−1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Each xi corresponds to the vertex vi of the balanced binary tree Bn, and each di corresponds to the edge ei of Bn per the ordering in Definition 27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' pmmajmodp(x, d) = MMp �n−1 � i=1 xi(−1)h(d)i � ⊕ parity(x) x, d ∈ {0, 1}n−1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (193) Where MMp is defined in Definition 28 and h(d) is defined in Definition 27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In Section 3 we proved the classical hardness of sampling from the slighly different distribution (X, majmodp(X)⊕ parity(X)) where X ∼ Unif({0, 1}n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For the sake of comparing with pmmajmodp we list this function below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' majmodp ⊕ parity majmodp(x) ⊕ parity(x) = MMp � n � i=1 xi � ⊕ parity(x) x ∈ {0, 1}n (194) Both of these distributions have the form (Y, MMp(SY ) ⊕ parity(Y )) for a uniformly random bitstring Y , and SY a sum that depends on Y .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For the majmodp(Sx) ⊕ parity(x) function, the relevant sum is simply the Hamming weight of the input x ∈ {0, 1}n, denoted as |x|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' A nice property of the Hamming weight, |x| = � i xi is that each of the terms in the sum depends on a different bit of the input, and thus if many of the bits of xi are sampled independently, then so are their corresponding terms in the sum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The key challenge in applying the framework from the proof of Theorem 20 is that the terms in S = � i xi(−1)h(d)i no longer depend on disjoint variables.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In particular, flipping the bit dj corresponding to edge ej flips the sign of all terms xi(−1)h(d)i for vi downstream from ej in the balanced binary tree Bn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In order to accommodate for this dependence, we will partition the tree Bn into subtrees, then identify subtrees corresponding to output variables which are independent when a large chunk of the input variables are fixed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We show that for some choice of p, any function f which takes as input a uniformly random (N +N δ)-bit string and is (1/2 − Ω(1))-close in total variation distance with (Z, pmmajmodp(Z)), must have locality d ≥ Ω(log1/2 N).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' If we consider f as a classical circuit with fan-in 2, this corresponds to a circuit depth lower bound of Ω(log log N).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Theorem 34.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For each δ < 1, there exists an ǫ > 0 such that for all sufficiently large even integer N and prime number p = Θ(N α) for α ∈ (δ/3, 1/3): Let f : {0, 1}ℓ → {0, 1}N+1 be an (ǫ log N)1/2-local function, with ℓ ≤ N + N δ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then ∆(f(U), (Z, pmmajmodp(Z))) ≥ 1/2 − O(1/ log N).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The function f takes input an ℓ-bit string we label as (u1, u2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , uℓ) and outputs (N + 1)-bit output string we label as (z1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , zN, b).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let n be the integer such that N = 2n − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Just as in the definition of pmmajmodp in Definition 30, we consider the partition of z = (x, d) ∈ {0, 1}n−1 × {0, 1}n−1, where x1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , xn−1 are the first n − 1 bits of z, and d1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' dn−1 are the next n − 1 bits of z, and b ∈ {0, 1} is the last bit which is considered “correct” if b = pmmajmodp(z).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The output variables x1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , xn−1 are associated with v1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , vn−1, the non-root vertices of the balanced binary tree Bn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The output variables d1, .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , dn−1 are associated with the edges e1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , en−1, where we use the ordering as defined in Definition 27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' As is standard in graph theory, for any graph G we use V (G) and E(G) to denote G’s vertices and edges respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' To understand the correlations between each of the output bits zi, it is useful to partition Bn as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Definition 35 (Bn partition (T0, T1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , Tk)).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let D := log(2d), we partition the vertices of the balanced binary tree Bn into the bottom D layers and the top log n − D layers as shown in Figure 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let the top tree T0 be the tree induced by the top log(n) − log(2d) layers of vertices in Bn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The subgraph induced by the bottom D layers is a forest of trees which we denote as T = {T1, T2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , Tk} and refer to as the small trees.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In order to make sure that each edge and vertex of Bn is accounted for in {T0} ∪ T , for each i ∈ [k] we consider the edge which connects the root of Ti to a leaf of T0 as in the small tree Ti.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Thus, each small tree T ∈ T has an edge with the root of T as its only endpoint as shown in Figure 12.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Although a subtree T of Bn consists of vertices and edges labeled as {vi}i and {ei}i, we slightly abuse notation and say that the output variable zi is “in” T (denoted zi ∈ T ) if the edge or vertex which is 32 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' T0 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' T1 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' T2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' T3 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Tk .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' log n − D D T Figure 12: Partition of the balanced binary tree Bn into k + 1 subtrees.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The top tree T0 consists of the subtree induced by the first log n − D layers of Bn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The k bottom trees T = {T1, T2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , Tk} include all vertices in the bottom D layers of Bn and all incident edges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that for each i ∈ [h], Ti contains a single edge that only has one endpoint, this edge corresponds to the edge in Bn that connects the root of Ti with its parent in T0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 33 associated with zi is in E(T ) ∪ V (T ).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' And will sometimes use T to denote the subset of variables {zi} which are associated with the tree T .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Moreover, we define the size of a subtree T of Bn be |T | = |V (T )| + |E(T )|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that since each T ∈ T has an extra edge at the root, with no other endpoint, |E(T )| = |V (T )| ≤ 2d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The top tree T0 has |V (T0)| = 2log n−D − 1 = n 2d − 1 vertices, and |E(T0)| = |V (T0)| − 1 = n 2d − 2 edges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For each i ∈ [k] the small tree Ti has at most 2D − 1 = 2d − 1 vertices V (Ti), and the same number of edges |E(Ti)| = |V (Ti)| = 2d − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In total, the top tree has size |T0| ≤ n/d − 3 and each bottom tree Ti ∈ T has size at most |Ti| ≤ 4d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since the root vertex of each small tree is at the (log n − D + 1)-level of the balanced binary tree Bn, there are k = 2log n−D = n/d small trees.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For each output variable zi in the string z, we consider the other output variables which are in the same tree as zi as the tree neighborhood of zi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Definition 36 (Tree Neighbors, NT ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For each variable zi for i ∈ [N], let NT (zi) ⊆ {zi}i∈[N], be the subset of outputs in the same tree T ∈ T ∪ {T0} as zi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Moreover, for any subset of outputs S ⊆ {zi}i∈[N], let NT (S) := � zi∈S NT (zi).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Recall that the variables {zi}i∈[N] only correspond to the non-root vertices of Bn, but the root vertex v0 is in the top tree T0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Thus for vertices vj, vk corresponding to the left and right children of root v0, we have that zj ∈ NT (zk), despite there being no variable in NT (zk) associated with the root.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that for any output in a small tree zi ∈ � T ∈T T , NT (zi) has size at most 2d since |T | ≤ 2d for each T ∈ T .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Moreover, for any subset of small tree outputs S ⊆ � T ∈T T , |NT (S)| ≤ 2d|S|.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Definition 37 (Forest Partition).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' F0, F1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , Fs ⊆ {zi}i∈[N] is a forest partition if both of the following hold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' F0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , Fs is a partition of all variables {zi}i∈[N] F0 ⊎ · · · ⊎ F1 = {zi}i∈[N] (195) 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Each Fi contains a union over a subset of trees from T ∪ {T0}.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' NT (Fi) = Fi for each i ∈ [s] (196) The next lemma shows that we can construct a forest partition with the property that, after a large fraction of the input bits to our (ǫ log N)1/2 local function have been fixed, each of the remaining unfixed bits controls a single (independent) subset of trees in the partition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Lemma 38.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' There exists a forest partition F0, F1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , Fs for some s ≥ Ω(N/d3), with |Fi| ≤ O(d2) for each i ∈ [s];' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' and a partition of the input u ∈ {0, 1}ℓ into u = (w, y), with w ∈ {0, 1}s such that f(w, y) �� F0 = h(y), (197) f(w, y) �� {N+1} = b(y), (198) f(w, y) �� Fi = gi(wi, y) for each i ≥ 1, (199) and T0 ⊆ F0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (200) For some functions h : {0, 1}ℓ−s → {0, 1}|F0|, b : {0, 1}ℓ−s → {0, 1}, and gi : {0, 1} × {0, 1}ℓ−s → {0, 1}|Fi| for each i ∈ [s].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We refer to gi(wi, y) as the ith block of the output, assigning values to the variables in Fi, for i ∈ [s].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that if we fix the input y, each block gi(wi, y) is a function only of the input bit wi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since the input w ∈ {0, 1}s is uniformly random, the value of each of the blocks is independent conditioned on y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Proof of Lemma 38.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Consider the bipartite graph with the ℓ input variables to f as the left vertices, and the N + 1 output variables as the right vertices, where each input j ∈ [ℓ] and output i ∈ [N + 1] vertex share an edge iff the ith output bit of f, denoted as fi is a function of the jth input bit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We refer to this graph as the input-output dependency graph of f.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For each vertex v in the dependency graph, let the neighborhood of v, Nf(v), be the set of vertices adjacent to v.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Similarly, for any subset S of vertices, let Nf(S) := � v∈S Nf(v).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since by assumption, f is d-local, the degree of the output vertices is at most d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 34 Let L be the set of input vertices of the dependency graph for f which are adjacent to the output vertices in T0 or b, that is L := Nf(T0 ∪ {b}) (or we could associate b with the root v0 in T0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' If we fix the inputs in L, then b, and the outputs in T0 are also fixed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For this reason we refer to L as the fixed inputs, and the remaining inputs U = {ui}i∈[ℓ] \\ L as the unfixed inputs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' |L| ≤ d(|T0|) ≤ d (|V (T0)| + |E(T0)|) ≤ n − 3d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (201) Therefore, there are at least N − |L| ≥ 2n − 1 − (n − 2d) ≥ n unfixed inputs U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since |V (T0)| = n 2d − 1, and |E(T0)| = |V (T0)| − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' As mentioned above, the locality of f implies that the degree of the output vertices in the dependency graph is at most d.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Using the following claim, we can also bound the degree of half of the input vertices in U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Claim 39.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' There is a subset of inputs ˜U ⊆ U with size | ˜U| ≥ |U|/2 ≥ n/4 such that the degree of the vertices in ˜U in the dependency graph of f is at most O(d).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since there are at most N ≤ 2n output vertices, each of degree at most d, there are at most 2nd edges in the input/output dependency graph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, at least half of the vertices in U have degree at most 4d since otherwise there would be |U|/2 vertices with degree greater than 4d, and the total number of edges would be strictly greater than |U| 2 · 4d ≥ n 2 · 4d = 2dn edges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Within these bounded degree input vertices ˜U, we next find a subset W such that each pair of vertices in W are adjacent to disjoint trees.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Claim 40.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' There exists a subset of inputs W ⊆ ˜U of size |W| ≥ Ω(N/d3) such that for each pair ui ̸= uj ∈ W, the neighborhoods Nf(ui), Nf(uj) intersect with disjoint trees.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' That is, for each ui ̸= uj ∈ W, NT (Nf(ui)) ∩ NT (Nf(uj)) = ∅.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We greedily build W as follows: Initialize the set V as the inputs ˜U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' While V is non-empty, choose any v ∈ V , add it to W and remove Nf(NT (Nf(v))) from V .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that the size of V decreases by at most O(d3) in each iteration since for any subset of outputs S, |Nf(S)| ≤ d|S|, and |NT (S)| ≤ 2d|S|, and for any subset of inputs Sin, |Nf(Sin)| ≤ O(d).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, |W| = | ˜U|/O(d3) ≥ Ω(n/d3) = Ω(N/d3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We set w as the input bits of u which are indexed by W from Claim 40, and let y be the remaining bits of u.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For each i ∈ [s], let Fi = NT (Nf(wi)) and let F0 be the remaining {zi} variables: F0 = {zi}i∈[n] \\ (� i∈[s] Fi).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We first show that F0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , Fs is a forest partition as defined in Definition 37.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' By the definition of F0 it is clear that �s i=1 Fi = {zi}i∈[N].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Furthermore, these forests are disjoint since for each i ̸= j ∈ [s], Fi ∩ Fj = NT (Nf(wi)) ∩ NT (Nf(wj)) = ∅ by Claim 40, and since F0 ∩ (� i∈[s] Fi) = ∅ by definition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' All that’s left to show that this is a forest partition is that NT (Fi) = Fi for each i ∈ {0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', s}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This is clearly true for each i ∈ [s] since NT (Fi) = NT (NT (Nf(wi))) = NT (Nf(wi)) = Fi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' To show that NT (F0) = F0, suppose for the sake of contradition that this is not the case, that there exists some a ∈ NT (F0) \\ F0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since �s j=0 Fj = {zi}i∈[N], a is in some other forest Fj with j ̸= 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' But this implies that NT (Fj) ∩ F0 ̸= ∅, and so Fj ∩ F0 ̸= ∅, a contradiction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, F0, F1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , Fs is a forest partition as defined in Definition 37.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Next, we show that for each i ∈ [s], f(w, y) �� Fi is a function of only wi and y.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This is because for each j ∈ [s], such that j ̸= i, we have Nf(wj) ∩ Fi ⊆ Fj ∩ Fi = ∅.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Similarly, the outputs F0 do not depend on any bits of w since for each i ∈ [s], Nf(wi) ∩ F0 ⊆ Fi ∩ F0 = ∅.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since we initialized our set of fized variables L with Nf(T0 ∪ {b}), and we chose W such that W ∩ L = ∅, it follows that both b and the outputs in T0 can be written as functions of y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Furthermore, this implies that T0 ⊆ F0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' All that’s left to prove Lemma 38 is to show |Fi| ≤ O(d2) for each i ∈ [s].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that for each i ∈ [s], |Fi| = |NT (Nf(wi))|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since wi was chosen from the subset of input variables that are not adjacent to T0 in f’s dependency graph (those indexed by U), and have degree at most O(d) (indexed by ˜U ⊆ U), it follows that |NT (Nf(wi))| ≤ 2d|Nf(wi)| and |Nf(wi)| ≤ O(d).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, |Fi| ≤ O(d2) for each i ∈ [s].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 35 Next we consider how the pmmajmodp function evaluates on (x, d).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We partition the terms of the sum S = �n−1 i=1 xi(−1)h(d)i into s + 1 according to the forest partition F0, F1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , Fs from Lemma 38.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Si = � vj∈V (Fi) xj(−1)h(d)i for each i ∈ {0, 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', s}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (202) Where V (Fi) denotes the set of vertices vj ∈ V (Bn) such that xj ∈ Fi and E(Fi) denotes the set of edges ej ∈ E(Bn) such that dj ∈ Fi for i ∈ {0, 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', s}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Again, note that v0 /∈ V (F0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We can rewrite the sum as S = �s i=0 Si.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let’s consider the sum S for a particular assignment z = (x, d) ∈ {0, 1}N, where for each i ∈ {0, 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', s}, zFi denotes the assignment to Fi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that S0 depends only on zF0, and each term Si for i ≥ 1 depends only on zF0 and zFi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' S(z) = S0(zF0) + s � i=1 Si(zFi, zF0) (203) This is because xj(−1)h(d)i depends on xj as well as each dj′ where ej′ is along the path from v0 to vj in Bn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Definition 41 (Minimal Block).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For some assignment z ∈ {0, 1}N, we say that the ith block is minimal if Si(zFi, zF0) = min z′ Fi∈{0,1}|Fi| Si(z′ Fi, zF0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (204) Claim 42.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For each fixed assignment to zF0, and any i ∈ [s], there is a unique minimal assignment to zFi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' That is, for each zF0 ∈ {0, 1}|F0|, there exists a z∗ Fi ∈ {0, 1}|Fi| such that Si(z∗ Fi, zF0) < Si(zFi, zF0) for each zFi ∈ {0, 1}|Fi| \\ {z∗ Fi}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (205) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For each i ∈ [s], the sum Si can be broken into terms for each of the small trees Tj ∈ T in the forest Fi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Si = � j∈[k]:Tj⊆Fi STj (206) Where STj := � vi∈V (Tj) xi(−1)h(d)i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that the value each of STj for j ∈ [s] depends on zF0 and the variables in Tj.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since each Tj for j ∈ [s] are disjoint, it is sufficient for us to show that for a fixed zF0, there is a unique minimal assignment to the variables of Tj for each j ∈ [s].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For any two vertices vj ̸= vk ∈ V (Bn), let Pj,k ⊆ E(Bn) be the subset of edges {e1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , en−1} along the path from vj to vk.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that for any vertex vi, P(vi) as defined in Definition 27 is equivalent to P0,i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Consider some T ∈ T with root vr, and single-endpoint root edge er.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We can rewrite ST as ST = � vi∈V (T ) xi � ej∈P0,i (−1)dj (207) = (−1)h(d)r \uf8eb \uf8edxr + � vi∈V (T )\\{vr} xi � ej∈Pr,i (−1)dj \uf8f6 \uf8f8 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (208) Note that h(d)r is a function of zF0 and dr, and for a fixed zF0, we can fix dr such that h(s)r = −1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Consider that we set dr in this way.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ST = −xr + � vi∈V (T )\\{vr} −xi � ej∈Pr,i (−1)dj (209) Now, ST is minimized if each of the V (T ) terms are minimized (value −1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This is achieved by setting xi = 1 for each vi ∈ V (T ) and dj = 0 for each ej ∈ E(T ) \\ {er}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that any other assignment to the variables will result in one of the terms being either 0 or 1, therefore this is the unique minimal assignment to the tree T .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 36 Next, we design a statistical test similar to that in the proof of classical hardness of (X, majmodp ⊕ parity(X)) (Theorem 20) in Section 3 with the additional set TM consisting of strings with a limited number of minimal blocks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We define the statistical test as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Statistical Test: Let N0, NM := 3N 3α and NF := 2N 3α.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The statistical test is T := TM ⊎ T0 ⊎ TF ⊎ TS,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' where TM := {z′ ∈ {0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1}N+1 : ≤ NM blocks i ∈ [s] of z′ are minimal} (210) T0 := {z′ ∈ {0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1}N+1 : z′ Fi = 0|Fi| for ≤ N0 blocks i ∈ [s]} (211) TF := {z′ ∈ {0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1}N+1 : ∃(w,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' y) : f(w,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' y) = z′ and ≥ NF blocks gi(wi,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' y) are y-fixed} (212) TS := {(z,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' b) ∈ {0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1}N × {0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1} : b ̸= pmmajmodp(z)} (“incorrect strings”) (213) We will show that the function f(U) passes the statistical test with probability at least 1 2 − O(1/ log N) whereas the true distribution D = (Z,' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' pmmajmodp(Z)) passes with probability at most 1/N for sufficiently large N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Claim 43.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Pr[f(U) ∈ T] ≥ 1 2 − O(1/ log N).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Using our partition of random input u into (x, y), our goal is to upper bound Prx,y[f(x, y) ∈ T ], where the probability is taken over the randomness of (x, y) chosen uniformly at random from {0, 1}s × {0, 1}ℓ−s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since Prx,y[f(x, y) ∈ T ] ≥ miny Prx[f(x, y) ∈ T ], it is sufficient for us to upper bound Prx[f(x, y) ∈ T ] for arbitrarily chosen y ∈ {0, 1}ℓ−s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Suppose we arbitrarily fix y ∈ {0, 1}ℓ−s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' If ≥ NF blocks of f(w, y) are y-fixed, then f(w, y) ∈ TF for each w ∈ {0, 1}s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Moreover, if at most NM blocks gi(wi, y) are minimal for some choice of wi ∈ {0, 1}, then for each w ∈ {0, 1}s, f(w, y) ∈ TM.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Similarly, if at most N0 blocks evaluate to zero gi(wi, y) = 0|Fi| for some choice of wi ∈ {0, 1}, then for each w ∈ {0, 1}s, f(w, y) ∈ T0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, we assume that less than NF blocks of f are y-fixed, greater than NF of the forests of f(w, y) take on their minimal value for some choice of w, and greater than N0 blocks are all zeros for some choice of w.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, the following two hold: 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' There are at least NM − NF = N 3α blocks i ∈ [s] such that Si(0, y) ̸= Si(1, y).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' There are at least N0 − NF = N 3α blocks i ∈ [s] such that |gi(0, y)| ̸= |gi(1, y)|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Let J ⊆ [s] be the indices of the blocks that change their respective terms of S, and let K ⊆ [s] be the indices of the blocks with Hamming weight that changes.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' J := {i ∈ [s] : Si(0, y) ̸= Si(1, y)} K := {i ∈ [s] : |gi(0, y)| ̸= |gi(1, y)|} (214) We denote |x, d| as the Hamming weight of the first N output bits of f(w, y), and recall that b is the last bit of f(w, y).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that |x, d| = |h(y)| + �s i=1 |gi(wi, y)|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Claim 44.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Over the randomness of x ∈ {0, 1}s, the random variables S and |x, d| can be written as S = a + � i∈J airi, |x, d| = e + � i∈K eiri where r ∼ Unif({0, 1}|J∪K|).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (215) For some integers a, e, positive integers a1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , a|J| ≤ O(d2) = O(ǫ log N), and nonzero integers e1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' , e|K|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that over the randomness of x ∈ {0, 1}s, for each j′ /∈ J and k′ /∈ K, Sj′ and |gk′(w′ k, y)| are fixed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, there exists some integers α, β such that S = α + � j∈J Sj |x, d| = β + � k∈K |gk(wk, y)|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (216) Moreover, each Sj for j ∈ J are independent random variables which take on two different integer values with equal probability.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Likewise the |gk(wk, y)| for k ∈ K are independent random variables which take on 37 two distinct values with equal probability.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Although for i ∈ J ∩ K, Si and |gi(wk, y)| are not independent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Thus for each j ∈ J and k ∈ K, there exists integers α0, α1, β0, β1 such that α0 ̸= α1, β0 ̸= β1, and Sj = � α0 if xj = 0 α1 if xj = 1 |gk(wk, y)| = � β0 if xj = 0 β1 if xj = 1 x ∼ Unif({0, 1})|J∪K|.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (217) For each i ∈ J ∪ K, we will assign ri to either xi or 1 − xi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since each xi is independently uniformly random over {0, 1}, so is each ri.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Note that we can write the term Sj as either Sj = α0 + (α1 − α0)xj, or Sj = α1 + (α0 − α1)(1 − xj).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Thus, it is possible to rewrite Sj as c + ajrj for some integer c and positive integer aj, by setting rj = xj and ai = (α1 − α0) if α1 > α0 and setting rj = 1 − xj and ai = (α0 − α1) if α0 > α1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Furthermore, the value of aj = |α0 − α1|, and is at most 2 · |V (Fj)| ≤ d · 2D = 2d2 since the value of |Sj| is at most the number of vertices in Fj.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, we can write S = a + � i∈J airi for some integer a and positive integers ai ≤ 2d2 for i ∈ J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For each k ∈ K, we can also write the term |gk(wk, y)| as either β0 +(β1 −β0)x0 or β1 +(β0 −β1)(1−x0).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, regardless of whether rk was assigned as xk or 1 − xk, the term can be written as c + ekrk for some (not necessarily positive) integers c and ek.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' And, as desired, the entire Hamming weight sum can be written as |x, d| = b + � i∈K eiri for some integers b and ei for i ∈ K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Next, we consider how much the sums in Equation (215) depend on the same bits of r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Suppose that |J ∩K| ≤ 1 2N 3α.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Then |J \\K| ≥ 1 2N 3α.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' If we fix rK arbitrarily, the value of |x, d| is fixed, and therefore so is parity(x, d).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Letting c = parity(x, d), a′ = a + � i∈J∩K airi, and J′ = J \\ K, we can simplify the probability that the output is “incorrect” over the randomness of rJ′ as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Pr rJ′ [f(w, y) ∈ TS] = Pr rJ′[MMp(S) ⊕ parity(x, d) ̸= b] (218) = Pr rJ′ � MMp � a′ + � i∈J′ airi � ̸= c ⊕ b � (219) = Pr rJ′ � a′ + � i∈J′ airi ∈ Mc⊕b⊕1 mod p � (220) Where M0 = {0, 1, .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', (p − 1)/2} and M1 = {(p + 1)/2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=', p − 1}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since |M0|, |M1| ≥ (p − 1)/2, and ai is nonzero modulo p (since ai ≤ O(ǫ log N) for i ∈ J, and p = Θ(N α))) it follows from Corollary 23 that Pr rJ′ [f(w, y) ∈ TS] ≥ p − 1 2p − O(1/N) ≥ 1/2 − O(1/p).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (221) Where we used that |J′| ≥ 1 2N 3α ≥ Ω(p3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since the bits of rK were fixed arbitrarily, it holds that Prw[f(w, y) ∈ TS] = Prr[MMp(S) ⊕ parity(x, d) ̸= b] ≥ 1/2 − O(1/p).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore we assume that |J ∩ K| > 1 2N 3α.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' If we fix all ri for i /∈ J ∩ K, the remaining non-fixed blocks i ∈ J ∩ K have possible assignments which give different values to both |gi(wi, y)| and Si.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Letting a′ = a + � i/∈J∩K a + airi, and e′ = � i/∈J∩K eiri, we simplify the probability that f(w, y) is “incorrect” over the randomness of rJ∩K as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Pr rJ∩K [f(w, y) ∈ TS] = Pr rJ∩K � MMp � a′ + � i∈J∩K airi � ⊕ PAR � e′ + � i∈J∩K eiri �� (222) Since ai ≤ O(d2) ≤ O(ǫ log N) for each i ∈ [s] (by Claim 44) and |J ∩K| ≥ 1 2N 3α = Ω(p3), it directly follows from Lemma 24 that Pr rJ∩K [f(w, y) ∈ TS] ≥ 1 2 − O(1/ log N) (223) Therefore, Prw[f(w, y) ∈ TS] ≥ 1 2 − O(1/ log N).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 38 Claim 45.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Pr � (Z, pmmajmodp(Z)) ∈ T � ≤ 1/N for sufficiently large N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' This proof is almost identical to that of Claim 26, which follows closely to the proof of Claim 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='3 in [17].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The main difference in this proof accounts for the additional term TM in the statisitcal test – so in addition to upper bounding the probability that D = (Z, pmmajmodp(Z)) is in T0, TS, or TF , we will also upper bound the probability that D ∈ TM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since D always outputs a “correct” string, Pr[D ∈ TS] = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Thus, by the union bound it is sufficient for us to prove that Pr[D ∈ T0], Pr[D ∈ TF ], Pr[D ∈ TM] ≤ 1 3N .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We start by showing that Pr[D ∈ TM] ≤ 1 3N .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' To this end, we consider the probability that D ∈ TM conditioned on the value of ZF0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since ZF0 ∈ {0, 1}|F0| is uniformly random, Pr[D ∈ TM] = 1 2|F0| � t0∈{0,1}|F0| Pr[D ∈ TM|ZF0 = t0].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (224) Thus it is sufficient for us to show that Pr[D ∈ TM|ZF0 = t0] ≤ 1 3N for each t0 ∈ {0, 1}|F0|.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' As shown in Claim 42, for each forest Fi for i ∈ [s], and some fixed zF0 ∈ {0, 1}|F0|, there is a unique assignment for zFi to minimize Si(zFi, zF0).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Additionally, the minimality of each block is independent conditioned on the value of ZF0 since for each i ∈ [s], Si(Z) is a function of only ZFi and ZF0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We lower bound the probability that any given forest is minimal conditioned on the value of ZF0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For any i ∈ [s] and t0 ∈ {0, 1}|F0|, we have Pr D [block i is minimal |ZF0 = t0] = 1 2|Fi| ≥ 2−O(d2) = 2−O(ǫ log N) ≥ N −O(ǫ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (225) Where we used that |Fi| ≤ O(d2) ≤ O(ǫ log n) for i ∈ [s].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since the minimality of each forest are independent conditioned on the value of ZF0, for any subset of forests U ⊆ [s], the probability that none of them are minimal conditioned on ZF0 is Pr D [all forests of U are not minimal|ZF0 = t0] = � i∈U Pr[forest i is not minimal|ZF0 = t0].' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (226) Therefore,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' for each t0 ∈ {0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1}|F0|,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Pr D [D ∈ TM|ZF0 = t0] = Pr D \uf8ee \uf8ef\uf8ef\uf8f0 � U⊆[s]: |U|=s−NM {all forests of U are not minimal } �����ZF0 = t0 \uf8f9 \uf8fa\uf8fa\uf8fb (227) ≤ � U⊆[s]: |U|=s−NM Pr � all forests of U are not minimal ���ZF0 = t0 � (228) = � U⊆[s]: |U|=s−NM � i∈U Pr � forest i is not minimal ���ZF0 = t0 � (229) ≤ � s NM � � 1 − N −O(ǫ)�s−NM (230) (231) In the second line we used the union bound,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' the third line we used the independence of the block’s minimality conditioned on ZF0 (Equation (226)),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' the fourth line we used Equation (225).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We can further simplify, using 39 Ω(N/d3) ≤ s ≤ N, d ≤ (ǫ log N)1/2, and NM = 3N 3α.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ≤ � s NM �NM exp � −N −O(ǫ)(s − NM) � (232) = sNM exp � −N −O(ǫ)s � � exp � N −O(ǫ)� NM �NM (233) ≤ N 3N 3α exp � n1−O(ǫ) log3/2 N � (234) ≤ 1 3N (235) for sufficienly large N and small ǫ (such that 3α < 1 − O(ǫ)).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore Pr[D ∈ TM] ≤ 1 3N .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Next, we show using similar calculations that Pr[D ∈ T0] ≤ 1 3N .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Since each of the blocks i ∈ [s], ZFi is uniformly random, whether each of them is all zeros is independent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore the probability that block i ∈ [s] is all zeros is.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Pr � ZFi = 0|Fi|� = 2−|Fi| ≤ 2−O(d2) = N −O(ǫ) for each i ∈ [s] (236) Since N0 = 3N 3α, we can use the calculations from Equations (230) to (235) to bound Pr[D ∈ T0].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Pr[D ∈ T0] ≤ � T ⊆[s]: |T |=s−NM � i∈T Pr � ZFi ̸= 0|Fi|� (237) ≤ � s N0 � � 1 − N −O(ǫ)�s−N0 (238) ≤ 1 3N (239) For sufficiently large N and small ǫ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' All that’s left is to show Pr[D ∈ TF ] ≤ 1 3N .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' For this we use the same exact calculations from the proof of Claim 26, but in this scenario we have ℓ ≤ N + N 3α, and the size of the support of D is 2N.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Pr[D ∈ TF] ≤ |TF | 2N ≤ 2ℓ−NF 2N ≤ 2N 3α−2N 3α ≤ 2−N 3α ≤ 1 3N .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' (240) Where we used ℓ ≤ N + N δ, δ ≥ 3α, and NF = 2N 3α.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Therefore, applying the union bound we get Pr[D ∈ T] ≤ Pr[D ∈ TS] + Pr[D ∈ TM] + Pr[D ∈ T0] + Pr[D ∈ TF ] (241) ≤ 0 + 1 3N + 1 3N + 1 3N = 1 N (242) 6 Discussion and Open Problems Our results show that QNC0 circuits can sample from distributions that NC0 circuits cannot.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Below we list a few ways in which we think these results could potentially be extended.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The constant-sized unitary Um,θ used in the construction of of constant depth quantum circuits (Sections 2 and 4) is not constructed directly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Instead we show it exists indirectly by modifying a non-unitary operation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' An explicit construction of this unitary would be required for an experimental implementation of this circuit, and may also lead to further insights.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 40 In an experiment with the goal of demonstrating quantum advantage, one would like to not just construct a QNC0 circuit which samples from a distribution which NC0 circuits cannot, but also verify that the distribution sampled from is indeed hard to sample from classically.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' How many samples are needed for this verification?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Can the circuit be modified to make the verification easier?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' We point out here that the constant total variation distance in Corollary 4 means that only a few samples are needed to verify that the distribution produced by the described quantum circuit is not produced by a fixed NC0 circuit, for any specific choice of circuit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' However ruling out all distributions producible by NC0 circuits is a harder task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Can we prove an input-independent sampling separation between QNC0 and AC0 circuits?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Notably, in [18], Viola proves certain distributions cannot be produced by AC0 circuits.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Can these techniques be extended to QNC0 circuits?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' If so, we would have a novel technique for lower bounded the circuit complexity of quantum states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' If not, we should be able to find a QNC0 circuit which samples from one of these distributions, producing the desired sampling separation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 7 Acknowledgements We would like to thank David Gosset for helpful discussions, and Ansis Rosmanis for sharing an insightful note.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' References [1] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Aaronson.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Quantum computing, postselection, and probabilistic polynomial-time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences, 461(2063):3473–3482, 2005.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1, 3] [2] S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Aaronson and L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Chen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Complexity-theoretic foundations of quantum supremacy experiments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' arXiv preprint arXiv:1612.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='05903, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1, 3] [3] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Anshu, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Breuckmann, and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Nirkhe.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Nlts hamiltonians from good quantum codes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' arXiv preprint arXiv:2206.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='13228, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 3] [4] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Boixo, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Isakov, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Smelyanskiy, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Babbush, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Ding, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Jiang, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Bremner, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' M.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Martinis, and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Neven.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Characterizing quantum supremacy in near-term devices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Nature Physics, 14(6):595–600, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1, 3] [5] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Bouland, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Fefferman, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Nirkhe, and U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Vazirani.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Quantum supremacy and the complexity of random circuit sampling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' arXiv preprint arXiv:1803.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content='04402, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1, 3] [6] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Bravyi, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Gosset, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' K¨onig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Quantum advantage with shallow circuits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Science, 362(6412):308– 311, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2, 3] [7] D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Browne, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Kashefi, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Perdrix.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Computational depth complexity of measurement-based quantum computation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In Conference on Quantum Computation, Communication, and Cryptography, pages 35– 46.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Springer, 2010.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 3] [8] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Cleve and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Watrous.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Fast parallel circuits for the quantum fourier transform.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In Proceedings 41st Annual Symposium on Foundations of Computer Science, pages 526–536.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' IEEE, 2000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 3] [9] D.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Grier and L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Schaeffer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Interactive shallow clifford circuits: Quantum advantage against nc1 and beyond.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In Proceedings of the 52nd Annual ACM SIGACT Symposium on Theory of Computing, pages 875–888, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2] [10] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' H˚astad.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Computational limitations for small-depth circuits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' MIT press, 1987.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2] [11] P.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Høyer and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' ˇSpalek.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Quantum fan-out is powerful.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Theory of computing, 1(1):81–103, 2005.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 3] 41 [12] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Preskill.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Quantum computing in the nisq era and beyond.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Quantum, 2:79, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2] [13] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Razborov.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Lower bounds on the size of bounded depth circuits over a complete basis with logical addition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Mathematical Notes of the Academy of Sciences of the USSR, 41(4):333–338, 1987.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2] [14] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Shor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Polynomial-time algorithms for prime factorization and discrete logarithms on a quantum computer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' SIAM review, 41(2):303–332, 1999.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 3] [15] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Smolensky.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Algebraic methods in the theory of lower bounds for boolean circuit complexity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In Proceedings of the nineteenth annual ACM symposium on Theory of computing, pages 77–82, 1987.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [p.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2] [16] B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Terhal and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' DiVincenzo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Adaptive quantum computation, constant depth quantum circuits and arthur-merlin games, 2002.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 1, 3] [17] E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Viola.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' The complexity of distributions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' SIAM Journal on Computing, 41(1):191–218, 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2, 4, 11, 20, 23, 39] [18] E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Viola.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Extractors for circuit sources.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' SIAM Journal on Computing, 43(2):655–672, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2, 41] [19] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Watts, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Kothari, L.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Schaeffer, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Tal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' Exponential separation between shallow quantum circuits and unbounded fan-in shallow classical circuits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' In Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing, pages 515–526, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' [pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} +page_content=' 2, 3, 5, 24] 42' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NAzT4oBgHgl3EQfEfqj/content/2301.00995v1.pdf'} diff --git a/6NAzT4oBgHgl3EQff_yv/content/2301.01462v1.pdf b/6NAzT4oBgHgl3EQff_yv/content/2301.01462v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..78750faae961dde2cfd98dbed808649c48841583 --- /dev/null +++ b/6NAzT4oBgHgl3EQff_yv/content/2301.01462v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c9e6a7b69094a7fe8c88cb34e27cb8ecfb433b2c41cac4e7737aedd899d9a200 +size 726729 diff --git a/6NAzT4oBgHgl3EQff_yv/vector_store/index.faiss b/6NAzT4oBgHgl3EQff_yv/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..214df0b149b2aeb291d0e4ee21875526fa98993e --- /dev/null +++ b/6NAzT4oBgHgl3EQff_yv/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:22b6fdc719996988a2984c9a75e35fe757e0b90615f2fe5a5c9db41a4b7f481b +size 3342381 diff --git a/6NAzT4oBgHgl3EQff_yv/vector_store/index.pkl b/6NAzT4oBgHgl3EQff_yv/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..aeb76b5dfa95859d6b6877b232e6f8d80968b022 --- /dev/null +++ b/6NAzT4oBgHgl3EQff_yv/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cd2f1965fc3a2e4e5589a9d66b718383001bb2769054f0c05b41da17d1c6233 +size 129879 diff --git a/6dA0T4oBgHgl3EQfN__p/vector_store/index.faiss b/6dA0T4oBgHgl3EQfN__p/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..9e10749118baee26468a7520011f1cadd8af319f --- /dev/null +++ b/6dA0T4oBgHgl3EQfN__p/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d89686d458e46b916af13eaed9fbab6255a526612cc04dd554dd2b566b3e5a7 +size 3211309 diff --git a/79E0T4oBgHgl3EQffQDe/vector_store/index.faiss b/79E0T4oBgHgl3EQffQDe/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..7ba805964d26a38fb0e066f3203b5010581ab3b1 --- /dev/null +++ b/79E0T4oBgHgl3EQffQDe/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99282173c2ea474d4104935e8cc9ff58442f9dc4a52d8cb7697dce9718b13380 +size 3342381 diff --git a/7tFLT4oBgHgl3EQfAi4b/content/2301.11966v1.pdf b/7tFLT4oBgHgl3EQfAi4b/content/2301.11966v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..bd48ce4a92268a328e4273c21fc87b7bd07aa937 --- /dev/null +++ b/7tFLT4oBgHgl3EQfAi4b/content/2301.11966v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4492414d534972cefe1c245b6dc143b1586540c026fc0fbd0db14303d7456ed +size 170060 diff --git a/7tFLT4oBgHgl3EQfAi4b/vector_store/index.faiss b/7tFLT4oBgHgl3EQfAi4b/vector_store/index.faiss new file mode 100644 index 
0000000000000000000000000000000000000000..8bfcf59f76be9ebc058a31c2d22c54d326a073a3 --- /dev/null +++ b/7tFLT4oBgHgl3EQfAi4b/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1a37936b7f78fa436fb3472952b961244fca4d8d8ccba9224c3e440ff3b4732 +size 2293805 diff --git a/7tFLT4oBgHgl3EQfAi4b/vector_store/index.pkl b/7tFLT4oBgHgl3EQfAi4b/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..3b7345312075611f545a6ccaad403c76981a5b97 --- /dev/null +++ b/7tFLT4oBgHgl3EQfAi4b/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2d9e4a910fd4639088e36e279acbef512663b7bad90607f574fc632f6050674 +size 80203 diff --git a/8dFJT4oBgHgl3EQfnix6/content/tmp_files/2301.11592v1.pdf.txt b/8dFJT4oBgHgl3EQfnix6/content/tmp_files/2301.11592v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..373c817f1cdeb155b1800fbe5dfd0637f36b4bc0 --- /dev/null +++ b/8dFJT4oBgHgl3EQfnix6/content/tmp_files/2301.11592v1.pdf.txt @@ -0,0 +1,2400 @@ +Solving Constrained Reinforcement Learning through Augmented State and +Reward Penalties +Hao Jiang 1 Tien Mai 1 Pradeep Varakantham 1 +Abstract +Constrained Reinforcement Learning has been +employed to enforce safety constraints on policy +through the use of expected cost constraints. The +key challenge is in handling expected cost accu- +mulated using the policy and not just in a single +step. Existing methods have developed innova- +tive ways of converting this cost constraint over +entire policy to constraints over local decisions +(at each time step). While such approaches have +provided good solutions with regards to objective, +they can either be overly aggressive or conserva- +tive with respect to costs. This is owing to use +of estimates for ”future” or ”backward” costs in +local cost constraints. 
+To that end, we provide an equivalent uncon- +strained formulation to constrained RL that has +an augmented state space and reward penalties. +This intuitive formulation is general and has in- +teresting theoretical properties. More importantly, +this provides a new paradigm for solving con- +strained RL problems effectively. As we show in +our experimental results, we are able to outper- +form leading approaches on multiple benchmark +problems from literature. +1. Introduction +There are multiple objectives of interest when handling +safety depending on the type of domain: (a) ensuring safety +constraint is never violated; (b) ensuring safety constraint is +not violated in expectation; (c) ensuring the chance of safety +constraint violation is small (Value at Risk, VaR) (Lucas & +Klaassen, 1998); (d) ensuring the expected cost of violation +is bounded (Conditional Value at Risk, CVaR) (Rockafellar +et al., 2000; Yang et al., 2021); and others. One of the +main models in Reinforcement Learning to ensure safety is +Constrained RL, which employs objective (b) above. Our +focus in this paper is also on Constrained RL. +1School of Computing and Information Systems, Singapore +Management University. +Preprint +Constrained RL problems are of relevance in domains that +can be represented using an underlying Constrained Markov +Decision Problem (CMDP) (Altman, 1999). The main chal- +lenge in solving Constrained RL problems is the expected +cost constraint, which requires averaging over multiple tra- +jectories from the policy. Such problems have many appli- +cations including but not limited to: (a) electric self driving +cars reaching destination at the earliest while minimizing +the risk of not getting stranded on the road with no charge; +(b) robots moving through unknown terrains to reach a des- +tination, while having a threshold on the average risk of +passing through unsafe areas (e.g., a ditch). 
Broadly, they +are also applicable to problems robot motion planning (Ono +et al., 2015; Moldovan & Abbeel, 2012; Chow et al., 2015a), +resource allocation (Mastronarde & van der Schaar, 2010; +Junges et al., 2015), and financial engineering (Abe et al., +2010; Di Castro et al., 2012). +Related Work: Many model free approaches have been pro- +posed to solve Constrained RL problems. One of the initial +approaches to be developed for addressing such constraints +is the Lagrangian method (Chow et al., 2015b). However, +such an approach does not provide either theoretical or em- +pirical guarantees in ensuring the constraints are enforced. +To counter the issue of safety guarantees, next set of ap- +proaches focused on transforming the cost constraint over +trajectories into cost constraint over individual decisions in +many different ways. One such approach imposed surrogate +constraints (El Chamie et al., 2016; G´abor et al., 1998) on +individual state and action pairs. Since the surrogate con- +straints are typically stricter than the original constraint on +the entire trajectory, they were able to provide theoretical +guarantees on safety. However, the issue with such type of +approaches is their conservative nature, which can poten- +tially hamper the expected reward objective. More recent +approaches such as CPO (Constrained Policy Optimiza- +tion) (Achiam et al., 2017), Lyapunov (Chow et al., 2019b), +BVF (Satija et al., 2020a) have since provided more tighter +local constraints (over individual decisions) and thereby +have improved the state of art in guaranteeing safety while +providing high quality solutions (with regards to expected +reward). In converting a trajectory based constraint to a +local constraint, there is an estimation of cost involved for +the rest of the trajectory. 
Due to such estimation, trans- +arXiv:2301.11592v1 [cs.LG] 27 Jan 2023 + +Solving Constrained RL through Augmented State and Reward Penalties +formed cost constraints over individual decisions are error +prone. In problems where the estimation is not close to the +actual, results with such approaches with regards to cost +constraint enforcement are poor (as we demonstrate in our +experimental results). +Contributions: +To that end, we focus on an approach that relies on exact +accumulated costs (and not on estimated costs). In this +paper, we make four key contributions: +• We provide a re-formulation of the constrained RL prob- +lem through augmenting the state space with cost ac- +cumulated so far and also considering reward penalties +when cost constraint is violated. This builds on the idea +of augmented MDPs (Hou et al., 2014) employed to +solve Risk Sensitive MDPs. The key advantage of this +reformulation is that by penalizing rewards (as opposed +to the entire expected value that is done typically using +Lagrangian methods), we get more fine grained control +on how to handle the constraints. Also, we can utilize +existing RL methods with minor modifications. +• We show theoretically that the reward penalties em- +ployed in the new formulation are not adhoc and can +equivalently represent different constraints mentioned +in the first paragraph of introduction, i.e. risk-neural, +chance constrained (or VAR) and CVaR constraints. +• We modify existing RL methods (DQN and SAC) to +solve the re-formulated RL problem with augmented +state space and reward penalties. A key advantage for +the new approaches is the knowledge of exact costs +incurred so far (available within the state space) and this +allows for assigning credit for cost constraint violations +more precisely during learning. +• Finally, we demonstrate the utility of our approach by +comparing against leading approaches for constrained +RL on multiple benchmark problems from literature. 
+We empirically demonstrate that our approaches are able +to outperform leading Constrained RL approaches from +the literature either with respect to expected value or in +enforcing the cost constraint or both. +2. Constrained Markov Decision Process +A Constrained Markov Decision Process (CMDP) (Altman, +1999) is defined using tuple ⟨S, A, r, p, d, s0, cmax⟩, where +S is set of states with initial state as s0, A is set of actions, +r : S × A → R is reward with respect to each state-action +pair, p : S × A → P is transition probability of each state. +d : S → d(S) is the cost function and cmax is the maximum +allowed cumulative cost. Here, we assume that d(s) ≥ 0 +for all s ∈ S. This assumption is not restrictive as one +can always add positive amounts to d(s) and cmax to meet +the assumption. The objective in a risk-neural CMDP is to +compute a policy, π : S × A → [0, 1], which maximizes +reward over a finite horizon T while ensuring the cumulative +cost does not exceed the maximum allowed cumulative cost. +max +π +E +� T +� +t=0 +γtr(st, at)|s0, π +� +s.t. +E +� T +� +t=0 +d(st)|s0, π +� +≤ cmax. +(RN-CMDP) +The literature has seen other types of constraints, e.g., +chance constraints requiring that Pπ(D(τ) > cmax) ≤ α +for a risk level α ∈ [0, 1], or CVaR ones of the form +Eπ[(D(τ) − cmax)+] ≤ β. Handling different types of +constraints would require different techniques. In the next +section, we present our approach based on augmented state +and reward penalties that assembles all the aforementioned +constraint types into one single framework. +3. Cost Augmented Formulation for Safe RL +We first present our extended MDP reformulation and pro- +vide several theoretical findings that connect our extended +formula with different variants of CMDP. We first focus +on the case of single-constrained MDP and show how the +results can be extended to the multi-constrained setting. +3.1. 
Extended MDP Reformulation
+We introduce our approach to track the accumulated cost
+at each time period, which allows us to determine states
+that potentially lead to high-cost trajectories. To this end,
+let us define a new MDP with an extended state space
+$\langle \tilde{S}, A, \tilde{r}, \tilde{p}, d, s_0, c_{\max} \rangle$ where $\tilde{S} = \{(s, c) \mid s \in S, c \in \mathbb{R}_+\}$.
+That is, each state $s'$ of the extended MDP includes
+an original state from $S$ and information about the accumu-
+lated cost. We then define the transition probabilities between
+states in the extended space:
+$$\tilde{p}\big((s'_{t+1}, c'_{t+1}) \mid (s_t, c_t), a_t\big) = \begin{cases} p(s'_{t+1} \mid s_t, a_t) & \text{if } c'_{t+1} = c_t + d(s_t) \\ 0 & \text{otherwise} \end{cases}$$
+and new rewards with penalties
+$$\tilde{r}(a_t \mid (s_t, c_t)) = \begin{cases} r(a_t \mid s_t) & \text{if } c_t + d(s_t) \le c_{\max} \\ r(a_t \mid s_t) - \Delta(c_t + d(s_t))/\gamma^t & \text{if } c_t \le c_{\max} \text{ and } c_t + d(s_t) > c_{\max} \\ r(a_t \mid s_t) - \Delta d(s_t)/\gamma^t & \text{if } c_t > c_{\max} \end{cases}$$
+where $\Delta$ is a positive scalar and $\Delta d(s_t)$ and $\Delta(c_t + d(s_t))$ are penalties given to the agent if the accumu-
+lated cost exceeds the upper bound $c_{\max}$. Under these
+
+Solving Constrained RL through Augmented State and Reward Penalties
+reward penalties, the accumulated reward for each tra-
+jectory $\tau = \{(s_0, a_0), \ldots, (s_T, a_T)\}$ can be written as
+$\tilde{R}(\tau) = \sum_t \gamma^t r(a_t \mid s_t)$ if $D(\tau) \le c_{\max}$ and
+$\tilde{R}(\tau) = \sum_t \gamma^t r(a_t \mid s_t) - \Delta D(\tau)$ if $D(\tau) > c_{\max}$, where $D(\tau)$ is
+the total cost of trajectory $\tau$, i.e., $D(\tau) = \sum_{s_t \in \tau} d(s_t)$. So,
+in fact, we penalize every trajectory that violates the cost
+constraint.
+We now consider the following extended MDP, which han-
+dles the constraints in a relaxed manner through penalties.
+$$\max_\pi \; \mathbb{E}\Big[\sum_{t=0}^{T} \gamma^t \tilde{r}(a_t \mid (s_t, c_t)) \,\Big|\, (s_0, c_0), \pi\Big] \tag{EMDP}$$
+where $c_0 = 0$. There are also other ways to penalize the
+rewards, allowing us to establish equivalences between the
+extended MDP and other risk-averse CMDPs, which we will
+discuss later in the next section.
+3.2.
Theoretical Properties +To demonstrate the generality and power in representation of +the reward penalties along with state augmentation in the un- +constrained MDP (EMDP), we provide theoretical properties +that map reward penalties to different types of constraints +(expected cost, VaR, CVaR, Worst-case cost): +(i) Proposition 3.1 states that if the penalty parameter ∆ = +0, then (EMDP) becomes the classical unconstrained +MDP. +(ii) Theorem 3.2 shows that if ∆ = ∞, then (EMDP) is +equivalent to a worst-case constrained MDP +(iii) Theorem 3.5 establishes a lower bound on ∆ from +which any solution to (EMDP) will satisfy the risk- +neural constraint in (RN-CMDP). +(iv) Theorem +3.6 +connects +(EMDP) +with +chance- +constrained MDP by providing a lower bound for ∆ +from which any solution to (EMDP) will satisfy a VaR +constraint P(� +t d(st) ≤ cmax) ≤ α. +(v) Theorems 3.6 and 3.8 further strengthen the above re- +sults by showing that, under some different reward set- +tings, (EMDP) is equivalent to a chance-constrained (or +VaR) or equivalent to a CVaR CMDP. +We now describe our theoretical results in detail. All the +proofs can be found in the appendix. We first state, in Propo- +sition 3.1, a quite obvious result saying that if we set the +penalty parameter ∆ = 0, then the MDP with augmented +state space becomes the original unconstrained MDP. +Proposition 3.1. If ∆ = 0, then (EMDP) is equivalent to +the unconstrained MDP maxπ E +��T +t=0 γtr(st, at)|s0, π +� +. +It can be seen that increasing ∆ will set more penalties to +trajectories whose costs exceed the maximum cost allowed +cmax, which also implies that (EMDP) would lower the prob- +abilities of taking these trajectories. So, intuitively, if we +raise ∆ to infinity, then (EMDP) will give policies that yield +zero probabilities to violating trajectories. We state this +result in Theorem 3.2 below. +Theorem 3.2 (Connection to worst-case CMDP). 
If we
+set $\Delta = \infty$, then if $\pi^*$ solves (EMDP), it also solves the
+following worst-case constrained MDP problem
+$$\max_\pi \; \mathbb{E}\Big[\sum_{t=0}^{T} \gamma^t r(s_t, a_t) \,\Big|\, s_0, \pi\Big] \quad \text{s.t.} \quad \sum_{s_t \in \tau} d(s_t) \le c_{\max}, \; \forall \tau \sim \pi. \tag{WC-CMDP}$$
+As a result, $\pi^*$ is feasible to the risk-neutral CMDP
+(RN-CMDP).
+The above theorem implies that if we set the penalties to
+be very large (e.g., $\infty$), then all the trajectories generated
+by the optimal policy $\pi^*$ will satisfy the constraint, i.e., the
+accumulated cost will not exceed $c_{\max}$. Such a conservative
+policy would be useful in critical environments where the
+agent is strictly not allowed to go beyond the maximum
+allowed cost $c_{\max}$. An example would be a routing problem
+for electric cars where the remaining energy must not
+run out before reaching a charging station or the
+destination. Note that the worst-case CMDP (WC-CMDP)
+would be non-stationary and history-dependent, i.e., there
+would be no stationary and history-independent policies
+that are optimal for the worst-case CMDP (WC-CMDP). This
+remark is easily seen, as at each stage one needs to consider
+the current accumulated cost to choose feasible actions. Thus,
+a policy that ignores the historical states and actions would
+not be optimal (or even not feasible) for the worst-case MDP.
+As a result, this worst-case CMDP cannot be represented by
+a standard constrained MDP formulation.
+Theorem 3.2 also tells us that one can get a feasible solution
+to the risk-neutral CMDP (RN-CMDP) by just raising $\Delta$ to
+infinity. In fact, $\Delta$ does not need to be infinite to achieve
+feasibility. Below we establish a lower bound for the penalty
+parameter $\Delta$ such that a solution to (EMDP) is always feasi-
+ble to the risk-neutral CMDP (RN-CMDP). Let us define $\Psi^*$
+as the optimal value of the unconstrained MDP problem
+$$\Psi^* = \max_\pi \; \mathbb{E}\Big[\sum_{t=0}^{T} \gamma^t r(s_t, a_t) \,\Big|\, s_0, \pi\Big],$$
+and let $\Psi$ be the optimal value of the worst-case CMDP
+(WC-CMDP).
+We define a conditional expectation +�Eπ [D(τ)| D(τ) ≤ cmax] as the expected cost over trajecto- +ries whose costs are less than cmax +�Eπ [D(τ)| D(τ) ≤ cmax] = +� +τ| D(τ)≤cmax +Pπ(τ)D(τ) + +Solving Constrained RL through Augmented State and Reward Penalties +where Pπ(τ) is the probability of τ under policy π. Before +presenting the bound, we first need two lemmas. Lemma 3.3 +establishes a condition under which a policy π is feasible to +the risk-neural CMDP. +Lemma +3.3. +Let +φ∗ += +cmax +− +maxπ +� +�Eπ[D(τ)| D(τ) ≤ cmax] +� +. Given any policy π, if +�Eπ[D(τ)| D(τ) > cmax] ≤ φ∗, then Eπ[D(τ)] ≤ cmax. +Lemma 3.4 below further provides an upper bound for the +expected cost of violating trajectories under an optimal pol- +icy given by the extended MDP reformulation (EMDP). +Lemma 3.4. Given ∆ > 0, let π∗ be an optimal solution +to (EMDP). We have +�Eπ∗ [D(τ)| D(τ) > cmax] ≤ Ψ∗ − Ψ +∆ +. +Using Lemmas 3.3 and 3.4, we are ready to state the main +result in Theorem 3.5 below. +Theorem 3.5 (Connection to the risk-neural CMDP). For +any ∆ ≥ Ψ∗−Ψ +φ∗ +, a solution to (EMDP) is always feasible to +the risk-neural CMDP (RN-CMDP). +To prove Lemmas 3.3, 3.4, we leverage the fact that the +objective of (EMDP) can be written equivalently as +Eπ +�� +t +γtr(st, at) +� +− ∆�Eπ [D(τ)| D(τ) > cmax] +(1) +which allows us to establish a relation between ∆ and +�Eπ∗ [D(τ)| D(τ) > cmax], where π∗ is an optimal policy +of (EMDP). The bounds then come from this relation. We +refer the reader to the appendix for detailed proofs. +There is also a lower bound for ∆ from which any solution +to (EMDP) always satisfies a chance constraint (or VaR). To +state this result, let us first define the following VaR CMDP, +for any risk level α ∈ [0, 1]. +max +π +E +� T +� +t=0 +γtr(st, at)|s0, π +� +s.t. +Pπ +� +(D(τ) > cmax +� +≤ α. +(VaR-CMDP) +We have the following theorem showing a connection be- +tween (EMDP) and the VaR CMDP above. +Theorem 3.6 (Connection to VaR CMDP). 
For any $\Delta \ge (\Psi^* - \Psi)/(\alpha c_{\max})$, a solution to (EMDP) is always feasi-
+ble to (VaR-CMDP).
+We also leverage Eq. 1 to prove the theorem by show-
+ing that when $\Delta$ is sufficiently large, the conditional ex-
+pectation $\tilde{\mathbb{E}}_{\pi^*}[D(\tau) \mid D(\tau) > c_{\max}]$ can be bounded from
+above ($\pi^*$ is an optimal policy of (EMDP)). We then
+can link this to the chance constraint by noting that
+$\tilde{\mathbb{E}}_{\pi^*}[D(\tau) \mid D(\tau) > c_{\max}] \ge c_{\max} P(D(\tau) > c_{\max})$.
+Theorem 3.6 tells us that one can just raise $\Delta$ to a suffi-
+ciently large value to meet a chance constraint of any risk
+level. Here, Theorem 3.6 only guarantees feasibility to
+(VaR-CMDP). Interestingly, if we modify the reward penal-
+ties by making them independent of the costs $d(s)$, then
+an equivalent mapping to (VaR-CMDP) can be obtained.
+Specifically, let us re-define the following reward for the
+extended MDP. That is, we replace the cost $d(s_t)$ by a con-
+stant. Theorem 3.7 below shows that (EMDP) is actually
+equivalent to a chance-constrained CMDP under the new
+reward setting.
+Theorem 3.7 (VaR equivalence). If we modify the reward
+penalties as
+$$\tilde{r}(a_t \mid (s_t, c_t)) = \begin{cases} r(a_t \mid s_t) & \text{if } c_t + d(s_t) \le c_{\max} \\ r(a_t \mid s_t) - \Delta(t+1)/\gamma^t & \text{if } c_t \le c_{\max} \text{ and } c_t + d(s_t) > c_{\max} \\ r(a_t \mid s_t) - \Delta/\gamma^t & \text{if } c_t > c_{\max} \end{cases}$$
+then, if $\pi^*$ is an optimal solution to (EMDP), there is
+$\alpha_\Delta \in \big[0, \frac{\Psi^* - \Psi}{\Delta T}\big]$ ($\alpha_\Delta$ depends on $\Delta$) such that $\pi^*$ is also
+optimal to (VaR-CMDP). Moreover, $\lim_{\Delta \to \infty} \alpha_\Delta = 0$.
+It can also be seen that Theorem 3.2 is a special case of
+Theorem 3.7 when $\Delta = \infty$.
+We finally connect (EMDP) with a risk-averse CMDP that
+has a CVaR intuition. The theorem below shows that, by
+slightly changing the reward penalties, (EMDP) actually
+solves a risk-averse CMDP problem.
+Theorem 3.8 (CVaR CMDP equivalence).
If we modify the
+reward penalties as
+$$\tilde{r}(a_t \mid (s_t, c_t)) = \begin{cases} r(a_t \mid s_t) & \text{if } c_t + d(s_t) \le c_{\max} \\ r(a_t \mid s_t) - \Delta(c_t + d(s_t) - c_{\max})/\gamma^t & \text{if } c_t \le c_{\max} \text{ and } c_t + d(s_t) > c_{\max} \\ r(a_t \mid s_t) - \Delta d(s_t)/\gamma^t & \text{if } c_t > c_{\max} \end{cases}$$
+then for any $\Delta > 0$, there is $\beta_\Delta \in \big[0, \frac{\Psi^* - \Psi}{\Delta}\big]$ ($\beta_\Delta$ de-
+pends on $\Delta$) such that any optimal solution to the extended
+CMDP (EMDP) is also optimal to the following risk-averse
+CMDP
+$$\max_\pi \; \mathbb{E}\Big[\sum_{t=0}^{T} \gamma^t r(s_t, a_t) \,\Big|\, s_0, \pi\Big] \quad \text{s.t.} \quad \mathbb{E}_{\tau \sim \pi}\big[(D(\tau) - c_{\max})^+\big] \le \beta_\Delta. \tag{CVaR-CMDP}$$
+Moreover, $\lim_{\Delta \to \infty} \beta_\Delta = 0$.
+
+Solving Constrained RL through Augmented State and Reward Penalties
+In practice, since $\Delta$ is just a scalar, one can just grad-
+ually increase it from 0 to get a desired policy. This
+indicates the generality of the unconstrained extended
+MDP formulation (EMDP). In summary, we show that
+(EMDP) brings risk-neutral, worst-case, VaR and CVaR
+CMDPs in (RN-CMDP), (WC-CMDP), (VaR-CMDP) and
+(CVaR-CMDP) under one umbrella.
+3.3. Multi-constrained CMDP
+We now discuss extension to CMDPs with multiple cost
+constraints (e.g., limited fuel and bounded risk) and show
+how the above theoretical results can be extended to the
+multi-constrained variants. A multi-constrained risk-neutral
+CMDP can be formulated as
+$$\max_\pi \; \mathbb{E}\Big[\sum_{t=0}^{T} \gamma^t r(s_t, a_t) \,\Big|\, s_0, \pi\Big] \quad \text{s.t.} \quad \mathbb{E}\Big[\sum_{t=0}^{T} d_k(s_t) \,\Big|\, s_0, \pi\Big] \le c^k_{\max}, \; \forall k \in [K] \tag{MRN-CMDP}$$
+where $[K]$ denotes the set $\{1, \ldots, K\}$. Similar to the single
+constraint case, to include cost functions in the rewards, we
+extend the state space to keep track of the accumulated costs
+as $\tilde{S} = \{(s, c^1, \ldots, c^K) \mid s \in S, c^k \in \mathbb{R}, \forall k \in [K]\}$ and
+define new transition probabilities as
+$$\tilde{p}\big((s_{t+1}, c^K_{t+1}) \mid (s_t, c^K_t), a_t\big) = \begin{cases} p(s_{t+1} \mid s_t, a_t) & \text{if } c^k_{t+1} = c^k_t + d_k(s_t), \; \forall k \in [K] \\ 0 & \text{otherwise} \end{cases}$$
+where $c^K_t = (c^1_t, \ldots, c^K_t)$ for notational simplicity.
The +new rewards are also updated in such a way that every +trajectory violating the constraints will be penalized. +�r(at|(st, cK +t )) = r(at|st) − +� +k∈[K] +∆kδk(ct), +where δk(ct), ∀k ∈ [K], are defined as follows. +δk(ct) = +� +� +� +� +� +� +� +� +� +0 if , ck +t + dk(st) ≤ ck +max +(ck +t + dk(st))/γt if ck +t ≤ ck +max, +ck +t + dk(st) ≥ ck +max +dk(st)/γt if ck +t > ck +max. +Here, we allow penalty parameters ∆k to be different over +constraints. We formulate the extended unconstrained MDP +as: +max +π +� +E +� T +� +t=0 +γt�r(at|(st, cK +t )) +���(s0, cK +0 ), π +�� +. +(2) +Similar to the single-constrained case, the reward penalties +allow us to write the objective function of the extended +MDP as +Eπ +�� +t +γtr(st, at) +� +− +� +k∈[K] +∆k�Eπ +� +Dk(τ)| Dk(τ) > ck +max +� +(3) +where Dk(τ) is the accumulated cost dk(st) on trajectory τ, +i.e., Dk(τ) = � +st∈τ dk(st). As a result, when ∆k grows, +the extended MDP will discount the second term of (3), thus +yielding policies that satisfy or even solve risk-neural or +risk-averse CMDP problems. Specifically, the following +results can be proved: +• When ∆k = ∞, ∀k ∈ [K], then (2) is equivalent to +worst-case CMDP (i.e., all the trajectories generated +by the policy will satisfy all the cost constraints). +• There are lower bounds for ∆k from which any so- +lution to (2) will be feasible to risk-neural and VaR +CMDP with multiple constraints. +• For any ∆k > 0, under different reward penalty set- +tings, (2) is equivalent to a multi-constrained CVaR +CMDP or equivalent to a multi-constrained VaR +CMDP. +All the detailed proofs and discussions can be found in the +appendix. +4. Safe RL Algorithms +In this section, we update existing RL methods to effectively +utilize the extended state space and reward penalties, while +considering RN-CMDP. Due to the theoretical properties +in the previous section, just by tweaking ∆, we can also +handle other Constrained MDPs. +4.1. 
Safe DQN
+Deep Q Network (DQN) (Mnih et al., 2015) is an efficient
+method to learn in primarily discrete action Reinforcement
+Learning problems. However, the original DQN does not
+consider safety constraints and cannot be applied to any of
+the CMDP variants.
+The main modifications in the updated algorithm, referred
+to as Safe DQN, are with regards to exploiting the extended
+state space and the reward penalties based on constraint
+violations. The pseudo code for the Safe DQN algorithm is
+provided in Algorithm 1.
+The impact of the extended state space on the algorithm can be
+observed in almost every line of the algorithm. The penalty
+for violation of constraints is applied through the reward (lines 9-15).
+When selecting an action (line
+4), Safe DQN does not consider the feasibility of the action with
+respect to cost. Instead, like in the original DQN, it is purely
+based on the current Q value. The assumption is that the
+
+Solving Constrained RL through Augmented State and Reward Penalties
+Algorithm 1 DQN with Extended State Space
+Initialization: Replay buffer $D$ with capacity $N$, action-
+value function $Q$ with weights $\theta$, target action-value function
+$\hat{Q}$ with weights $\theta^- = \theta$.
+1: for each episode do
+2:   Initialize with sequence $(s_0, c_0 = 0)$.
+3:   for each time step $t$ do
+4:     Select a random action $a_t$ with probability $\epsilon$, oth-
+erwise select $a_t = \arg\max_a Q((s_t, c_t), a; \theta)$.
+5:     Execute action $a_t$, observe $(s_{t+1}, c_{t+1})$, $r_t$.
+6:     Store $((s_t, c_t), a_t, r_t, (s_{t+1}, c_{t+1}))$ in $D$.
+7:     Update state-cost pair to $(s_{t+1}, c_{t+1})$.
+8:     Sample $((s_j, c_j), a_j, r_j, (s_{j+1}, c_{j+1}))$ from $D$.
+9:     if $c_j > c_{\max}$ then
+10:      $\tilde{r}_j = r(s_j) - \Delta d(s_j)/\gamma^t$
+11:    else if $c_{j+1} > c_{\max}$ then
+12:      $\tilde{r}_j = r(s_j) - \Delta(c_j + d(s_j))/\gamma^t$
+13:    else
+14:      $\tilde{r}_j = r(s_j)$
+15:    end if
+16:    {"mask" indicates if the episode terminates}
+17:    $y_j = \tilde{r}_j + \gamma \cdot \max_{a'} \hat{Q}((s_{j+1}, c_{j+1}), a'; \theta^-) \cdot \text{mask}_{j+1}$.
+18:    Update $\theta$ using $l = (y_j - Q((s_j, c_j), a_j; \theta))^2$.
+19:    Every $C$ steps reset $\hat{Q} = Q$.
+20: +end for +21: end for +penalties accrued due to violation (in lines 9-12) will be suf- +ficient to force the agent away from cost infeasible actions. +Once the new rewards are obtained (based on considering +reward penalties), the Q network is updated using the mean +square error loss on line 17. +4.2. Safe SAC +Soft Actor-Critic (SAC) (Haarnoja et al., 2018) is an off- +policy algorithm that learns a stochastic policy for discrete +and continuous action RL problems. SAC employs policy +entropy in conjunction with value function to ensure more +exploration. Q value function in SAC is defined as follows: +Q(s, a) =E[ +∞ +� +t=0 +γtr(st, at, st+1)+ +α +∞ +� +t=1 +γtH(π(·|st))|s0 = s, a0 = a] +(4) +where H(.) denotes the entropy of the action distribution +for a given state, st). SAC also employs the double Q- +trick, where we use the minimum of two Q value functions +(Qi(.), i ∈ 1, 2) as the target, y to avoid overestimation. +y =r(s, a, s′) + γ min +i=1,2 Qi(s′, ˜a′) − α log π(˜a′|s′) +(5) +where ˜a′ ∼ π(·|s′). +Our algorithm, referred to as Safe SAC builds on SAC by +having an extended state space and a new action selection +strategy that exploits the extended state space. In Safe +DQN, we primarily rely on violation of constraints, so as to +learn about the bad trajectories and avoid them. While such +approach works well for discrete action settings and in an +off policy setting, it is sample inefficient and can be slow +for actor-critic settings. In Safe SAC, apart from reward +penalty, we also focus on learning about feasible actions, +which are generated through the use of the cost accumulated +so far (available as part of the state space) and a Q value on +the future cost. +Formally, we define the optimization to select safe actions +(at each decision epoch) in Equation 6 and show safe SAC +algorithm in Algorithm 2. Extending on the double Q trick +for reward, we also have double Q for future cost, referred to +as {Qi +d}i∈1,2. 
At each step, the objective is to pick an action +that will maximize the reward Q value for the extended +state, action minus the weighted entropy of the action. The +constraint here is to pick only those actions, which will +not violate the cost constraint. In the left hand side of the +constraint, we calculate the overall expected cost from: (a) +(estimate) of the future cost, from the current state; (b) +(actual) cost incurred so far; and (c) subtracting the (actual) +cost incurred at the current step, as it is part of both (a) and +(b); +arg max +a +min +i=1,2 Qi((s, c), a) − α log π(a|(s, c)) +s.t. max +i=1,2 Qi +D((s, c), a) + c − d((s, c)) ≤ cmax, ∀(s, c) +(6) +Algorithm 2 (in appendix) provides the pseudo code for +Safe SAC. +5. Experiment +We empirically compare the performance of our approaches +on both discrete and continuous environments with respect +to expected reward and expected cost achieved against lead- +ing benchmark approaches. For an RL benchmark, we use +the original DQN (Mnih et al., 2015) and it is referred +to as unsafe DQN, as it does not account for cost con- +straints. For leading Constrained RL benchmarks, we use +BVF (Backward Value Function) (Satija et al., 2020b) and +Lyapunov (Chow et al., 2019a). We mostly show results +with respect to expected cost constraint, as there are many +model free approaches that solve the RN-CMDP problem. +For one example (Safety Gym), we also provide comparison +when a CVaR constraint is provided. The performance val- +ues (expected cost and expected reward) along with standard +deviation in each experiment are averaged over 5 runs. + +Solving Constrained RL through Augmented State and Reward Penalties +Figure 1. Gridworld environment and reward, cost comparison of different approaches +Figure 2. Highway environment and reward, cost comparison of different approaches +5.1. 
GridWorld: RN-CMDP +For a discrete state and discrete action environment, we +consider the stochastic 2D grid world problem introduced +previously (Leike et al., 2017; Chow et al., 2018; Satija +et al., 2020b; Jain et al., 2021). The grid on the left of Figure +1 shows the environment. The agent starts at the bottom +right corner of the map (green cell) and the objective is to +move to the goal at the bottom left corner (blue cell). The +agent can only move in the adjoining cells in the cardinal +directions. Occasionally agent will execute a random action +with probability p = 0.05 instead of the one selected by +the agent. It gets a reward of +100 on reaching the goal, +and a penalty of -1 at every time step. There are a number +of pits in the map (red cell) and agent gets a random cost +ranging from 1 to 1.5 on passing through any pit cell. We +consider an 8x8 grid and the maximum time horizon is 200 +steps, after which the episode terminates. This modified +GridWorld environment is challenging because agent can +travel to destination with a short path with a high cost, but if +it wishes to travel safely, it needs to explore enough to find +a safe path which is far from the shortest one. We set the +expected cost threshold, cmax = 2, meaning agent could +pass at most one pit. For discrete state environments, we +use the discrete SAC in (Christodoulou, 2019). +Figure 1 shows the performance of each method with respect +to expected reward (score) and expected cost (constraint). +Here are the key observations: +• With respect to expected reward, among safe approaches, +Lyapunov achieves the highest reward. However, it +violates the expected cost constraint by more than twice +the cost constraint value. +• Safe SAC and Safe DQN achieve similar expected re- +ward values, though Safe SAC reaches there faster. This +high expected reward value is achieved while satisfying +the expected cost constraint after 1000 episodes. 
+• The other constrained RL approach, BVF achieved the +lowest value while not being able to satisfy the expected +cost constraint. +• As expected, Unsafe DQN achieved the highest expected +reward but was unable to satisfy the expected cost con- +straint. +5.2. Highway Environment:RN-CMDP +Inspired by experiment in GPIRL (Levine et al., 2011), we +test our safe methods in the highway environment (Leurent, +2018) of Figure 2. The task in highway environment is to +navigate a car on a four-lane highway with all other vehicles + +HAverage Score in each Episode +100 +80 +60 +Score +40 +20 +0 +Unsafe DQN +Safe DQN +BVF +-20 +Safe SAC +Lyapunov +-40 +0 +2000 +4000 +6000 +8000 +10000 +12000 +14000 +EpisodeAverage Constraint in each Episode +Unsafe DQN +Safe DON +15.0 +BVF +Safe SAC +12.5 +Lyapunov +10.0 +Constraint +7.5 +5.0 +2.5 +0.0 +0 +2000 +4000 +6000 +8000 +10000 +12000 +14000 +EpisodeAverage Score in each Episode +22.5 +20.0 +17.5 +Score +15.0 +12.5 +Unsafe DQN +10.0 +Safe DQN +BVF +7.5 +Safe SAC +Lyapunov +2000 +4000 +6000 +8000 +10000 +12000 +14000 +0 +EpisodeAverage Constraint in each Episode +10 +8 +Constraint +6 +4 +Unsafe DQN +Safe DQN +BVF +2 +Safe SAC +Lyapunov +2000 +4000 +6000 +8000 +10000 +12000 +14000 +0 +EpisodeSolving Constrained RL through Augmented State and Reward Penalties +Figure 3. Safety Gym Environment and reward, cost comparison of different approaches +acting randomly. The goal for the agent is to maximize its +reward by travelling on the right lane at the highest speed, +vmax. However, to ensure safety, we set the constraint on +the time the agent drives faster than a given speed in the +rightmost lane. +Figure 2 shows the expected reward and expected cost per- +formance of our safe methods compared to that of the bench- +marks. Safe SAC and Safe DQN were able to get high ex- +pected rewards while satisfying the expected cost constraint. +We also provide results on a highway merge environment in +the appendix. +5.3. 
Safety Gym Environment: CVaR-CMDP +In this environment, we intend to compare the performance +of our safe methods with a CVaR optimizing CMDP method, +i.e., WCSAC (Yang et al., 2021). We test all the methods on +the same environment from (Yang et al., 2021) - StaticEnv +in Safety Gym (Ray et al., 2019). The environment is shown +in Figure 3. The point agent has two types of actions: one +is for turning and another is for moving forward/backward. +The objective is to reach the goal position while trying to +avoid hazardous areas. The agent gets a reward of r − 0.2 +in each time step, where r is an original reward signal of +Safety Gym (distance towards goal plus a constant for being +within range of goal) while -0.2 functions as a time penalty. +In each step, if the agent is located in the hazardous area, it +gets a cost of 1. We set cmax = 8, meaning agent could stay +in hazardous area for at most 8 time steps. For risk level α +in WCSAC, we set α = 0.9 and use the almost risk-neutral +WCSAC, which is proven to reach the best performance in +both reward and cost in experiment. +We show the results in Figure 3. As can be seen from the +figure, Safe SAC is able to achieve similar performance +to that of WCSAC. Safe DQN was unable to handle this +environment due to large size of state. For BVF, although +it reaches a good performance in reward, it violates the +constraint for many episodes before converging. +6. Conclusion +In this paper, we have provided a very generic and scalable +mechanism for handling a wide variety of policy based cost +constraints (expected cost, worst-case cost, VaR, CVaR) in +Constrained MDPs. Lagrangian based approaches, which +penalize with respect to expected cost are unable to as- +sign credit appropriately for a cost constraint violation, as +expected cost averages over all trajectories. 
Instead, we +propose to penalize with respect to individual reward while +maintaining a cost augmented state, thereby providing pre- +cise credit assignment with regards to cost constraint vio- +lations. We theoretically demonstrate that this simple cost +augmented state and reward penalized MDP (referred to +as EMDP) can represent all the aforementioned cost con- +straints. We then provide safety aware RL approaches, Safe +DQN and Safe SAC, which are able to outperform leading +expected cost constrained RL approaches (Lyapunov and +BVF) while at the same time providing similar performance +to leading approach for CVaR constrained RL (WCSAC). +References +Abe, N., Melville, P., Pendus, C., Reddy, C. K., Jensen, +D. L., Thomas, V. P., Bennett, J. J., Anderson, G. F., +Cooley, B. R., Kowalczyk, M., Domick, M., and Gar- +dinier, T. Optimizing debt collections using constrained +reinforcement learning. +In Proceedings of the 16th +ACM SIGKDD International Conference on Knowledge +Discovery and Data Mining, KDD ’10, pp. 75–84, +New York, NY, USA, 2010. Association for Comput- +ing Machinery. +ISBN 9781450300551. +doi: +10. +1145/1835804.1835817. URL https://doi.org/ +10.1145/1835804.1835817. +Achiam, J., Held, D., Tamar, A., and Abbeel, P. Constrained + +StaticEnv +Goal +Hazard +AgentAverage Score in each Episode +0.6 +0.4 +0.2 +Unsafe DQN +Safe DON +Score +BVF +0.0 +Safe SAC +Lyapunov +-0.2 +N +WCSAC +-0.4 +M +-0.6 +20 +40 +60 +80 +0 +100 +EpisodeAverage Constraint in each Episode +40 +Unsafe DQN +Safe DQN +35 +BVF +Safe SAC +30 +Lyapunov +WCSAC +25 +Constraint +20 +15 +10 +5 +0 +20 +40 +60 +80 +100 +0 +EpisodeSolving Constrained RL through Augmented State and Reward Penalties +policy optimization. CoRR, abs/1705.10528, 2017. URL +http://arxiv.org/abs/1705.10528. +Altman, E. +Constrained Markov decision processes: +stochastic modeling. Routledge, 1999. +Chow, Y., Pavone, M., Sadler, B. M., and Carpin, S. 
Trading +safety versus performance: Rapid deployment of robotic +swarms with robust performance constraints. +CoRR, +abs/1511.06982, 2015a. URL http://arxiv.org/ +abs/1511.06982. +Chow, Y., Tamar, A., Mannor, S., and Pavone, M. Risk- +sensitive and robust decision-making: a cvar optimization +approach. CoRR, abs/1506.02188, 2015b. URL http: +//arxiv.org/abs/1506.02188. +Chow, +Y., +Nachum, +O., +Duenez-Guzman, +E., +and +Ghavamzadeh, M. A lyapunov-based approach to safe +reinforcement learning. Advances in neural information +processing systems, 31, 2018. +Chow, Y., Nachum, O., Faust, A., Duenez-Guzman, E., +and Ghavamzadeh, M. +Lyapunov-based safe policy +optimization for continuous control. +arXiv preprint +arXiv:1901.10031, 2019a. +Chow, Y., Nachum, O., Faust, A., Ghavamzadeh, M., +and Du´e˜nez-Guzm´an, E. A. +Lyapunov-based safe +policy optimization for continuous control. +CoRR, +abs/1901.10031, 2019b. URL http://arxiv.org/ +abs/1901.10031. +Christodoulou, P. Soft actor-critic for discrete action settings. +arXiv preprint arXiv:1910.07207, 2019. +Di Castro, D., Tamar, A., and Mannor, S. Policy gradients +with variance related risk criteria, 2012. URL https: +//arxiv.org/abs/1206.6404. +El Chamie, M., Yu, Y., and Ac¸ıkmes¸e, B. Convex synthesis +of randomized policies for controlled markov chains with +density safety upper bound constraints. In 2016 American +Control Conference (ACC), pp. 6290–6295, 2016. doi: +10.1109/ACC.2016.7526658. +G´abor, Z., Kalm´ar, Z., and Szepesv´ari, C. Multi-criteria +reinforcement learning. In ICML, volume 98, pp. 197– +205, 1998. +Haarnoja, T., Zhou, A., Hartikainen, K., Tucker, G., Ha, +S., Tan, J., Kumar, V., Zhu, H., Gupta, A., Abbeel, P., +et al. Soft actor-critic algorithms and applications. arXiv +preprint arXiv:1812.05905, 2018. +Hou, P., Yeoh, W., and Varakantham, P. Revisiting risk- +sensitive mdps: New algorithms and results. 
In Pro- +ceedings of the International Conference on Automated +Planning and Scheduling, volume 24, pp. 136–144, 2014. +Jain, A., Khetarpal, K., and Precup, D. Safe option-critic: +learning safety in the option-critic architecture. +The +Knowledge Engineering Review, 36, 2021. +Junges, S., Jansen, N., Dehnert, C., Topcu, U., and Katoen, +J. Safety-constrained reinforcement learning for mdps. +CoRR, abs/1510.05880, 2015. URL http://arxiv. +org/abs/1510.05880. +Leike, J., Martic, M., Krakovna, V., Ortega, P. A., Everitt, +T., Lefrancq, A., Orseau, L., and Legg, S. Ai safety +gridworlds. arXiv preprint arXiv:1711.09883, 2017. +Leurent, E. +An environment for autonomous driv- +ing decision-making. +https://github.com/ +eleurent/highway-env, 2018. +Levine, S., Popovic, Z., and Koltun, V. Nonlinear inverse re- +inforcement learning with gaussian processes. Advances +in neural information processing systems, 24, 2011. +Lucas, A. and Klaassen, P. +Extreme returns, downside +risk, and optimal asset allocation. Journal of Portfolio +Management, 25(1):71, 1998. +Mastronarde, N. and van der Schaar, M. Fast reinforcement +learning for energy-efficient wireless communications. +CoRR, abs/1009.5773, 2010. URL http://arxiv. +org/abs/1009.5773. +Mnih, V., Kavukcuoglu, K., Silver, D., Rusu, A. A., Veness, +J., Bellemare, M. G., Graves, A., Riedmiller, M., Fidje- +land, A. K., Ostrovski, G., et al. Human-level control +through deep reinforcement learning. nature, 518(7540): +529–533, 2015. +Moldovan, T. M. and Abbeel, P. Safe exploration in markov +decision processes. CoRR, abs/1205.4810, 2012. URL +http://arxiv.org/abs/1205.4810. +Ono, M., Pavone, M., Kuwata, Y., and Balaram, J. Chance- +constrained dynamic programming with application to +risk-aware robotic space exploration. Auton. Robots, 39 +(4):555–571, dec 2015. ISSN 0929-5593. doi: 10.1007/ +s10514-015-9467-7. URL https://doi.org/10. +1007/s10514-015-9467-7. +Ray, A., Achiam, J., and Amodei, D. 
Benchmarking safe ex- +ploration in deep reinforcement learning. arXiv preprint +arXiv:1910.01708, 7:1, 2019. +Rockafellar, R. T., Uryasev, S., et al. Optimization of condi- +tional value-at-risk. Journal of risk, 2:21–42, 2000. +Satija, H., Amortila, P., and Pineau, J. Constrained markov +decision processes via backward value functions. +In +ICML, 2020a. + +Solving Constrained RL through Augmented State and Reward Penalties +Satija, H., Amortila, P., and Pineau, J. Constrained markov +decision processes via backward value functions. In In- +ternational Conference on Machine Learning, pp. 8502– +8511. PMLR, 2020b. +Yang, Q., Sim˜ao, T. D., Tindemans, S. H., and Spaan, M. T. +Wcsac: Worst-case soft actor critic for safety-constrained +reinforcement learning. In AAAI, pp. 10639–10646, 2021. + +Solving Constrained RL through Augmented State and Reward Penalties +A. SAC Pseudocode +Algorithm 2 provides the pseudocode for the Safe SAC algorithm. +Algorithm 2 SAC with Extended State Space +1: Initialize: policy network π with weight θ. +2: Value Function: Q1, Q2 with weights φ1, φ2, target Q value functions Qtarg,1, Qtarg,2 with weights φtarg +1 += +φ1, φtarg +2 += φ2. +3: Cost Function: Q1 +D, Q2 +D with weights θ1,D, θ2,D, target cost functions Qtarg,1 +D +, Qtarg,2 +D +with weights θtarg +1,D += +θ1,D, θtarg +2,D = φ2,D. +4: for episode=1,2,...,N do +5: +Get initial state-cost pair (s0, c0 = 0); t ← 1 +6: +while t ≤ T do +7: +tstart ← t +8: +while t ≤ tstart + n or t == T do +9: +Select action at using Equation 6. +10: +Execute at, observe (st+1, ct+1) and rt. 
+11: +t ← t + 1 +12: +end while +13: +{Calculate targets for each network:} +14: +˜rt ← if ct > cmax then rt − ∆dt/γt elif ct+1 > cmax then rt − ∆(ct + dt)/γt else rt +15: +R ← if t == T then 0 else ˜rt + γ mini=1,2 Qtarg,i((st+1, ct+1), ˜a′) − α log πθ(˜a′), ˜a′ ∼ πθ((st+1, ct+1)) +16: +RD ← if t == T then 0 else maxi=1,2 Qtarg,i +D +((st+1, ct+1), at+1; θD) +17: +{Update networks} +18: +for i ∈ {t − 1, ..., tstart} do +19: +R ← ri + αR, RD ← di + αRD +20: +for j = 1, 2 do +21: +dφj ← dφj + ∂(R − Qj)2/∂φj +22: +dθj,D ← dθj,D + ∂(RD − Qj +D)2/∂θj,D +23: +end for +24: +if the policy is safe then +25: +dθ ← dθ + ∇θ log π(ai)(minj=1,2 Qtarg,j − α log π(ai)) +26: +else +27: +dθ ← dθ − ∇θ log π(ai)RD +28: +end if +29: +end for +30: +{Update target networks} +31: +end while +32: end for +B. Proofs +B.1. Proof of Theorem 3.2 +Theorem 3.2. +If we set ∆ = ∞, then if π∗ solves (EMDP), it also solves the following worst-case constrained MDP +problem +max +π +E +� T +� +t=0 +γtr(st, at)|s0, π +� +s.t. +� +st∈τ +d(st) ≤ cmax, ∀τ ∼ π. +As a result, π∗ is feasible to the risk-neutral CMDP (RN-CMDP). +Proof. We first see that there is a unique mapping between a trajectory τ = {s0, . . . , sT } from the original MDP to a + +Solving Constrained RL through Augmented State and Reward Penalties +trajectory of the extended MDP τ ′ = {(s0, c0), (s1, c1) . . . , (sT , cT )} with c0 = 0 and ct = �t−1 +i=0 d(st). 
Under the reward +penalties, we can write the objective of the extended MDP as +E +� T +� +t=0 +γtr(at|st, ct)|s0, π +� += +� +τ ′={(st,ct)}∼π +Pπ(τ ′) +�� +t +γt�r(at|st, ct) +� += +� +τ={s0,s1,...}∼π +D(τ)≤cmax +Pπ(τ) +�� +t +γtr(st, at) +� ++ +� +τ={s0,s1,...}∼π +D(τ)>cmax +Pπ(τ) +�� +t +γtr(st, at) − ∆ +� +t +d(st) +� += Eπ +�� +t +γtr(st, at) +� +− ∆ +� +τ∼π +D(τ)>cmax +Pπ(τ)D(τ) +(7) +As a result, we can rewrite the MDP problem (EMDP) as +max +π +� +� +� +� +� +Eπ +�� +t +γtr(st, at) +� +− ∆ +� +τ∼π +D(τ)>cmax +Pπ(τ)D(τ) +� +� +� +� +� +(8) +So, if we set ∆ = ∞, to maximize the expected reward, we need to seek a policy that assigns zero probabilities for all +the trajectories τ such that D(τ) > cmax. Let Π be the set of policies satisfying that condition (and assume that Π is not +empty), i.e., for any policy π ∈ Π and any trajectory τ such that D(τ) > cmax, Pπ(τ) = 0. This implies that when ∆ = ∞, +(8) is equivalent to +Eπ∈Π +�� +t +γtr(st, at) +� +which is also the worst-case CMDP problem. +B.2. Proof of Lemma 3.3 +Lemma 3.3. Let φ∗ = cmax − maxπ +� +�Eπ[D(τ)| D(τ) ≤ cmax] +� +. Given any policy π, if �Eπ[D(τ)| D(τ) > cmax] ≤ φ∗, +then Eπ[D(τ)] ≤ cmax. +Proof. For a policy π satisfying �Eπ[D(τ)| D(τ) > cmax] ≤ φ∗, we have +� +τ| D(τ)>cmax +Pπ(τ)D(τ) ≤ cmax − max +π +� +�Eπ[D(τ)| D(τ) ≤ cmax] +� +, +which is equivalent to +� +τ| D(τ)>cmax +Pπ(τ)D(τ) + max +π +� +�Eπ[D(τ)| D(τ) ≤ cmax] +� +≤ cmax +implying +cmax ≥ +� +τ| D(τ)>cmax +Pπ(τ)D(τ) + +� +τ| D(τ)≤cmax +Pπ(τ)D(τ) = Eπ[D(τ)] +which is the desired inequality. +B.3. Proof of Lemma 3.4 +Lemma 3.4. Given ∆ > 0, let π∗ be an optimal solution to (EMDP). We have +�Eπ∗ [D(τ)| D(τ) > cmax] ≤ Ψ∗ − Ψ +∆ +. + +Solving Constrained RL through Augmented State and Reward Penalties +Proof. We first note that, from (8), we can write +π∗ = argmaxπ +� +� +� +� +� +Eπ +�� +t +γtr(st, at) +� +− ∆ +� +τ +D(τ)≥cmax +Pπ(τ)D(τ) +� +� +� +� +� +Let π be an optimal policy to the worst-case CMDP (WC-CMDP). 
Since π is also feasible to the extended MDP (EMDP), +we have +Eπ∗ +�� +t +γtr(st, at) +� +− ∆ +� +τ +D(τ)≥cmax +Pπ∗(τ)D(τ) ≥ Eπ +�� +t +γtr(st, at) +� += Ψ +(9) +Moreover, since Ψ∗ is the optimal value of the original unconstrained problem Ψ∗ = maxπ E +��T +t=0 γtr(st, at)|s0, π +� +, we +should have +Ψ∗ ≥ Eπ∗ +�� +t +γtr(st, at) +� +(10) +Combining (24) and (10) gives +Ψ∗ − ∆ +� +τ +D(τ)≥cmax +Pπ∗(τ)D(τ) ≥ Ψ, +implying +� +τ| D(τ)≥cmax +Pπ∗(τ)D(τ) ≤ Ψ∗ − Ψ +∆ +, +which is the desired inequality. +B.4. Proof of Theorem 3.5 +Theorem 3.5. For any ∆ ≥ Ψ∗−Ψ +φ∗ +a solution to (EMDP) is always feasible to the risk-neutral CMDP (RN-CMDP). +Proof. The theorem is a direct result from Lemmas 3.3 and 3.4. That is, by selecting ∆ ≥ Ψ∗−Ψ +φ∗ +, from Lemm 3.4 we can +guarantee that +� +τ| D(τ)≥cmax +Pπ∗(τ)D(τ) ≤ Ψ∗ − Ψ +∆ +≤ φ∗, +(11) +where π∗ is an optimal policy to (EMDP). From Lemma 3.4, (11) also implies that π∗ is also feasible to the risk-neutral +CMDP (RN-CMDP), as desired. +B.5. Proof of Theorem 3.6 +Theorem 3.6. For any ∆ ≥ (Ψ∗ − Ψ)/(αcmax), a solution to (EMDP) is always feasible to (VaR-CMDP). +Proof. We use Lemma 3.4 to see that if π∗ is a solution to (EMDP), then it satisfies +�Eτ∼π∗ +� +D(τ)| D(τ) > cmax +� +≤ Ψ∗ − Ψ +∆ +. +(12) +On the other hand, we have +�Eτ∼π∗ +� +D(τ)| D(τ) > cmax +� += +� +τ|D(τ)>cmax +Pπ∗(τ)D(τ) +> cmax +� +τ|D(τ)>cmax +Pπ∗(τ) += cmaxPπ∗(D(τ) > cmax)) +(13) + +Solving Constrained RL through Augmented State and Reward Penalties +Thus, if we select ∆ ≥ (Ψ∗ − Ψ)/(αcmax), we will have the following chain of inequalities. +α ≥ Ψ∗ − Ψ +∆cmax +(a) +≥ +1 +cmax +�Eτ∼π∗ +� +D(τ)| D(τ) > cmax +� +(b) +≥ Pπ∗(D(τ) > cmax). +where (a) is due to (12) and (b) is due to (13). This implies that π∗ is feasible to the chance-constrained MDP (VaR-CMDP). +We complete the proof. +B.6. Proof of Theorem 3.7 +Theorem 3.7. 
If we define the reward penalties as +� +� +� +� +� +� +� +� +� +�r(at|(st, ct)) = r(at|st) if ct + d(st) ≤ cmax +�r(at|(st, ct)) = r(at|st) − ∆(t + 1)/γt +if ct ≤ cmax and ct + d(st) > cmax +�r(at|(st, ct)) = r(at|st) − ∆/γt if ct > cmax +then if π∗ is an optimal solution to (EMDP), then there is α∆ ∈ [0; Ψ∗−Ψ +∆T ] (α is dependent of ∆) such that π∗ is also optimal +to (VaR-CMDP). Moreover lim∆→∞ α∆ = 0. +Proof. Under the reward setting, we can write the objective of (EMDP) as +Eπ +� T +� +t=0 +γtr(at|st, ct)|s0 +� += +� +τ ′={(st,ct)}∼π +Pπ(τ ′) +�� +t +γt�r(at|st, ct) +� += +� +τ={s0,s1,...}∼π +D(τ)≤cmax +Pπ(τ) +�� +t +γtr(st, at) +� ++ +� +τ={s0,s1,...}∼π +D(τ)>cmax +Pπ(τ) +�� +t +γtr(st, at) − ∆T +� += Eπ +�� +t +γtr(st, at) +� +− ∆TPπ(D(τ) > cmax). +(14) +We now show that if π∗ is an optimal policy to (EMDP), then it is also optimal for (VaR-CMDP) with where α∆ = +Pπ∗(D(τ) > cmax). By contradiction, let us assume that it is not the case. Let π be optimal for (VaR-CMDP). We first see +that π∗ is feasible to (VaR-CMDP), thus +Eπ∗ +� T +� +t=0 +γtr(st, at) +� +< Eπ +� T +� +t=0 +γtr(st, at) +� +. +(15) +Moreover, since π is feasible to (VaR-CMDP), we have: +Pπ(D(τ) > cmax) ≤ Pπ∗(D(τ) > cmax). +(16) +Combine (15) and (16) and (14), it can be seen that π∗ is not an optimal policy to (EMDP), which is contrary to our initial +assumption. So, π∗ is an optimal policy for the (VaR-CMDP). We now prove that lim∆→∞ α∆ = 0. To this end, we first +see that if �π is an optimal solution to the worst-case CMDP (WC-CMDP), then P�π(D(τ) > cmax) = 0. Thus, we have the + +Solving Constrained RL through Augmented State and Reward Penalties +following chain of inequalities +Ψ∗ − ∆Tα∆ ≥ Eπ∗ +�� +t +γtr(st, at) +� +− ∆TPπ∗(D(τ) > cmax) +≥ E�π +�� +t +γtr(st, at) +� +− ∆TP�π(D(τ) > cmax) += E�π +�� +t +γtr(st, at) +� += Ψ +Thus +α∆ ≤ Ψ∗ − Ψ +∆T +. +implying lim∆→∞ α∆ = 0. +B.7. Proof of Theorem 3.8 +Theorem 3.8. 
If we define the reward penalties as +� +� +� +� +� +� +� +� +� +�r(at|(st, ct)) = r(at|st) if ct + d(st) ≤ cmax +�r(at|(st, ct)) = r(at|st) − ∆(ct + d(st) − cmax)/γt +if ct ≤ cmax and ct + d(st) > cmax +�r(at|(st, ct)) = r(at|st) − ∆d(st)/γt if ct > cmax +then for any ∆ > 0, there is β∆ ∈ [0; Ψ∗−Ψ +∆ +] (β∆ is dependent of ∆) such that any optimal solution to the extended CMDP +(EMDP) is also optimal to the following risk-averse CMDP +max +π +E +� T +� +t=0 +γtr(st, at)|s0, π +� +s.t. +Eτ∼π +� +(D(τ) − cmax)+� +≤ β∆. +(CVaR-CMDP) +Moreover, lim∆→∞ β∆ = 0. +Proof. We first see that, under the reward penalties defined above, the objective of (EMDP) becomes +Eπ +� T +� +t=0 +γtr(at|st, ct)|s0 +� += +� +τ ′={(st,ct)}∼π +Pπ(τ ′) +�� +t +γt�r(at|st, ct) +� += +� +τ={s0,s1,...}∼π +D(τ)≤cmax +Pπ(τ) +�� +t +γtr(st, at) +� ++ +� +τ={s0,s1,...}∼π +D(τ)>cmax +Pπ(τ) +�� +t +γtr(st, at) − ∆ +�� +t +d(st) − cmax +�� += Eπ +�� +t +γtr(st, at) +� +− ∆ +� +τ∼π +D(τ)>cmax +Pπ(τ)(D(τ) − cmax) += Eπ +�� +t +γtr(st, at) +� +− ∆Eτ∼π +� +(D(τ) − cmax)+� +(17) +We now show that if π∗ is an optimal policy to (EMDP), then it is also optimal for (CVaR-CMDP) with where β∆ = +Eτ∼π∗ +� +(D(τ) − cmax)+� +. By contradiction, let us assume that π∗ is not optimal for (CVaR-CMDP). We then let π be +optimal for (CVaR-CMDP). We first see that π∗ is feasible to (CVaR-CMDP), thus +Eπ∗ +� T +� +t=0 +γtr(st, at) +� +< Eπ +� T +� +t=0 +γtr(st, at) +� +(18) + +Solving Constrained RL through Augmented State and Reward Penalties +Moreover, since π is feasible to (CVaR-CMDP), we have: +Eτ∼π +� +(D(τ) − cmax)+� +≤ β∆ = Eτ∼π∗ +� +(D(τ) − cmax)+� +(19) +Combine (18) and (19) we get +Eπ∗ +� T +� +t=0 +γtr(st, at) +� +− ∆Eτ∼π∗ +� +(D(τ) − cmax)+� +< Eπ +� T +� +t=0 +γtr(st, at) +� +− ∆�Eτ∼π +� +(D(τ) − cmax)+� +(20) +Using (17), (20) implies that �π yields a strictly better objective value to the extended MDP, as compared to π∗, which +is contrary to the assumption that π∗ is optimal for (EMDP). 
So, π∗ should be an optimal policy for the (CVaR-CMDP). +We now prove that lim∆→∞ β∆ = 0. To this end, we first see that if �π is an optimal solution to the worst-case CMDP +(WC-CMDP), then �Eτ∼�π +� +(D(τ) − cmax)+� += 0. Thus, we have the following chain of inequalities: +Ψ∗ − ∆β∆ ≥ Eπ∗ +�� +t +γtr(st, at) +� +− ∆Eτ∼π∗ � +(D(τ) − cmax)+� +≥ E�π +�� +t +γtr(st, at) +� +− ∆Eτ∼�π +� +(D(τ) − cmax)+� += E�π +�� +t +γtr(st, at) +� +(21) +We recall that E�π [� +t γtr(st, at)] = Ψ (i.e., objective value of the worst-case CMDP), thus, +β∆ ≤ Ψ∗ − Ψ +∆ +, +implying lim∆→∞ β∆ = 0 as desired. +C. Multi-constrained MDP +We present, in the following, a series of theoretical results for the multi-constrained MDP discussed in the main body of the +paper. Similar to the single-constrained case, we will show that +• If ∆k = ∞ for all k ∈ [K], then (2) is equivalent to a worst-case CMDP. +• There is a lower bound for each ∆k such that any optimal policy to (2) will always be feasible to a given risk-neutral or +chance-constrained MDP. +• By employing different reward penalty settings, (2) is equivalent to a VaR or CVaR CMDP. +Since all the proofs are similar to those in the single-constrained case, we keep them brief. +Proposition C.1. If we set ∆k = ∞ for all k ∈ [K], then the extended MDP is equivalent to the following worst-case +CMDP +max +π +E +� T +� +t=0 +γtr(st, at)|s0, π +� +s.t. +� +st∈τ +dk(st) ≤ ck +max, ∀τ ∼ π, ∀k ∈ [K] +(22) + +Solving Constrained RL through Augmented State and Reward Penalties +Proof. Similar to the proof of Theorem 3.2, we write the objective of the extended MDP as +E +� T +� +t=0 +γtr(at|st, cK +t )|s0, π +� += +� +τ ′={(st,cK +t )}∼π +Pπ(τ ′) +�� +t +γt�r(at|st, cK +t ) +� += +� +τ={s0,s1,...}∼π +D(τ)≤cmax +Pπ(τ) +�� +t +γtr(st, at) +� ++ +� +τ={s0,s1,...}∼π +D(τ)>cmax +Pπ(τ) +� +�� +t +γtr(st, at) − +� +k∈[K] +∆k +� +t +dk(st) +� +� += Eπ +�� +t +γtr(st, at) +� +− +� +k∈[K] +∆k +� +τ∼π +Dk(τ)≥ck +max +Pπ(τ)Dk(τ). 
+(23) +So, if ∆k = ∞, then one needs to seek a policy that assigns zero probabilities to all the trajectories that violate the +constraints, implying that the extended MDP would yield the same optimal policies as the worst-case CMDP (22). +Proposition C.2. Let π∗ and π be optimal policies to the extended MDP (2) and the worst-case MDP, and φk = ck +max − +maxπ �Eπ[Dk(τ)|Dk(τ) ≤ ck +max], ∀k ∈ [K]. If we choose ∆k such that ∆k > (Ψ∗ − Ψ)/φk, then any optimal policy of +(2) is feasible to the risk-neutral CMDP with multiple constraints. +Proof. Since π is also feasible to (2), we have: +Eπ∗ +�� +t +γtr(st, at) +� +− +� +k∈[K] +∆k +� +τ +Dk(τ)>ck +max +Pπ∗(τ)Dk(τ) ≥ Eπ +�� +t +γtr(st, at) +� += Ψ. +(24) +Moreover, since Ψ∗ is an optimal value of the original unconstrained MDP, we have Ψ∗ ≥ Eπ∗ [� +t γtr(st, at)], leading to +� +k∈[K] +∆k +� +τ +Dk(τ)≥ck +max +Pπ∗(τ)Dk(τ) ≤ Ψ∗ − Ψ. +(25) +Moreover, from Lemma 3.3, we know that if �Eπ[Dk(τ)| Dk(τ) > ck +max] ≤ φk, then Eπ[Dk(τ)] < ck +max, where +φk = ck +max − maxπ �Eπ[Dk(τ)|Dk(τ) ≤ cmax]. Therefore, if we select ∆k ≥ (Ψ∗ − Ψ)/φk, then from (25) we see that +�Eπ∗[Dk(τ)| Dk(τ) > ck +max] ≤ φk for all k ∈ [K], implying that π∗ satisfies all the constraints, as desired. +Proposition C.3. Given any αk ∈ (0, 1), k ∈ [K], if we choose ∆k ≥ (Ψ∗ − Ψ)/(αkck +max), ∀k ∈ [K], then a solution π∗ +to (2) is always feasible to the following VaR (or chance-constrained) MDP. +max +π +E +� T +� +t=0 +γtr(st, at)|s0, π +� +s.t. +Pπ +� +(Dk(τ) > ck +max +� +≤ αk, ∀k ∈ [K] +(26) +Proof. From the proof of Proposition C.2 above, we have the following inequalities +Ψ∗ − Ψ ≥ +� +k∈[K] +∆k +� +τ +Dk(τ)>ck +max +Pπ∗(τ)Dk(τ) +≥ +� +k∈[K] +∆kck +maxPπ∗(Dk(τ) > ck +max) +So if we choose ∆k ≥ (Ψ∗ − Ψ)/(αkck +max), ∀k ∈ [K], we will have +Ψ∗ − Ψ ≥ (Ψ∗ − Ψ) +(αkckmax)ck +maxPπ∗(Dk(τ) > ck +max), ∀k ∈ [K], +implying Pπ∗(Dk(τ) > ck +max) ≤ αk, as desired. + +Solving Constrained RL through Augmented State and Reward Penalties +Proposition C.4. 
If we define the reward penalties as +�r(at|(st, cK +t )) = r(at|st) − +� +k∈[K] +∆kδk(ct), ∀st, at, cK +t , +where δk(ct), ∀k ∈ [K], are defined as follows: +δk(ct) = +� +� +� +� +� +0 if ck +t + dk(st) ≤ ck +max +(T + 1)/γt if ck +t ≤ ck +max, ck +t + dk(st) > ck +max +1/γt if ck +t > ck +max, +then if π∗ is an optimal solution to (EMDP), there is α∆ +∆ +∆ +k ∈ [0; Ψ∗−Ψ +T ∆k ] (αk is dependent of ∆ +∆ +∆)1 such that π∗ is also optimal +to the following VaR CMDP +max +π +E +� T +� +t=0 +γtr(st, at)|s0, π +� +s.t. +Pπ +� +(D(τ) > ck +max +� +≤ α∆ +∆ +∆ +k , ∀k ∈ [K]. +(27) +Moreover lim∆k→∞ α∆ +∆ +∆ +k = 0, ∀k ∈ [K]. +Proof. Similar to the proof of Theorem 3.6, we can write the objective of the extended MDP as +Eπ +�� +t +γtr(at|st, cK +t ) +� += Eπ +�� +t +γtr(st, at) +� +− +� +k∈[K] +∆kTPπ(Dk(τ) > ck +max) +Then, in a similar way, if we let α∆ +∆ +∆ +k = TPπ∗(Dk(τ) > ck +max), then π∗ should be an optimal policy to (27). In addition, we +can bound αk by deriving the following inequalities. +Ψ∗ − +� +k∈[K] +∆kTα∆ +∆ +∆ +k ≥ Eπ∗ +�� +t +γtr(st, at) +� +− +� +k∈[K] +∆kTPπ∗(Dk(τ) > ck +max) +≥ E�π +�� +t +γtr(st, at) +� +− +� +k∈[K] +∆kTP�π(Dk(τ) > ck +max) += E�π +�� +t +γtr(st, at) +� += Ψ, +(28) +where �π and Ψ are optimal policy and optimal value of the worst-case CMDP (22). This implies +� +k∈[K] +∆kTα∆ +∆ +∆ +k ≤ Ψ∗ − Ψ, +which tells us that α∆ +∆ +∆ +k ≤ Ψ∗−Ψ +T ∆k , implying that lim∆k→∞ α∆ +∆ +∆ +k = 0. +Proposition C.5. For any ∆k > 0, k ∈ [K], if we define the reward penalties as +�r(at|(st, cK +t )) = r(at|st) − +� +k∈[K] +∆kδk(ct), ∀st, at, cK +t , +where δk(ct), ∀k ∈ [K], are defined as follows: +δk(ct) = +� +� +� +� +� +0 if ck +t + dk(st) ≤ ck +max +(ck +t + dk +t − ck +max)/γt if ck +t ≤ ck +max, ck +t + dk(st) > ck +max +dk +t /γt if ck +t > ck +max, +1∆ +∆ +∆ denotes the vector (∆1, . . . 
, ∆K) + +Solving Constrained RL through Augmented State and Reward Penalties +then there are β∆ +∆ +∆ +k ∈ [0; Ψ∗−Ψ +∆k ] (β∆ +∆ +∆ +k is dependent of ∆ +∆ +∆) such that any optimal solution π∗ to the extended CMDP (2) is also +optimal to the following multi-constrained CVaR CMDP +max +π +E +� T +� +t=0 +γtr(st, at)|s0, π +� +s.t. +Eτ∼π +� +(D(τ) − cmax)+� +≤ β∆ +∆ +∆ +k , ∀k ∈ [K] +(29) +Moreover, lim∆k→∞ β∆ +∆ +∆ +k = 0. +Proof. Under the reward setting, we first write the objective function of the extended MDP as +Eπ +�� +t +γtr(at|st, cK +t ) +� += Eπ +�� +t +γtr(st, at) +� +− +� +k∈[K] +∆kEπ +� +(Dk(τ) − ck +max)+� +Following the same derivations as in the proof of Theorem 3.8, we can further show that, by contradiction, π∗ is also optimal +for the CVaR CMDP (29) with β∆ +∆ +∆ +k = Eπ∗ +� +(Dk(τ) − ck +max)+� +. To prove lim∆k→∞ β∆ +∆ +∆ +k = 0, we derive similar inequalities +as in the proof of Proposition C.4, as follows: +Ψ∗ − +� +k∈[K] +∆kβ∆ +∆ +∆ +k ≥ Eπ∗ +�� +t +γtr(st, at) +� +− +� +k∈[K] +∆kEπ∗ � +(Dk(τ) − ck +max)+� +≥ E�π +�� +t +γtr(st, at) +� +− +� +k∈[K] +∆kE�π +� +(Dk(τ) − ck +max)+� += E�π +�� +t +γtr(st, at) +� += Ψ, +implying that β∆ +∆ +∆ +k ≤ Ψ∗−Ψ +∆k , thus lim∆k→∞ β∆ +∆ +∆ +k = 0 as desired. +D. Experimental Results on Puddle Environment +D.1. Continuous Puddle Environment: RN-CMDP +Inspired by (Jain et al., 2021), we test all the methods on the continuous puddle environment. The environment is shown in +Figure 4. It is a continuous two-dimensional state-space environment in [0, 1]. The agent starts at the bottom left corner of +the map (0, 0) and the objective is to move to the goal at the upper right corner (1, 1). The agent can move in four directions +and occasionally agent will execute a random action with probability p = 0.05 instead of the one selected by the agent. +In each position transition, noise is drawn from the Uniform[−0.025, 0.025] distribution and added to both coordinates. 
+When the agent is within 0.1 L1 distance from the goal state, the agent can be seen as reaching the goal and receive a reward +of 100 while agent gets a time penalty as -0.1 at each time step. There is a square puddle region centering at (0.5, 0.5) with +0.4 height. In each time step, if agent is located in the puddle area, it gets a cost of 1. Due to the existence of noise, we +cannot set the threshold cmax too small as it would be hard for agent to reach the goal, so we set cmax = 8, meaning agent +could stay in puddle area for at most 8 time steps. +We show the results in Figure 4. As can be seen from the figure, safe SAC could outperform other methods in both reward +and cost. Although safe DQN can always satisfy the constraint, it always fail to reach the goal to get the maximum reward. +For BVF, when the backward value function succeeds to estimate the cost, the reward starts to decrease and worse than safe +SAC. +E. Experimental Results on Highway Merge Environment +We also evaluate our safe methods on another highway environment - merge. The environment is shown in Figure 5 +where agent needs to take actions to complete merging with other vehicles. The rewards are similar to those in highway + +Solving Constrained RL through Augmented State and Reward Penalties +Figure 4. Performance in Puddle Environment +Figure 5. Merge environment and reward, cost comparison of different approaches +environment. Figure 5 shows a comparison of our safe methods with other benchmarks. Although Safe DQN fails to +complete the task in merge environment, Safe SAC still outperforms BVF and unsafe DQN with better score and lower cost. +The reason that safe DQN fails is that the combinations of extended space is too large in merge environment for safe DQN +to figure it out. That is also why safe DQN converges quite slowly in highway environment. As safe DQN is unable to deal +with large size of state space, safe SAC outperforms safe DQN in continuous environments. +F. 
Hyperparameters +In case of discrete environment - GridWorld, the size of state space is 8 × 8 with 18 pits. In Highway environment (including +merge), related parameters and their values are listed below. There is an additional reward in merge environment named +mergingspeedreward with value of -0.5. It penalties the agent if it drives with speed less than 30 while merging. +• lanes count: Number of lanes, setting as 4 in both environments. +• vehicles count: Number of vehicles on lanes, setting as 50 in both environments. +• controlled vehicles: Number of agents, setting as 1 in both environments. +• duration: Duration of the game, setting as 40 in both environments. +• ego spacing: The space of vehicles, setting as 2 in both environments. +• vehicles density: The density of vehicles on lanes, setting as 1 in both environments. +• reward speed range: The range where agent can receive high speed reward, setting as [20, 30] in both environ- +ments. +• high speed reward: Reward received when driving with speed in reward speed range, setting as 0.4 in highway +while 0.2 in merge. + +Average Score in each Episode +75 +Unsafe DQN +Safe DQN +BVF +50 +Safe SAC +Lyapunov +25 +0 +Score +-25 +-50 +-75 +-100 +250 +500 +750 +1000 +1250 +1500 +1750 +2000 +0 +EpisodeAverage Constraint in each Episode +Unsafe DQN +350 +Safe DON +BVF +300 +Safe SAC +Lyapunov +250 +Constraint +200 +150 +100 +50 +0 +0 +250 +500 +750 +1000 +1250 +1500 +1750 +2000 +EpisodeAverage Score in each Episode +15 +14 +13 +12 +Unsafe DON +Safe DQN +Score +11 +BVF +Safe SAC +10 +Lyapunov +9 +8 +7 +0 +2000 +4000 +6000 +8000 +10000 +12000 +14000 +EpisodeAverage Constraint in each Episode +Unsafe DQN +16 +Safe DON +BVF +Safe SAC +14 +Lyapunov +Constraint +12 +10 +8 +6 +2000 +4000 +6000 +8000 +10000 +12000 +14000 +0 +EpisodeSolving Constrained RL through Augmented State and Reward Penalties +• collision reward: Reward received when colliding with a vehicle, setting as -1 in both environments. 
+• right lane reward: Reward received when driving on the right-most lane, setting as 0.1 in both environments. +• lane change reward: Reward received when taking lane change action, setting as 0 in highway while -0.05 in merge. +In all the methods, we use networks with a hidden layer size of 64,64,64 along with the ReLu activation and use Adam +optimizer to optimize the networks. We test our methods on GridWorld, Highway, Safety Gym, Puddle, Highway for 15000, +15000, 1000, 2000, 15000 episodes respectively and update the network every 4 steps. + diff --git a/8dFJT4oBgHgl3EQfnix6/content/tmp_files/load_file.txt b/8dFJT4oBgHgl3EQfnix6/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..c1f662cf6087cc3c3fcdcda15a6db5aec1047758 --- /dev/null +++ b/8dFJT4oBgHgl3EQfnix6/content/tmp_files/load_file.txt @@ -0,0 +1,1051 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf,len=1050 +page_content='Solving Constrained Reinforcement Learning through Augmented State and Reward Penalties Hao Jiang 1 Tien Mai 1 Pradeep Varakantham 1 Abstract Constrained Reinforcement Learning has been employed to enforce safety constraints on policy through the use of expected cost constraints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The key challenge is in handling expected cost accu- mulated using the policy and not just in a single step.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Existing methods have developed innova- tive ways of converting this cost constraint over entire policy to constraints over local decisions (at each time step).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' While such approaches have provided good solutions with regards to objective, they can either be overly aggressive or conserva- tive with respect to costs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' This is owing to use of estimates for ”future” or ”backward” costs in local cost constraints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' To that end, we provide an equivalent uncon- strained formulation to constrained RL that has an augmented state space and reward penalties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' This intuitive formulation is general and has in- teresting theoretical properties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' More importantly, this provides a new paradigm for solving con- strained RL problems effectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' As we show in our experimental results, we are able to outper- form leading approaches on multiple benchmark problems from literature.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Introduction There are multiple objectives of interest when handling safety depending on the type of domain: (a) ensuring safety constraint is never violated;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (b) ensuring safety constraint is not violated in expectation;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (c) ensuring the chance of safety constraint violation is small (Value at Risk, VaR) (Lucas & Klaassen, 1998);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (d) ensuring the expected cost of violation is bounded (Conditional Value at Risk, CVaR) (Rockafellar et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2000;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Yang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2021);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' and others.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' One of the main models in Reinforcement Learning to ensure safety is Constrained RL, which employs objective (b) above.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Our focus in this paper is also on Constrained RL.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 1School of Computing and Information Systems, Singapore Management University.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Preprint Constrained RL problems are of relevance in domains that can be represented using an underlying Constrained Markov Decision Problem (CMDP) (Altman, 1999).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The main chal- lenge in solving Constrained RL problems is the expected cost constraint, which requires averaging over multiple tra- jectories from the policy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Such problems have many appli- cations including but not limited to: (a) electric self driving cars reaching destination at the earliest while minimizing the risk of not getting stranded on the road with no charge;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (b) robots moving through unknown terrains to reach a des- tination, while having a threshold on the average risk of passing through unsafe areas (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', a ditch).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Broadly, they are also applicable to problems robot motion planning (Ono et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2015;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Moldovan & Abbeel, 2012;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Chow et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2015a), resource allocation (Mastronarde & van der Schaar, 2010;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Junges et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2015), and financial engineering (Abe et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2010;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Di Castro et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2012).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Related Work: Many model free approaches have been pro- posed to solve Constrained RL problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' One of the initial approaches to be developed for addressing such constraints is the Lagrangian method (Chow et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2015b).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' However, such an approach does not provide either theoretical or em- pirical guarantees in ensuring the constraints are enforced.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' To counter the issue of safety guarantees, next set of ap- proaches focused on transforming the cost constraint over trajectories into cost constraint over individual decisions in many different ways.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' One such approach imposed surrogate constraints (El Chamie et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2016;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' G´abor et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 1998) on individual state and action pairs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Since the surrogate con- straints are typically stricter than the original constraint on the entire trajectory, they were able to provide theoretical guarantees on safety.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' However, the issue with such type of approaches is their conservative nature, which can poten- tially hamper the expected reward objective.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' More recent approaches such as CPO (Constrained Policy Optimiza- tion) (Achiam et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2017), Lyapunov (Chow et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2019b), BVF (Satija et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2020a) have since provided more tighter local constraints (over individual decisions) and thereby have improved the state of art in guaranteeing safety while providing high quality solutions (with regards to expected reward).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In converting a trajectory based constraint to a local constraint, there is an estimation of cost involved for the rest of the trajectory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Due to such estimation, trans- arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='11592v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='LG] 27 Jan 2023 Solving Constrained RL through Augmented State and Reward Penalties formed cost constraints over individual decisions are error prone.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In problems where the estimation is not close to the actual, results with such approaches with regards to cost constraint enforcement are poor (as we demonstrate in our experimental results).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Contributions: To that end, we focus on an approach that relies on exact accumulated costs (and not on estimated costs).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In this paper, we make four key contributions: We provide a re-formulation of the constrained RL prob- lem through augmenting the state space with cost ac- cumulated so far and also considering reward penalties when cost constraint is violated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' This builds on the idea of augmented MDPs (Hou et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2014) employed to solve Risk Sensitive MDPs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The key advantage of this reformulation is that by penalizing rewards (as opposed to the entire expected value that is done typically using Lagrangian methods), we get more fine grained control on how to handle the constraints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Also, we can utilize existing RL methods with minor modifications.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We show theoretically that the reward penalties em- ployed in the new formulation are not adhoc and can equivalently represent different constraints mentioned in the first paragraph of introduction, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' risk-neural, chance constrained (or VAR) and CVaR constraints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We modify existing RL methods (DQN and SAC) to solve the re-formulated RL problem with augmented state space and reward penalties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' A key advantage for the new approaches is the knowledge of exact costs incurred so far (available within the state space) and this allows for assigning credit for cost constraint violations more precisely during learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Finally, we demonstrate the utility of our approach by comparing against leading approaches for constrained RL on multiple benchmark problems from literature.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We empirically demonstrate that our approaches are able to outperform leading Constrained RL approaches from the literature either with respect to expected value or in enforcing the cost constraint or both.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Constrained Markov Decision Process A Constrained Markov Decision Process (CMDP) (Altman, 1999) is defined using tuple ⟨S, A, r, p, d, s0, cmax⟩, where S is set of states with initial state as s0, A is set of actions, r : S × A → R is reward with respect to each state-action pair, p : S × A → P is transition probability of each state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' d : S → d(S) is the cost function and cmax is the maximum allowed cumulative cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Here, we assume that d(s) ≥ 0 for all s ∈ S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' This assumption is not restrictive as one can always add positive amounts to d(s) and cmax to meet the assumption.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The objective in a risk-neural CMDP is to compute a policy, π : S × A → [0, 1], which maximizes reward over a finite horizon T while ensuring the cumulative cost does not exceed the maximum allowed cumulative cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' max π E � T � t=0 γtr(st, at)|s0, π � s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='t.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' E � T � t=0 d(st)|s0, π � ≤ cmax.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (RN-CMDP) The literature has seen other types of constraints, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', chance constraints requiring that Pπ(D(τ) > cmax) ≤ α for a risk level α ∈ [0, 1], or CVaR ones of the form Eπ[(D(τ) − cmax)+] ≤ β.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Handling different types of constraints would require different techniques.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In the next section, we present our approach based on augmented state and reward penalties that assembles all the aforementioned constraint types into one single framework.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Cost Augmented Formulation for Safe RL We first present our extended MDP reformulation and pro- vide several theoretical findings that connect our extended formula with different variants of CMDP.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We first focus on the case of single-constrained MDP and show how the results can be extended to the multi-constrained setting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Extended MDP Reformulation We introduce our approach to track the accumulated cost at each time period, which allows us to determine states that potentially lead to high-cost trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' To this end, let us define a new MDP with an extended state space � �S, A, �r, �p, d, s0, cmax � where �S = {(s, c)| s ∈ S, c ∈ R+}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' That is, each state s′ of the extended MDP includes an original state from S and information about the accumu- lated cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We the define the transition probabilities between states in the extended space.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' � � � � � �p(s′ t+1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' c′ t+1|(st,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' ct),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' at) = p(s′ t+1|st,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' at) if c′ t+1 = ct + d(st) �p(s′ t+1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' c′ t+1|(st,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' ct),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' at) = 0 otherwise and new rewards with penalties � � � � � � � � � �r(at|(st,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' ct)) = r(at|st) if ct + d(st) ≤ cmax �r(at|(st,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' ct)) = r(at|st) − ∆(ct + d(st))/γt if ct ≤ cmax and ct + d(st) > cmax �r(at|(st,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' ct)) = r(at|st) − ∆d(st)/γt if ct > cmax where ∆ is a positive scalar and ∆d(st) and ∆(ct + d(st)) are penalties given to the agent if the accumu- lated cost exceeds the upper bound cmax.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Under these Solving Constrained RL through Augmented State and Reward Penalties reward penalties, the accumulated reward for each tra- jectory τ = {(s0, a0), .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' , (sT , aT )} can be written as �R(τ) = � t γtr(at|st) if D(τ) ≤ cmax and �R(τ) = � t γtr(at|st) − ∆D(τ) if D(τ) > cmax, where D(τ) is the total cost of trajectory τ, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', D(τ) = � st∈τ d(st).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' So, in fact, we penalize every trajectory that violates the cost constraint.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We now consider the following extended MDP, which han- dles the constraints in a relaxed manner through penalties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' max π E � T � t=0 γt�r(at|(st, ct)) ���(s0, c0), π � (EMDP) where c0 = 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' There are also other ways to penalize the rewards, allowing us to establish equivalences between the extended MDP to other risk-averse CMDP, which we will discuss later in the next section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Theoretical Properties To demonstrate the generality and power in representation of the reward penalties along with state augmentation in the un- constrained MDP (EMDP), we provide theoretical properties that map reward penalties to different types of constraints (expected cost, VaR, CVaR, Worst-case cost): (i) Proposition 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1 states that if the penalty parameter ∆ = 0, then (EMDP) becomes the classical unconstrained MDP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (ii) Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2 shows that if ∆ = ∞, then (EMDP) is equivalent to a worst-case constrained MDP (iii) Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5 establishes a lower bound on ∆ from which any solution to (EMDP) will satisfy the risk- neural constraint in (RN-CMDP).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (iv) Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6 connects (EMDP) with chance- constrained MDP by providing a lower bound for ∆ from which any solution to (EMDP) will satisfy a VaR constraint P(� t d(st) ≤ cmax) ≤ α.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (v) Theorems 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6 and 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='8 further strengthen the above re- sults by showing that, under some different reward set- tings, (EMDP) is equivalent to a chance-constrained (or VaR) or equivalent to a CVaR CMDP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We now describe our theoretical results in detail.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' All the proofs can be found in the appendix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We first state, in Propo- sition 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1, a quite obvious result saying that if we set the penalty parameter ∆ = 0, then the MDP with augmented state space becomes the original unconstrained MDP.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proposition 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' If ∆ = 0, then (EMDP) is equivalent to the unconstrained MDP maxπ E ��T t=0 γtr(st, at)|s0, π � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' It can be seen that increasing ∆ will set more penalties to trajectories whose costs exceed the maximum cost allowed cmax, which also implies that (EMDP) would lower the prob- abilities of taking these trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' So, intuitively, if we raise ∆ to infinity, then (EMDP) will give policies that yield zero probabilities to violating trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We state this result in Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2 below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2 (Connection to worst-case CMDP).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' If we set ∆ = ∞, then if π∗ solves (EMDP), it also solves the following worst-case constrained MDP problem max π E � T � t=0 γtr(st, at)|s0, π � s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' � st∈τ d(st) ≤ cmax, ∀τ ∼ π.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (WC-CMDP) As a result, π∗ is feasible to the risk-neural CMDP (RN-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The above theorem implies that if we set the penalties to be very large (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', ∞), then all the trajectories generated by the optimal policy π∗ will satisfy the constraint, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', the accumulated cost will not exceed cmax.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Such a conservative policy would be useful in critical environments where the agent is strictly not allowed to go beyond the maximum allowed cost cmax.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' An example would be a routing problem for electrical cars where the remaining energy needs not become empty before reaching a charging station or the destination.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Note that the worst-case CMDP (WC-CMDP) would be non-stationary and history-dependent, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', there would be no stationary and history-independent policies being optimal for the worst-case CMDP (WC-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' This remark is obviously seen, as at a stage, one needs to consider the current accumulated cost to make feasible actions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Thus, a policy that ignores the historical states and actions would be not optimal (or even not feasible) for the worst-case MDP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' As a result, this worst-case CMDP can not be presented by a standard-constrained MDP formulation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Theorem 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2 also tells us that one can get a feasible solution to the risk-neural CMDP (RN-CMDP) by just raising ∆ to infinity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In fact, ∆ does not need to be infinite to achieve feasibility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Below we establish a lower bound for the penalty parameter ∆ such that a solution to (EMDP) is always feasi- ble to the risk-neural CMDP (RN-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Let us define Ψ∗ as the optimal value of the unconstrained MDP problem Ψ∗ = max π E � T � t=0 γtr(st, at)|s0, π � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' and Ψ be the optimal value of the worst-case CMDP (WC-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We define a conditional expectation �Eπ [D(τ)| D(τ) ≤ cmax] as the expected cost over trajecto- ries whose costs are less than cmax �Eπ [D(τ)| D(τ) ≤ cmax] = � τ| D(τ)≤cmax Pπ(τ)D(τ) Solving Constrained RL through Augmented State and Reward Penalties where Pπ(τ) is the probability of τ under policy π.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Before presenting the bound, we first need two lemmas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Lemma 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='3 establishes a condition under which a policy π is feasible to the risk-neural CMDP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Let φ∗ = cmax − maxπ � �Eπ[D(τ)| D(τ) ≤ cmax] � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Given any policy π, if �Eπ[D(τ)| D(τ) > cmax] ≤ φ∗, then Eπ[D(τ)] ≤ cmax.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4 below further provides an upper bound for the expected cost of violating trajectories under an optimal pol- icy given by the extended MDP reformulation (EMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Given ∆ > 0, let π∗ be an optimal solution to (EMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We have �Eπ∗ [D(τ)| D(τ) > cmax] ≤ Ψ∗ − Ψ ∆ .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Using Lemmas 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='3 and 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4, we are ready to state the main result in Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5 below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5 (Connection to the risk-neural CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' For any ∆ ≥ Ψ∗−Ψ φ∗ , a solution to (EMDP) is always feasible to the risk-neural CMDP (RN-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' To prove Lemmas 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='3, 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4, we leverage the fact that the objective of (EMDP) can be written equivalently as Eπ �� t γtr(st, at) � − ∆�Eπ [D(τ)| D(τ) > cmax] (1) which allows us to establish a relation between ∆ and �Eπ∗ [D(τ)| D(τ) > cmax], where π∗ is an optimal policy of (EMDP).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The bounds then come from this relation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We refer the reader to the appendix for detailed proofs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' There is also a lower bound for ∆ from which any solution to (EMDP) always satisfies a chance constraint (or VaR).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' To state this result, let us first define the following VaR CMDP, for any risk level α ∈ [0, 1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' max π E � T � t=0 γtr(st, at)|s0, π � s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Pπ � (D(τ) > cmax � ≤ α.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (VaR-CMDP) We have the following theorem showing a connection be- tween (EMDP) and the VaR CMDP above.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6 (Connection to VaR CMDP).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' For any ∆ ≥ (Ψ∗ − Ψ)/(αcmax), a solution to (EMDP) is always feasi- ble to (VaR-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We also leverage Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 1 to prove the theorem by show- ing that when ∆ is sufficiently large, the conditional ex- pectation �Eπ∗ [D(τ)| D(τ) > cmax] can be bounded from above (π∗ is an optimal policy of (EMDP)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We then can link this to the chance constraint by noting that �Eπ∗ [D(τ)| D(τ) > cmax] ≥ cmaxP(D(τ) > cmax).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6 tells us that one can just raise ∆ to a suffi- ciently large value to meet a chance constraint of any risk level.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Here, Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6 only guarantees feasibility to (VaR-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Interestingly, if we modify the reward penal- ties by making them independent of the costs d(s), than an equivalent mapping to (VaR-CMDP) can be obtained.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Specifically, let us re-define the following reward for the extended MDP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' That is, we replace the cost d(st) by a con- stant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='7 below shows that (EMDP) is actually equivalent to a chance-constrained CMDP under the new reward setting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='7 (VaR equivalence).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' If we modify the reward penalties as � � � � � � � � � �r(at|(st, ct)) = r(at|st) if ct + d(st) ≤ cmax �r(at|(st, ct)) = r(at|st) − ∆(t + 1)/γt if ct ≤ cmax and ct + d(st) > cmax �r(at|(st, ct)) = r(at|st) − ∆/γt if ct > cmax then if π∗ is an optimal solution to (EMDP), then there is α∆ ∈ [0;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Ψ∗−Ψ ∆T ] (α is dependent of ∆) such that π∗ is also optimal to (VaR-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Moreover lim∆→∞ α∆ = 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' It can be also seen that Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2 is a special case of Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='7 when ∆ = ∞.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We finally connect (EMDP) with a risk-averse CMDP that has a CVaR intuition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The theorem below shows that, by slightly changing the reward penalties, (EMDP) actually solves a risk-averse CMDP problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='8 (CVaR CMDP equivalence).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' If we modify the reward penalties as � � � � � � � � � �r(at|(st, ct)) = r(at|st) if ct + d(st) ≤ cmax �r(at|(st, ct)) = r(at|st) − ∆(ct + d(st) − cmax)/γt if ct ≤ cmax and ct + d(st) > cmax �r(at|(st, ct)) = r(at|st) − ∆d(st)/γt if ct > cmax then for any ∆ > 0, there is β∆ ∈ � 0;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Ψ∗−Ψ ∆ � (β∆ is de- pendent of ∆) such that any optimal solution to the extended CMDP (EMDP) is also optimal to the following risk-averse CMDP max π E � T � t=0 γtr(st, at)|s0, π � s.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Eτ∼π � (D(τ) − cmax)+� ≤ β∆.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (CVaR-CMDP) Moreover, lim∆→∞ β∆ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Solving Constrained RL through Augmented State and Reward Penalties In practice, since ∆ is just a scalar, one can just grad- ually increase it from 0 to get a desired policy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' This indicates the generality of the unconstrained exended MDP formulation (EMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In summary, we show that (EMDP) brings risk-neural, worst-case and VaR and CVaR CMDPs in (RN-CMDP), (WC-CMDP), (VaR-CMDP) and (CVaR-CMDP) under one umbrella.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Multi-constrained CMDP We now discuss extension to CMDP with multiple cost constraints (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='g.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', limited fuel and bounded risk) and show how the above theoretical results can be extended to the multi-constrained variants.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' A multi-constrained risk-neural CMDP can be formulated as max π E � T � t=0 γtr(st, at)|s0, π � s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' E � T � t=0 dk(st)|s0, π � ≤ ck max, ∀k ∈ [K] (MRN-CMDP) where [K] denotes the set {1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' , K}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Similar to the single constraint case, to include cost functions in the rewards, we extend the state space to keep track of the accumulated costs as �S = {(s, c1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' , cK)| s ∈ S, ck ∈ R, ∀k ∈ [K]} and define new transitions probabilities as � � � � � �p(st+1, cK t+1|(st, cK t ), at) = p(st+1|st, at) if ck t+1 = ck t + dk(st) �p(st+1, cK t+1|(st, cK t ), at) = 0 otherwise where cK t = (c1 t, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' , cK) for notational simplicity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The new rewards are also updated in such a way that every trajectory violating the constraints will be penalized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' �r(at|(st, cK t )) = r(at|st) − � k∈[K] ∆kδk(ct), where δk(ct), ∀k ∈ [K], are defined as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' δk(ct) = � � � � � � � � � 0 if , ck t + dk(st) ≤ ck max (ck t + dk(st))/γt if ck t ≤ ck max, ck t + dk(st) ≥ ck max dk(st)/γt if ck t > ck max.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Here, we allow penalty parameters ∆k to be different over constraints.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We formulate the extended unconstrained MDP as: max π � E � T � t=0 γt�r(at|(st, cK t )) ���(s0, cK 0 ), π �� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (2) Similar to the single-constrained case, the reward penalties allow us to write the objective function of the extended MDP as Eπ �� t γtr(st, at) � − � k∈[K] ∆k�Eπ � Dk(τ)| Dk(τ) > ck max � (3) where Dk(τ) is the accumulated cost dk(st) on trajectory τ, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Dk(τ) = � st∈τ dk(st).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' As a result, when ∆k grows, the extended MDP will discount the second term of (3), thus yielding policies that satisfy or even solve risk-neural or risk-averse CMDP problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Specifically, the following results can be proved: When ∆k = ∞, ∀k ∈ [K], then (2) is equivalent to worst-case CMDP (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', all the trajectories generated by the policy will satisfy all the cost constraints).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' There are lower bounds for ∆k from which any so- lution to (2) will be feasible to risk-neural and VaR CMDP with multiple constraints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' For any ∆k > 0, under different reward penalty set- tings, (2) is equivalent to a multi-constrained CVaR CMDP or equivalent to a multi-constrained VaR CMDP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' All the detailed proofs and discussions can be found in the appendix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Safe RL Algorithms In this section, we update existing RL methods to effectively utilize the extended state space and reward penalties, while considering RN-CMDP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Due to the theoretical properties in the previous section, just by tweaking ∆, we can also handle other Constrained MDPs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Safe DQN Deep Q Network (DQN) (Mnih et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2015) is an efficient method to learn in primarily discrete action Reinforcement Learning problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' However, the original DQN does not consider safety constraints and cannot be applied to any of the CMDP variants.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The main modifications in the updated algorithm, referred to as Safe DQN are with regards to exploiting the extended state space and the reward penalties based on constraint violations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The pseudo code for the Safe DQN algorithm is provided in Algorithm 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The impact of extended state space on the algorithm can be observed in almost every line of the algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The penalty for violation of constraints When selecting an action (line 4), Safe DQN not consider the feasibility of the action with respect to cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Instead, like in the original DQN, it is purely based on the current Q value.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The assumption is that the Solving Constrained RL through Augmented State and Reward Penalties Algorithm 1 DQN with Extended State Space Initialization: Relay buffer D with capacity N, action- value function Q with weight θ, target action-value function ˆQ with weight θ− = θ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 1: for each episode do 2: Initialize with sequence (s0, c0 = 0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 3: for each time step t do 4: Select a random action at with probability ϵ, oth- erwise select at = arg maxa Q((st, ct), a;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' θ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 5: Execute action at, observe (st+1, ct+1), rt.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 6: Store ((st, ct), at, rt, (st+1, ct+1)) in D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 7: Update state-cost pair to (st+1, ct+1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 8: Sample ((sj, cj), aj, rj, (sj+1, cj+1)) from D.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 9: if cj > cmax then 10: ˜rj = r(sj) − ∆d(sj)/γt 11: else if cj+1 > cmax then 12: ˜rj = r(sj) − ∆(ct + d(sj))/γt 13: else 14: ˜rj = r(sj) 15: end if 16: {”mask” indicates if the episode terminates} 17: yj = ˜rj + γ ∗ maxa′ ˆQ((sj+1, cj+1), a′;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' θ−) ∗ maskj+1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 18: Update θ using l = (yi − Q((sj, cj), aj;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' θ))2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 19: Every C steps reset ˆQ = Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 20: end for 21: end for penalties accrued due to violation (in lines 9-12) will be suf- ficient to force the agent away from cost infeasible actions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Once the new rewards are obtained (based on considering reward penalties), the Q network is updated using the mean square error loss on line 17.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Safe SAC Soft Actor-Critic (SAC) (Haarnoja et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2018) is an off- policy algorithm that learns a stochastic policy for discrete and continuous action RL problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' SAC employs policy entropy in conjunction with value function to ensure more exploration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Q value function in SAC is defined as follows: Q(s, a) =E[ ∞ � t=0 γtr(st, at, st+1)+ α ∞ � t=1 γtH(π(·|st))|s0 = s, a0 = a] (4) where H(.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=') denotes the entropy of the action distribution for a given state, st).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' SAC also employs the double Q- trick, where we use the minimum of two Q value functions (Qi(.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' ), i ∈ 1, 2) as the target, y to avoid overestimation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' y =r(s, a, s′) + γ min i=1,2 Qi(s′, ˜a′) − α log π(˜a′|s′) (5) where ˜a′ ∼ π(·|s′).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Our algorithm, referred to as Safe SAC builds on SAC by having an extended state space and a new action selection strategy that exploits the extended state space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In Safe DQN, we primarily rely on violation of constraints, so as to learn about the bad trajectories and avoid them.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' While such approach works well for discrete action settings and in an off policy setting, it is sample inefficient and can be slow for actor-critic settings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In Safe SAC, apart from reward penalty, we also focus on learning about feasible actions, which are generated through the use of the cost accumulated so far (available as part of the state space) and a Q value on the future cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Formally, we define the optimization to select safe actions (at each decision epoch) in Equation 6 and show safe SAC algorithm in Algorithm 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Extending on the double Q trick for reward, we also have double Q for future cost, referred to as {Qi d}i∈1,2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' At each step, the objective is to pick an action that will maximize the reward Q value for the extended state, action minus the weighted entropy of the action.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The constraint here is to pick only those actions, which will not violate the cost constraint.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In the left hand side of the constraint, we calculate the overall expected cost from: (a) (estimate) of the future cost, from the current state;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (b) (actual) cost incurred so far;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' and (c) subtracting the (actual) cost incurred at the current step, as it is part of both (a) and (b);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' arg max a min i=1,2 Qi((s, c), a) − α log π(a|(s, c)) s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' max i=1,2 Qi D((s, c), a) + c − d((s, c)) ≤ cmax, ∀(s, c) (6) Algorithm 2 (in appendix) provides the pseudo code for Safe SAC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 5.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Experiment We empirically compare the performance of our approaches on both discrete and continuous environments with respect to expected reward and expected cost achieved against lead- ing benchmark approaches.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' For an RL benchmark, we use the original DQN (Mnih et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2015) and it is referred to as unsafe DQN, as it does not account for cost con- straints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' For leading Constrained RL benchmarks, we use BVF (Backward Value Function) (Satija et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2020b) and Lyapunov (Chow et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2019a).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We mostly show results with respect to expected cost constraint, as there are many model free approaches that solve the RN-CMDP problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' For one example (Safety Gym), we also provide comparison when a CVaR constraint is provided.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The performance val- ues (expected cost and expected reward) along with standard deviation in each experiment are averaged over 5 runs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Solving Constrained RL through Augmented State and Reward Penalties Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Gridworld environment and reward, cost comparison of different approaches Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Highway environment and reward, cost comparison of different approaches 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' GridWorld: RN-CMDP For a discrete state and discrete action environment, we consider the stochastic 2D grid world problem introduced previously (Leike et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2017;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Chow et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2018;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Satija et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2020b;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Jain et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The grid on the left of Figure 1 shows the environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The agent starts at the bottom right corner of the map (green cell) and the objective is to move to the goal at the bottom left corner (blue cell).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The agent can only move in the adjoining cells in the cardinal directions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Occasionally agent will execute a random action with probability p = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='05 instead of the one selected by the agent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' It gets a reward of +100 on reaching the goal, and a penalty of -1 at every time step.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' There are a number of pits in the map (red cell) and agent gets a random cost ranging from 1 to 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5 on passing through any pit cell.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We consider an 8x8 grid and the maximum time horizon is 200 steps, after which the episode terminates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' This modified GridWorld environment is challenging because agent can travel to destination with a short path with a high cost, but if it wishes to travel safely, it needs to explore enough to find a safe path which is far from the shortest one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We set the expected cost threshold, cmax = 2, meaning agent could pass at most one pit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' For discrete state environments, we use the discrete SAC in (Christodoulou, 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Figure 1 shows the performance of each method with respect to expected reward (score) and expected cost (constraint).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Here are the key observations: With respect to expected reward, among safe approaches, Lyapunov achieves the highest reward.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' However, it violates the expected cost constraint by more than twice the cost constraint value.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Safe SAC and Safe DQN achieve similar expected re- ward values, though Safe SAC reaches there faster.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' This high expected reward value is achieved while satisfying the expected cost constraint after 1000 episodes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The other constrained RL approach, BVF achieved the lowest value while not being able to satisfy the expected cost constraint.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' As expected, Unsafe DQN achieved the highest expected reward but was unable to satisfy the expected cost con- straint.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Highway Environment:RN-CMDP Inspired by experiment in GPIRL (Levine et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2011), we test our safe methods in the highway environment (Leurent, 2018) of Figure 2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The task in highway environment is to navigate a car on a four-lane highway with all other vehicles HAverage Score in each Episode 100 80 60 Score 40 20 0 Unsafe DQN Safe DQN BVF 20 Safe SAC Lyapunov 40 0 2000 4000 6000 8000 10000 12000 14000 EpisodeAverage Constraint in each Episode Unsafe DQN Safe DON 15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='0 BVF Safe SAC 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5 Lyapunov 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='0 Constraint 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='0 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='0 0 2000 4000 6000 8000 10000 12000 14000 EpisodeAverage Score in each Episode 22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='0 17.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5 Score 15.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='0 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5 Unsafe DQN 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='0 Safe DQN BVF 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5 Safe SAC Lyapunov 2000 4000 6000 8000 10000 12000 14000 0 EpisodeAverage Constraint in each Episode 10 8 Constraint 6 4 Unsafe DQN Safe DQN BVF 2 Safe SAC Lyapunov 2000 4000 6000 8000 10000 12000 14000 0 EpisodeSolving Constrained RL through Augmented State and Reward Penalties Figure 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Safety Gym Environment and reward, cost comparison of different approaches acting randomly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The goal for the agent is to maximize its reward by travelling on the right lane at the highest speed, vmax.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' However, to ensure safety, we set the constraint on the time the agent drives faster than a given speed in the rightmost lane.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Figure 2 shows the expected reward and expected cost per- formance of our safe methods compared to that of the bench- marks.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Safe SAC and Safe DQN were able to get high ex- pected rewards while satisfying the expected cost constraint.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We also provide results on a highway merge environment in the appendix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Safety Gym Environment: CVaR-CMDP In this environment, we intend to compare the performance of our safe methods with a CVaR optimizing CMDP method, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', WCSAC (Yang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We test all the methods on the same environment from (Yang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2021) - StaticEnv in Safety Gym (Ray et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2019).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The environment is shown in Figure 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The point agent has two types of actions: one is for turning and another is for moving forward/backward.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The objective is to reach the goal position while trying to avoid hazardous areas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The agent gets a reward of r − 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2 in each time step, where r is an original reward signal of Safety Gym (distance towards goal plus a constant for being within range of goal) while -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2 functions as a time penalty.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In each step, if the agent is located in the hazardous area, it gets a cost of 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We set cmax = 8, meaning agent could stay in hazardous area for at most 8 time steps.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' For risk level α in WCSAC, we set α = 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='9 and use the almost risk-neutral WCSAC, which is proven to reach the best performance in both reward and cost in experiment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We show the results in Figure 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' As can be seen from the figure, Safe SAC is able to achieve similar performance to that of WCSAC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Safe DQN was unable to handle this environment due to large size of state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' For BVF, although it reaches a good performance in reward, it violates the constraint for many episodes before converging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Conclusion In this paper, we have provided a very generic and scalable mechanism for handling a wide variety of policy based cost constraints (expected cost, worst-case cost, VaR, CVaR) in Constrained MDPs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Lagrangian based approaches, which penalize with respect to expected cost are unable to as- sign credit appropriately for a cost constraint violation, as expected cost averages over all trajectories.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Instead, we propose to penalize with respect to individual reward while maintaining a cost augmented state, thereby providing pre- cise credit assignment with regards to cost constraint vio- lations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We theoretically demonstrate that this simple cost augmented state and reward penalized MDP (referred to as EMDP) can represent all the aforementioned cost con- straints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We then provide safety aware RL approaches, Safe DQN and Safe SAC, which are able to outperform leading expected cost constrained RL approaches (Lyapunov and BVF) while at the same time providing similar performance to leading approach for CVaR constrained RL (WCSAC).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' References Abe, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Melville, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Pendus, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Reddy, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Jensen, D.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Thomas, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Bennett, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Anderson, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Cooley, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Kowalczyk, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Domick, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Gar- dinier, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Optimizing debt collections using constrained reinforcement learning.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In Proceedings of the 16th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD ’10, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 75–84, New York, NY, USA, 2010.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Association for Comput- ing Machinery.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' ISBN 9781450300551.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' doi: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 1145/1835804.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1835817.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' URL https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='org/ 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1145/1835804.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1835817.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Achiam, J.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Held, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Tamar, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Abbeel, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Constrained StaticEnv Goal Hazard AgentAverage Score in each Episode 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2 Unsafe DQN Safe DON Score BVF 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='0 Safe SAC Lyapunov 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2 N WCSAC 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4 M 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6 20 40 60 80 0 100 EpisodeAverage Constraint in each Episode 40 Unsafe DQN Safe DQN 35 BVF Safe SAC 30 Lyapunov WCSAC 25 Constraint 20 15 10 5 0 20 40 60 80 100 0 EpisodeSolving Constrained RL through Augmented State and Reward Penalties policy optimization.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' CoRR, abs/1705.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='10528, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' URL http://arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='org/abs/1705.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='10528.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Altman, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Constrained Markov decision processes: stochastic modeling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Routledge, 1999.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Chow, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Pavone, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Sadler, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Carpin, S.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Trading safety versus performance: Rapid deployment of robotic swarms with robust performance constraints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' CoRR, abs/1511.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='06982, 2015a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' URL http://arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='org/ abs/1511.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='06982.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Chow, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Tamar, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Mannor, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Pavone, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Risk- sensitive and robust decision-making: a cvar optimization approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' CoRR, abs/1506.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='02188, 2015b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' URL http: //arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='org/abs/1506.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='02188.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Chow, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Nachum, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Duenez-Guzman, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Ghavamzadeh, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' A lyapunov-based approach to safe reinforcement learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Advances in neural information processing systems, 31, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Chow, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Nachum, O.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Faust, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Duenez-Guzman, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Ghavamzadeh, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Lyapunov-based safe policy optimization for continuous control.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' arXiv preprint arXiv:1901.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='10031, 2019a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Chow, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Nachum, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Faust, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Ghavamzadeh, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Du´e˜nez-Guzm´an, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Lyapunov-based safe policy optimization for continuous control.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' CoRR, abs/1901.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='10031, 2019b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' URL http://arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='org/ abs/1901.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='10031.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Christodoulou, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Soft actor-critic for discrete action settings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' arXiv preprint arXiv:1910.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='07207, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Di Castro, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Tamar, A.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Mannor, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Policy gradients with variance related risk criteria, 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' URL https: //arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='org/abs/1206.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6404.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' El Chamie, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Yu, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Ac¸ıkmes¸e, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Convex synthesis of randomized policies for controlled markov chains with density safety upper bound constraints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In 2016 American Control Conference (ACC), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 6290–6295, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' doi: 10.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1109/ACC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='7526658.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' G´abor, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Kalm´ar, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Szepesv´ari, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Multi-criteria reinforcement learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In ICML, volume 98, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 197– 205, 1998.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Haarnoja, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Zhou, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Hartikainen, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Tucker, G.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Ha, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Tan, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Kumar, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Zhu, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Gupta, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Abbeel, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Soft actor-critic algorithms and applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' arXiv preprint arXiv:1812.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='05905, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Hou, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Yeoh, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Varakantham, P.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Revisiting risk- sensitive mdps: New algorithms and results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In Pro- ceedings of the International Conference on Automated Planning and Scheduling, volume 24, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 136–144, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Jain, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Khetarpal, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Precup, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Safe option-critic: learning safety in the option-critic architecture.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The Knowledge Engineering Review, 36, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Junges, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Jansen, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Dehnert, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Topcu, U.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Katoen, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Safety-constrained reinforcement learning for mdps.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' CoRR, abs/1510.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='05880, 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' URL http://arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' org/abs/1510.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='05880.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Leike, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Martic, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Krakovna, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Ortega, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Everitt, T.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Lefrancq, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Orseau, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Legg, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Ai safety gridworlds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' arXiv preprint arXiv:1711.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='09883, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Leurent, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' An environment for autonomous driv- ing decision-making.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='com/ eleurent/highway-env, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Levine, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Popovic, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Koltun, V.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Nonlinear inverse re- inforcement learning with gaussian processes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Advances in neural information processing systems, 24, 2011.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Lucas, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' and Klaassen, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Extreme returns, downside risk, and optimal asset allocation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Journal of Portfolio Management, 25(1):71, 1998.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Mastronarde, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' and van der Schaar, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Fast reinforcement learning for energy-efficient wireless communications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' CoRR, abs/1009.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5773, 2010.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' URL http://arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' org/abs/1009.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5773.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Mnih, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Kavukcuoglu, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Silver, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Rusu, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Veness, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Bellemare, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Graves, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Riedmiller, M.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Fidje- land, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Ostrovski, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Human-level control through deep reinforcement learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' nature, 518(7540): 529–533, 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Moldovan, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' and Abbeel, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Safe exploration in markov decision processes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' CoRR, abs/1205.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4810, 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' URL http://arxiv.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='org/abs/1205.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4810.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Ono, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Pavone, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Kuwata, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Balaram, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Chance- constrained dynamic programming with application to risk-aware robotic space exploration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Auton.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Robots, 39 (4):555–571, dec 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' ISSN 0929-5593.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' doi: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1007/ s10514-015-9467-7.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' URL https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 1007/s10514-015-9467-7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Ray, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Achiam, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Amodei, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Benchmarking safe ex- ploration in deep reinforcement learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' arXiv preprint arXiv:1910.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='01708, 7:1, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Rockafellar, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Uryasev, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', et al.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Optimization of condi- tional value-at-risk.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Journal of risk, 2:21–42, 2000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Satija, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Amortila, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Pineau, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Constrained markov decision processes via backward value functions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In ICML, 2020a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Solving Constrained RL through Augmented State and Reward Penalties Satija, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Amortila, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Pineau, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Constrained markov decision processes via backward value functions.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In In- ternational Conference on Machine Learning, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 8502– 8511.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' PMLR, 2020b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Yang, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Sim˜ao, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', Tindemans, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', and Spaan, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Wcsac: Worst-case soft actor critic for safety-constrained reinforcement learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In AAAI, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 10639–10646, 2021.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Solving Constrained RL through Augmented State and Reward Penalties A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' SAC Pseudocode Algorithm 2 provides the pseudocode for the Safe SAC algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Algorithm 2 SAC with Extended State Space 1: Initialize: policy network π with weight θ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 2: Value Function: Q1, Q2 with weights φ1, φ2, target Q value functions Qtarg,1, Qtarg,2 with weights φtarg 1 = φ1, φtarg 2 = φ2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 3: Cost Function: Q1 D, Q2 D with weights θ1,D, θ2,D, target cost functions Qtarg,1 D , Qtarg,2 D with weights θtarg 1,D = θ1,D, θtarg 2,D = φ2,D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 4: for episode=1,2,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=',N do 5: Get initial state-cost pair (s0, c0 = 0);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' t ← 1 6: while t ≤ T do 7: tstart ← t 8: while t ≤ tstart + n or t == T do 9: Select action at using Equation 6.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 10: Execute at, observe (st+1, ct+1) and rt.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' 11: t ← t + 1 12: end while 13: {Calculate targets for each network:} 14: ˜rt ← if ct > cmax then rt − ∆dt/γt elif ct+1 > cmax then rt − ∆(ct + dt)/γt else rt 15: R ← if t == T then 0 else ˜rt + γ mini=1,2 Qtarg,i((st+1, ct+1), ˜a′) − α log πθ(˜a′), ˜a′ ∼ πθ((st+1, ct+1)) 16: RD ← if t == T then 0 else maxi=1,2 Qtarg,i D ((st+1, ct+1), at+1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' θD) 17: {Update networks} 18: for i ∈ {t − 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', tstart} do 19: R ← ri + αR, RD ← di + αRD 20: for j = 1, 2 do 21: dφj ← dφj + ∂(R − Qj)2/∂φj 22: dθj,D ← dθj,D + ∂(RD − Qj D)2/∂θj,D 23: end for 24: if the policy is safe then 25: dθ ← dθ + ∇θ log π(ai)(minj=1,2 Qtarg,j − α log π(ai)) 26: else 27: dθ ← dθ − ∇θ log π(ai)RD 28: end if 29: end for 30: {Update target networks} 31: end while 32: end for B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proofs B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof of Theorem 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2 Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' If we set ∆ = ∞, then if π∗ solves (EMDP), it also solves the following worst-case constrained MDP problem max π E � T � t=0 γtr(st, at)|s0, π � s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' � st∈τ d(st) ≤ cmax, ∀τ ∼ π.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' As a result, π∗ is feasible to the risk-neutral CMDP (RN-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We first see that there is a unique mapping between a trajectory τ = {s0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' , sT } from the original MDP to a Solving Constrained RL through Augmented State and Reward Penalties trajectory of the extended MDP τ ′ = {(s0, c0), (s1, c1) .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' , (sT , cT )} with c0 = 0 and ct = �t−1 i=0 d(st).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Under the reward penalties, we can write the objective of the extended MDP as E � T � t=0 γtr(at|st, ct)|s0, π � = � τ ′={(st,ct)}∼π Pπ(τ ′) �� t γt�r(at|st, ct) � = � τ={s0,s1,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='}∼π D(τ)≤cmax Pπ(τ) �� t γtr(st, at) � + � τ={s0,s1,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='}∼π D(τ)>cmax Pπ(τ) �� t γtr(st, at) − ∆ � t d(st) � = Eπ �� t γtr(st, at) � − ∆ � τ∼π D(τ)>cmax Pπ(τ)D(τ) (7) As a result, we can rewrite the MDP problem (EMDP) as max π � � � � � Eπ �� t γtr(st, at) � − ∆ � τ∼π D(τ)>cmax Pπ(τ)D(τ) � � � � � (8) So, if we set ∆ = ∞, to maximize the expected reward, we need to seek a policy that assigns zero probabilities for all the trajectories τ such that D(τ) > cmax.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Let Π be the set of policies satisfying that condition (and assume that Π is not empty), i.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', for any policy π ∈ Π and any trajectory τ such that D(τ) > cmax, Pπ(τ) = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' This implies that when ∆ = ∞, (8) is equivalent to Eπ∈Π �� t γtr(st, at) � which is also the worst-case CMDP problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof of Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='3 Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Let φ∗ = cmax − maxπ � �Eπ[D(τ)| D(τ) ≤ cmax] � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Given any policy π, if �Eπ[D(τ)| D(τ) > cmax] ≤ φ∗, then Eπ[D(τ)] ≤ cmax.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' For a policy π satisfying �Eπ[D(τ)| D(τ) > cmax] ≤ φ∗, we have � τ| D(τ)>cmax Pπ(τ)D(τ) ≤ cmax − max π � �Eπ[D(τ)| D(τ) ≤ cmax] � , which is equivalent to � τ| D(τ)>cmax Pπ(τ)D(τ) + max π � �Eπ[D(τ)| D(τ) ≤ cmax] � ≤ cmax implying cmax ≥ � τ| D(τ)>cmax Pπ(τ)D(τ) + � τ| D(τ)≤cmax Pπ(τ)D(τ) = Eπ[D(τ)] which is the desired inequality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof of Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4 Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Given ∆ > 0, let π∗ be an optimal solution to (EMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We have �Eπ∗ [D(τ)| D(τ) > cmax] ≤ Ψ∗ − Ψ ∆ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Solving Constrained RL through Augmented State and Reward Penalties Proof.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We first note that, from (8), we can write π∗ = argmaxπ � � � � � Eπ �� t γtr(st, at) � − ∆ � τ D(τ)≥cmax Pπ(τ)D(τ) � � � � � Let π be an optimal policy to the worst-case CMDP (WC-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Since π is also feasible to the extended MDP (EMDP),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' we have Eπ∗ �� t γtr(st,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' at) � − ∆ � τ D(τ)≥cmax Pπ∗(τ)D(τ) ≥ Eπ �� t γtr(st,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' at) � = Ψ (9) Moreover,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' since Ψ∗ is the optimal value of the original unconstrained problem Ψ∗ = maxπ E ��T t=0 γtr(st,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' at)|s0,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' π � ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' we should have Ψ∗ ≥ Eπ∗ �� t γtr(st,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' at) � (10) Combining (24) and (10) gives Ψ∗ − ∆ � τ D(τ)≥cmax Pπ∗(τ)D(τ) ≥ Ψ,' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' implying � τ| D(τ)≥cmax Pπ∗(τ)D(τ) ≤ Ψ∗ − Ψ ∆ ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' which is the desired inequality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof of Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5 Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' For any ∆ ≥ Ψ∗−Ψ φ∗ a solution to (EMDP) is always feasible to the risk-neutral CMDP (RN-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The theorem is a direct result from Lemmas 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='3 and 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' That is, by selecting ∆ ≥ Ψ∗−Ψ φ∗ , from Lemm 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4 we can guarantee that � τ| D(τ)≥cmax Pπ∗(τ)D(τ) ≤ Ψ∗ − Ψ ∆ ≤ φ∗, (11) where π∗ is an optimal policy to (EMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' From Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4, (11) also implies that π∗ is also feasible to the risk-neutral CMDP (RN-CMDP), as desired.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof of Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6 Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' For any ∆ ≥ (Ψ∗ − Ψ)/(αcmax), a solution to (EMDP) is always feasible to (VaR-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We use Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4 to see that if π∗ is a solution to (EMDP), then it satisfies �Eτ∼π∗ � D(τ)| D(τ) > cmax � ≤ Ψ∗ − Ψ ∆ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (12) On the other hand, we have �Eτ∼π∗ � D(τ)| D(τ) > cmax � = � τ|D(τ)>cmax Pπ∗(τ)D(τ) > cmax � τ|D(τ)>cmax Pπ∗(τ) = cmaxPπ∗(D(τ) > cmax)) (13) Solving Constrained RL through Augmented State and Reward Penalties Thus, if we select ∆ ≥ (Ψ∗ − Ψ)/(αcmax), we will have the following chain of inequalities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' α ≥ Ψ∗ − Ψ ∆cmax (a) ≥ 1 cmax �Eτ∼π∗ � D(τ)| D(τ) > cmax � (b) ≥ Pπ∗(D(τ) > cmax).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' where (a) is due to (12) and (b) is due to (13).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' This implies that π∗ is feasible to the chance-constrained MDP (VaR-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We complete the proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof of Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='7 Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' If we define the reward penalties as � � � � � � � � � �r(at|(st, ct)) = r(at|st) if ct + d(st) ≤ cmax �r(at|(st, ct)) = r(at|st) − ∆(t + 1)/γt if ct ≤ cmax and ct + d(st) > cmax �r(at|(st, ct)) = r(at|st) − ∆/γt if ct > cmax then if π∗ is an optimal solution to (EMDP), then there is α∆ ∈ [0;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Ψ∗−Ψ ∆T ] (α is dependent of ∆) such that π∗ is also optimal to (VaR-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Moreover lim∆→∞ α∆ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Under the reward setting, we can write the objective of (EMDP) as Eπ � T � t=0 γtr(at|st, ct)|s0 � = � τ ′={(st,ct)}∼π Pπ(τ ′) �� t γt�r(at|st, ct) � = � τ={s0,s1,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='}∼π D(τ)≤cmax Pπ(τ) �� t γtr(st, at) � + � τ={s0,s1,.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='}∼π D(τ)>cmax Pπ(τ) �� t γtr(st, at) − ∆T � = Eπ �� t γtr(st, at) � − ∆TPπ(D(τ) > cmax).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (14) We now show that if π∗ is an optimal policy to (EMDP), then it is also optimal for (VaR-CMDP) with where α∆ = Pπ∗(D(τ) > cmax).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' By contradiction, let us assume that it is not the case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Let π be optimal for (VaR-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We first see that π∗ is feasible to (VaR-CMDP), thus Eπ∗ � T � t=0 γtr(st, at) � < Eπ � T � t=0 γtr(st, at) � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (15) Moreover, since π is feasible to (VaR-CMDP), we have: Pπ(D(τ) > cmax) ≤ Pπ∗(D(τ) > cmax).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (16) Combine (15) and (16) and (14), it can be seen that π∗ is not an optimal policy to (EMDP), which is contrary to our initial assumption.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' So, π∗ is an optimal policy for the (VaR-CMDP).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We now prove that lim∆→∞ α∆ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' To this end, we first see that if �π is an optimal solution to the worst-case CMDP (WC-CMDP), then P�π(D(τ) > cmax) = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Thus, we have the Solving Constrained RL through Augmented State and Reward Penalties following chain of inequalities Ψ∗ − ∆Tα∆ ≥ Eπ∗ �� t γtr(st, at) � − ∆TPπ∗(D(τ) > cmax) ≥ E�π �� t γtr(st, at) � − ∆TP�π(D(τ) > cmax) = E�π �� t γtr(st, at) � = Ψ Thus α∆ ≤ Ψ∗ − Ψ ∆T .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' implying lim∆→∞ α∆ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof of Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='8 Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='8.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' If we define the reward penalties as � � � � � � � � � �r(at|(st, ct)) = r(at|st) if ct + d(st) ≤ cmax �r(at|(st, ct)) = r(at|st) − ∆(ct + d(st) − cmax)/γt if ct ≤ cmax and ct + d(st) > cmax �r(at|(st, ct)) = r(at|st) − ∆d(st)/γt if ct > cmax then for any ∆ > 0, there is β∆ ∈ [0;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Ψ∗−Ψ ∆ ] (β∆ is dependent of ∆) such that any optimal solution to the extended CMDP (EMDP) is also optimal to the following risk-averse CMDP max π E � T � t=0 γtr(st, at)|s0, π � s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Eτ∼π � (D(τ) − cmax)+� ≤ β∆.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (CVaR-CMDP) Moreover, lim∆→∞ β∆ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We first see that, under the reward penalties defined above, the objective of (EMDP) becomes Eπ � T � t=0 γtr(at|st, ct)|s0 � = � τ ′={(st,ct)}∼π Pπ(τ ′) �� t γt�r(at|st, ct) � = � τ={s0,s1,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='}∼π D(τ)≤cmax Pπ(τ) �� t γtr(st, at) � + � τ={s0,s1,.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='}∼π D(τ)>cmax Pπ(τ) �� t γtr(st, at) − ∆ �� t d(st) − cmax �� = Eπ �� t γtr(st, at) � − ∆ � τ∼π D(τ)>cmax Pπ(τ)(D(τ) − cmax) = Eπ �� t γtr(st, at) � − ∆Eτ∼π � (D(τ) − cmax)+� (17) We now show that if π∗ is an optimal policy to (EMDP), then it is also optimal for (CVaR-CMDP) with where β∆ = Eτ∼π∗ � (D(τ) − cmax)+� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' By contradiction, let us assume that π∗ is not optimal for (CVaR-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We then let π be optimal for (CVaR-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We first see that π∗ is feasible to (CVaR-CMDP),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' thus Eπ∗ � T � t=0 γtr(st,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' at) � < Eπ � T � t=0 γtr(st,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' at) � (18) Solving Constrained RL through Augmented State and Reward Penalties Moreover,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' since π is feasible to (CVaR-CMDP),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} 
+page_content=' we have: Eτ∼π � (D(τ) − cmax)+� ≤ β∆ = Eτ∼π∗ � (D(τ) − cmax)+� (19) Combine (18) and (19) we get Eπ∗ � T � t=0 γtr(st,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' at) � − ∆Eτ∼π∗ � (D(τ) − cmax)+� < Eπ � T � t=0 γtr(st,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' at) � − ∆�Eτ∼π � (D(τ) − cmax)+� (20) Using (17),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (20) implies that �π yields a strictly better objective value to the extended MDP,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' as compared to π∗,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' which is contrary to the assumption that π∗ is optimal for (EMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' So, π∗ should be an optimal policy for the (CVaR-CMDP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We now prove that lim∆→∞ β∆ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' To this end, we first see that if �π is an optimal solution to the worst-case CMDP (WC-CMDP), then �Eτ∼�π � (D(τ) − cmax)+� = 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Thus, we have the following chain of inequalities: Ψ∗ − ∆β∆ ≥ Eπ∗ �� t γtr(st, at) � − ∆Eτ∼π∗ � (D(τ) − cmax)+� ≥ E�π �� t γtr(st, at) � − ∆Eτ∼�π � (D(τ) − cmax)+� = E�π �� t γtr(st, at) � (21) We recall that E�π [� t γtr(st, at)] = Ψ (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', objective value of the worst-case CMDP), thus, β∆ ≤ Ψ∗ − Ψ ∆ , implying lim∆→∞ β∆ = 0 as desired.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Multi-constrained MDP We present, in the following, a series of theoretical results for the multi-constrained MDP discussed in the main body of the paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Similar to the single-constrained case, we will show that If ∆k = ∞ for all k ∈ [K], then (2) is equivalent to a worst-case CMDP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' There is a lower bound for each ∆k such that any optimal policy to (2) will always be feasible to a given risk-neutral or chance-constrained MDP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' By employing different reward penalty settings, (2) is equivalent to a VaR or CVaR CMDP.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Since all the proofs are similar to those in the single-constrained case, we keep them brief.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proposition C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' If we set ∆k = ∞ for all k ∈ [K], then the extended MDP is equivalent to the following worst-case CMDP max π E � T � t=0 γtr(st, at)|s0, π � s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' � st∈τ dk(st) ≤ ck max, ∀τ ∼ π, ∀k ∈ [K] (22) Solving Constrained RL through Augmented State and Reward Penalties Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Similar to the proof of Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2, we write the objective of the extended MDP as E � T � t=0 γtr(at|st, cK t )|s0, π � = � τ ′={(st,cK t )}∼π Pπ(τ ′) �� t γt�r(at|st, cK t ) � = � τ={s0,s1,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='}∼π D(τ)≤cmax Pπ(τ) �� t γtr(st, at) � + � τ={s0,s1,.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='}∼π D(τ)>cmax Pπ(τ) � �� t γtr(st, at) − � k∈[K] ∆k � t dk(st) � � = Eπ �� t γtr(st, at) � − � k∈[K] ∆k � τ∼π Dk(τ)≥ck max Pπ(τ)Dk(τ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (23) So, if ∆k = ∞, then one needs to seek a policy that assigns zero probabilities to all the trajectories that violate the constraints, implying that the extended MDP would yield the same optimal policies as the worst-case CMDP (22).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proposition C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Let π∗ and π be optimal policies to the extended MDP (2) and the worst-case MDP, and φk = ck max − maxπ �Eπ[Dk(τ)|Dk(τ) ≤ ck max], ∀k ∈ [K].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' If we choose ∆k such that ∆k > (Ψ∗ − Ψ)/φk, then any optimal policy of (2) is feasible to the risk-neutral CMDP with multiple constraints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Since π is also feasible to (2), we have: Eπ∗ �� t γtr(st, at) � − � k∈[K] ∆k � τ Dk(τ)>ck max Pπ∗(τ)Dk(τ) ≥ Eπ �� t γtr(st, at) � = Ψ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (24) Moreover, since Ψ∗ is an optimal value of the original unconstrained MDP, we have Ψ∗ ≥ Eπ∗ [� t γtr(st, at)], leading to � k∈[K] ∆k � τ Dk(τ)≥ck max Pπ∗(τ)Dk(τ) ≤ Ψ∗ − Ψ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (25) Moreover, from Lemma 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='3, we know that if �Eπ[Dk(τ)| Dk(τ) > ck max] ≤ φk, then Eπ[Dk(τ)] < ck max, where φk = ck max − maxπ �Eπ[Dk(τ)|Dk(τ) ≤ cmax].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Therefore, if we select ∆k ≥ (Ψ∗ − Ψ)/φk, then from (25) we see that �Eπ∗[Dk(τ)| Dk(τ) > ck max] ≤ φk for all k ∈ [K], implying that π∗ satisfies all the constraints, as desired.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proposition C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Given any αk ∈ (0, 1), k ∈ [K], if we choose ∆k ≥ (Ψ∗ − Ψ)/(αkck max), ∀k ∈ [K], then a solution π∗ to (2) is always feasible to the following VaR (or chance-constrained) MDP.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' max π E � T � t=0 γtr(st, at)|s0, π � s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Pπ � (Dk(τ) > ck max � ≤ αk, ∀k ∈ [K] (26) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' From the proof of Proposition C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2 above, we have the following inequalities Ψ∗ − Ψ ≥ � k∈[K] ∆k � τ Dk(τ)>ck max Pπ∗(τ)Dk(τ) ≥ � k∈[K] ∆kck maxPπ∗(Dk(τ) > ck max) So if we choose ∆k ≥ (Ψ∗ − Ψ)/(αkck max), ∀k ∈ [K], we will have Ψ∗ − Ψ ≥ (Ψ∗ − Ψ) (αkckmax)ck maxPπ∗(Dk(τ) > ck max), ∀k ∈ [K], implying Pπ∗(Dk(τ) > ck max) ≤ αk, as desired.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Solving Constrained RL through Augmented State and Reward Penalties Proposition C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' If we define the reward penalties as �r(at|(st, cK t )) = r(at|st) − � k∈[K] ∆kδk(ct), ∀st, at, cK t , where δk(ct), ∀k ∈ [K], are defined as follows: δk(ct) = � � � � � 0 if ck t + dk(st) ≤ ck max (T + 1)/γt if ck t ≤ ck max, ck t + dk(st) > ck max 1/γt if ck t > ck max, then if π∗ is an optimal solution to (EMDP), there is α∆ ∆ ∆ k ∈ [0;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Ψ∗−Ψ T ∆k ] (αk is dependent of ∆ ∆ ∆)1 such that π∗ is also optimal to the following VaR CMDP max π E � T � t=0 γtr(st, at)|s0, π � s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Pπ � (D(τ) > ck max � ≤ α∆ ∆ ∆ k , ∀k ∈ [K].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' (27) Moreover lim∆k→∞ α∆ ∆ ∆ k = 0, ∀k ∈ [K].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Similar to the proof of Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6, we can write the objective of the extended MDP as Eπ �� t γtr(at|st, cK t ) � = Eπ �� t γtr(st, at) � − � k∈[K] ∆kTPπ(Dk(τ) > ck max) Then, in a similar way, if we let α∆ ∆ ∆ k = TPπ∗(Dk(τ) > ck max), then π∗ should be an optimal policy to (27).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In addition, we can bound αk by deriving the following inequalities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Ψ∗ − � k∈[K] ∆kTα∆ ∆ ∆ k ≥ Eπ∗ �� t γtr(st, at) � − � k∈[K] ∆kTPπ∗(Dk(τ) > ck max) ≥ E�π �� t γtr(st, at) � − � k∈[K] ∆kTP�π(Dk(τ) > ck max) = E�π �� t γtr(st, at) � = Ψ, (28) where �π and Ψ are optimal policy and optimal value of the worst-case CMDP (22).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' This implies � k∈[K] ∆kTα∆ ∆ ∆ k ≤ Ψ∗ − Ψ, which tells us that α∆ ∆ ∆ k ≤ Ψ∗−Ψ T ∆k , implying that lim∆k→∞ α∆ ∆ ∆ k = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proposition C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' For any ∆k > 0, k ∈ [K], if we define the reward penalties as �r(at|(st, cK t )) = r(at|st) − � k∈[K] ∆kδk(ct), ∀st, at, cK t , where δk(ct), ∀k ∈ [K], are defined as follows: δk(ct) = � � � � � 0 if ck t + dk(st) ≤ ck max (ck t + dk t − ck max)/γt if ck t ≤ ck max, ck t + dk(st) > ck max dk t /γt if ck t > ck max, 1∆ ∆ ∆ denotes the vector (∆1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' , ∆K) Solving Constrained RL through Augmented State and Reward Penalties then there are β∆ ∆ ∆ k ∈ [0;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Ψ∗−Ψ ∆k ] (β∆ ∆ ∆ k is dependent of ∆ ∆ ∆) such that any optimal solution π∗ to the extended CMDP (2) is also optimal to the following multi-constrained CVaR CMDP max π E � T � t=0 γtr(st, at)|s0, π � s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Eτ∼π � (D(τ) − cmax)+� ≤ β∆ ∆ ∆ k , ∀k ∈ [K] (29) Moreover, lim∆k→∞ β∆ ∆ ∆ k = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Under the reward setting, we first write the objective function of the extended MDP as Eπ �� t γtr(at|st, cK t ) � = Eπ �� t γtr(st, at) � − � k∈[K] ∆kEπ � (Dk(τ) − ck max)+� Following the same derivations as in the proof of Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='8, we can further show that, by contradiction, π∗ is also optimal for the CVaR CMDP (29) with β∆ ∆ ∆ k = Eπ∗ � (Dk(τ) − ck max)+� .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' To prove lim∆k→∞ β∆ ∆ ∆ k = 0, we derive similar inequalities as in the proof of Proposition C.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4, as follows: Ψ∗ − � k∈[K] ∆kβ∆ ∆ ∆ k ≥ Eπ∗ �� t γtr(st, at) � − � k∈[K] ∆kEπ∗ � (Dk(τ) − ck max)+� ≥ E�π �� t γtr(st, at) � − � k∈[K] ∆kE�π � (Dk(τ) − ck max)+� = E�π �� t γtr(st, at) � = Ψ, implying that β∆ ∆ ∆ k ≤ Ψ∗−Ψ ∆k , thus lim∆k→∞ β∆ ∆ ∆ k = 0 as desired.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Experimental Results on Puddle Environment D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Continuous Puddle Environment: RN-CMDP Inspired by (Jain et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=', 2021), we test all the methods on the continuous puddle environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The environment is shown in Figure 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' It is a continuous two-dimensional state-space environment in [0, 1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The agent starts at the bottom left corner of the map (0, 0) and the objective is to move to the goal at the upper right corner (1, 1).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The agent can move in four directions and occasionally agent will execute a random action with probability p = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='05 instead of the one selected by the agent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In each position transition, noise is drawn from the Uniform[−0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='025, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='025] distribution and added to both coordinates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' When the agent is within 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1 L1 distance from the goal state, the agent can be seen as reaching the goal and receive a reward of 100 while agent gets a time penalty as -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1 at each time step.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' There is a square puddle region centering at (0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5, 0.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5) with 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4 height.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In each time step, if agent is located in the puddle area, it gets a cost of 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Due to the existence of noise, we cannot set the threshold cmax too small as it would be hard for agent to reach the goal, so we set cmax = 8, meaning agent could stay in puddle area for at most 8 time steps.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We show the results in Figure 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' As can be seen from the figure, safe SAC could outperform other methods in both reward and cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Although safe DQN can always satisfy the constraint, it always fail to reach the goal to get the maximum reward.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' For BVF, when the backward value function succeeds to estimate the cost, the reward starts to decrease and worse than safe SAC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' E.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Experimental Results on Highway Merge Environment We also evaluate our safe methods on another highway environment - merge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The environment is shown in Figure 5 where agent needs to take actions to complete merging with other vehicles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The rewards are similar to those in highway Solving Constrained RL through Augmented State and Reward Penalties Figure 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Performance in Puddle Environment Figure 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Merge environment and reward, cost comparison of different approaches environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Figure 5 shows a comparison of our safe methods with other benchmarks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Although Safe DQN fails to complete the task in merge environment, Safe SAC still outperforms BVF and unsafe DQN with better score and lower cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' The reason that safe DQN fails is that the combinations of extended space is too large in merge environment for safe DQN to figure it out.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' That is also why safe DQN converges quite slowly in highway environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' As safe DQN is unable to deal with large size of state space, safe SAC outperforms safe DQN in continuous environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' Hyperparameters In case of discrete environment - GridWorld, the size of state space is 8 × 8 with 18 pits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In Highway environment (including merge), related parameters and their values are listed below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' There is an additional reward in merge environment named mergingspeedreward with value of -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' It penalties the agent if it drives with speed less than 30 while merging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' lanes count: Number of lanes, setting as 4 in both environments.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' vehicles count: Number of vehicles on lanes, setting as 50 in both environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' controlled vehicles: Number of agents, setting as 1 in both environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' duration: Duration of the game, setting as 40 in both environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' ego spacing: The space of vehicles, setting as 2 in both environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' vehicles density: The density of vehicles on lanes, setting as 1 in both environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' reward speed range: The range where agent can receive high speed reward, setting as [20, 30] in both environ- ments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' high speed reward: Reward received when driving with speed in reward speed range, setting as 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4 in highway while 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2 in merge.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Average Score in each Episode ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='75 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Unsafe DQN ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Safe DQN ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='BVF ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='50 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Safe SAC ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Lyapunov ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='25 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Score ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='25 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='50 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='75 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='250 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='500 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='750 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1250 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1500 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1750 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='EpisodeAverage Constraint in each Episode ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Unsafe DQN ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='350 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Safe DON ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='BVF ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='300 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Safe SAC ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Lyapunov ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='250 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Constraint ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='200 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='150 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='50 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='250 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='500 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='750 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1250 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1500 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1750 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='EpisodeAverage Score in each Episode ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='15 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='14 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='13 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='12 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Unsafe DON ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Safe DQN ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Score ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='11 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='BVF ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Safe SAC ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Lyapunov ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='9 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='8 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='8000 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='10000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='12000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='14000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='EpisodeAverage Constraint in each Episode ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Unsafe DQN ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='16 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Safe DON ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='BVF ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Safe SAC ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='14 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Lyapunov ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='Constraint ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='12 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='10 ' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='8 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='2000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='4000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='6000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='8000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='10000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='12000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='14000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='EpisodeSolving Constrained RL through Augmented State and Reward Penalties ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='collision reward: Reward received when colliding with a vehicle,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' setting as -1 in both environments.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' right lane reward: Reward received when driving on the right-most lane, setting as 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='1 in both environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' lane change reward: Reward received when taking lane change action, setting as 0 in highway while -0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content='05 in merge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' In all the methods, we use networks with a hidden layer size of 64,64,64 along with the ReLu activation and use Adam optimizer to optimize the networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} +page_content=' We test our methods on GridWorld, Highway, Safety Gym, Puddle, Highway for 15000, 15000, 1000, 2000, 15000 episodes respectively and update the network every 4 steps.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8dFJT4oBgHgl3EQfnix6/content/2301.11592v1.pdf'} diff --git a/8tE3T4oBgHgl3EQfqgri/content/2301.04653v1.pdf b/8tE3T4oBgHgl3EQfqgri/content/2301.04653v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f82f46df1a60d2e5b0dc0f891608be8668cdb83f --- /dev/null +++ b/8tE3T4oBgHgl3EQfqgri/content/2301.04653v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:baa94911d44951382f0e2319006387f14a3583937a2639019029b4b9a429596e +size 1045248 diff --git a/8tE3T4oBgHgl3EQfqgri/vector_store/index.pkl b/8tE3T4oBgHgl3EQfqgri/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..25a6a938e93bf044da0ff6c86948c8a46350347d --- /dev/null +++ b/8tE3T4oBgHgl3EQfqgri/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4cd12392e60ea48971d0e6752c7278369b34d61b81b6a754178796b6e42cbd7 +size 252434 diff --git a/A9E2T4oBgHgl3EQf8Qn_/content/2301.04218v1.pdf b/A9E2T4oBgHgl3EQf8Qn_/content/2301.04218v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a1e37f394850b689fafcc435c14c0aef790ed6f1 --- /dev/null +++ b/A9E2T4oBgHgl3EQf8Qn_/content/2301.04218v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:636898bcc99640cb2ee1c1abb8e52729206d398fa2da146c41cfaea1cf426a46 +size 3047682 diff --git a/BdAzT4oBgHgl3EQfGPtU/content/2301.01024v1.pdf b/BdAzT4oBgHgl3EQfGPtU/content/2301.01024v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..631472a02e83d7bfd032cec8d04bfb2873cd51c5 --- /dev/null +++ b/BdAzT4oBgHgl3EQfGPtU/content/2301.01024v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1daabb6f590d68569f63b1c2a009b6eb11dedc199c59dfedaf4426924ef37358 +size 621456 diff --git a/BdAzT4oBgHgl3EQfGPtU/vector_store/index.faiss b/BdAzT4oBgHgl3EQfGPtU/vector_store/index.faiss new file mode 100644 index 
0000000000000000000000000000000000000000..e0b22c5443bcb4c93bfa4f53598f99b79172bd3b --- /dev/null +++ b/BdAzT4oBgHgl3EQfGPtU/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be6b34dcd88db6bfe97d59b9686b03071232efa4677fbdb3774c9db5fed6ba97 +size 2752557 diff --git a/BdAzT4oBgHgl3EQfGPtU/vector_store/index.pkl b/BdAzT4oBgHgl3EQfGPtU/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..cdbd3891e31069f3ce7cf80b42c32564ca574acb --- /dev/null +++ b/BdAzT4oBgHgl3EQfGPtU/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:992be76deab4833ad76b37bb955ef138c463bce433d04be90ca51d138dbc72de +size 101722 diff --git a/BtE0T4oBgHgl3EQfyAJM/content/2301.02653v1.pdf b/BtE0T4oBgHgl3EQfyAJM/content/2301.02653v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f567a9c0bb5992b59b6bc9e4b4f841c93c215682 --- /dev/null +++ b/BtE0T4oBgHgl3EQfyAJM/content/2301.02653v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13d2a97a4ae2ce19488f810525793f5dd2894a9bc1bf0c5aeddcfa8d4939a8b4 +size 6178295 diff --git a/BtE0T4oBgHgl3EQfyAJM/vector_store/index.faiss b/BtE0T4oBgHgl3EQfyAJM/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..038bea54e6578a09de12b4620df099a050b57f6f --- /dev/null +++ b/BtE0T4oBgHgl3EQfyAJM/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43ab7bc8967068df7c576869757fa17157e189fed87a7e1bbfc6748dcc9e9daa +size 4849709 diff --git a/BtE0T4oBgHgl3EQfyAJM/vector_store/index.pkl b/BtE0T4oBgHgl3EQfyAJM/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..687a25cd0abf4ff628b8de5bc00204f277d841e8 --- /dev/null +++ b/BtE0T4oBgHgl3EQfyAJM/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4e3f0c37430afe43db793d93c62584ddbe089722508a9930b5833d841820f067 +size 178181 diff --git a/BtE1T4oBgHgl3EQf9gZu/vector_store/index.faiss b/BtE1T4oBgHgl3EQf9gZu/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..4fe32a2798987468fa760a323f53f93a0e10ac6d --- /dev/null +++ b/BtE1T4oBgHgl3EQf9gZu/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31a715046e9006154ec1ae1a1d7b87ab50ae8b0cb2a97172f6432448e9d07b4e +size 4063277 diff --git a/BtE1T4oBgHgl3EQf9gZu/vector_store/index.pkl b/BtE1T4oBgHgl3EQf9gZu/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..d1aecbc275554d23319f7dc87ddc9ee7be2f0c45 --- /dev/null +++ b/BtE1T4oBgHgl3EQf9gZu/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5cf0dab3ef0e9515b5e1a560471e63ec882e5f7360fc4260c5069a2bb597cfe +size 148859 diff --git a/CNE2T4oBgHgl3EQf9AkT/content/tmp_files/2301.04223v1.pdf.txt b/CNE2T4oBgHgl3EQf9AkT/content/tmp_files/2301.04223v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..e137c488a7122d60af0e753984a16cb94e4ab27d --- /dev/null +++ b/CNE2T4oBgHgl3EQf9AkT/content/tmp_files/2301.04223v1.pdf.txt @@ -0,0 +1,2202 @@ +Red Emission from Copper-Vacancy Color Centers in Zinc Sulfide Colloidal +Nanocrystals +Sarah M. Thompson,1 C¨uneyt S¸ahin,2, 3 Shengsong Yang,4 Michael E. Flatt´e,3, 5 +Christopher B. Murray,4, 6 Lee C. Bassett,7, ∗ and Cherie R. 
Kagan1, 8, 9, ∗ +1Department of Electrical and Systems Engineering, +University of Pennsylvania, Philadelphia Pennsylvania 19104, USA +2UNAM – National Nanotechnology Research Center and Institute of +Materials Science and Nanotechnology, Bilkent University, Ankara, Turkey +3Department of Physics and Astronomy, University of Iowa, Iowa City IA, 52242, USA +4Department of Chemistry, University of Pennsylvania, Philadelphia PA, 19104, USA +5Department of Applied Physics, Eindhoven University of Technology, +P. O. Box 513, 5600 MB Eindhoven, The Netherlands +6Department of Materials Science and Engineering, +University of Pennsylvania, Philadelphia PA, 19104, USA +7Department of Electrical and Systems Engineering, +University of Pennsylvania, Philadelphia PA, 19104, USA +8Department of Materials Science and Engineering, +University of Pennsylvania, Philadelphia Pennsylvania 19104, USA +9Department of Chemistry, University of Pennsylvania, Philadelphia Pennsylvania 19104, USA +(Dated: January 12, 2023) +Copper-doped zinc sulfide (ZnS:Cu) exhibits down-conversion luminescence in the UV, visible, +and IR regions of the electromagnetic spectrum; the visible red, green, and blue emission is referred +to as R-Cu, G-Cu, and B-Cu, respectively. The sub-bandgap emission arises from optical transitions +between localized electronic states created by point defects, making ZnS:Cu a prolific phosphor ma- +terial and an intriguing candidate material for quantum information science, where point defects +excel as single-photon sources and spin qubits. Colloidal nanocrystals (NCs) of ZnS:Cu are partic- +ularly interesting as hosts for the creation, isolation, and measurement of quantum defects, since +their size, composition, and surface chemistry can be precisely tailored for bio-sensing and opto- +electronic applications. 
Here, we present a method for synthesizing colloidal ZnS:Cu NCs that emit +primarily R-Cu, which has been proposed to arise from the CuZn-VS complex, an impurity-vacancy +point defect structure analogous to well-known quantum defects in other materials that produce +favorable optical and spin dynamics. First principles calculations confirm the thermodynamic sta- +bility and electronic structure of CuZn-VS. Temperature- and time-dependent optical properties of +ZnS:Cu NCs show blueshifting luminescence and a non-monotonic intensity dependence as temper- +ature is increased from 19 K to 290 K, for which we propose an empirical dynamical model based +on thermally-activated coupling between two manifolds of states inside the ZnS bandgap. Under- +standing of R-Cu emission dynamics, combined with a controlled synthesis method for obtaining +R-Cu centers in colloidal NC hosts, will greatly facilitate the development of CuZn-VS and related +complexes as quantum point defects in ZnS. +I. +INTRODUCTION +Controlled impurity doping of wide-bandgap semi- +conductors can be used to introduce color centers, +which are point defects that activate sub-bandgap, +optical +photoluminescence +(PL). +Color +centers +can +serve as sources of tunable PL for bio-imaging and +opto-electronic applications[1, 2], as well as localized, +optically-addressable, electronic spin states for applica- +tions in quantum information science[3, 4]. +For all of +these applications, colloidal nanocrystals (NCs) can pro- +vide unique advantages over analogous, bulk materials +because they can be processed using wet-chemical meth- +ods, and their large surface areas and effects of quantum +∗ Corresponding +authors. +lbassett@seas.upenn.edu +& +ka- +gan@seas.upenn.edu +confinement allow for highly tunable optical and elec- +tronic properties[5]. 
+Impurity-doped ZnS has long been exploited as a UV, +visible, and NIR luminescent material in its bulk and +colloidal NC forms, and it has more recently been studied +as a potential host material for defect-based quantum +emitters and quantum bits, or defect qubits[6, 7]. Cu- +doping of ZnS introduces sub-bandgap red, green, and +blue PL emission bands that are associated with color +centers known respectively as R-Cu, G-Cu, and B-Cu. +R-Cu color centers are particularly interesting thanks to +their peak PL emission in the NIR biological window. +However, R-Cu remains under-utilized since it is rarely +observed in colloidal ZnS:Cu NCs, which typically emit +visible PL dominated by B-Cu and G-Cu[8, 9]. +Past studies have indicated that R-Cu emission in bulk +ZnS:Cu arises from a defect complex consisting of a sub- +stitutional copper impurity (CuZn) and a sulfur vacancy +arXiv:2301.04223v1 [cond-mat.mtrl-sci] 10 Jan 2023 + +2 +(VS) in a nearest-neighbor CuZn-VS complex[10]. Com- +pared to transition metal-doped phosphors like ZnS:Mn +that rely on electric-dipole-forbidden, intra-d-shell transi- +tions between substitutional MnZn levels to produce visi- +ble PL, the mixed orbital character and lowered symme- +try of the CuZn-VS complex are associated with more +dipole-allowed radiative transitions[11], and therefore +shorter optical lifetimes, as desired for many applications. +Moreover, the symmetry of the CuZn-VS complex is de- +scribed by the C3v point group, which is characteristic of +well-developed defect qubits [12, 13] and is a key factor in +producing favorable defect orbital and spin structures[4]. +R-Cu emission has further been associated with electron +paramagnetic resonance (EPR) spectra which indicate a +paramagnetic ground state [14]. 
These characteristics are +especially compelling in combination with the favorable +properties of ZnS as a host for defect qubits, which in- +clude a high natural abundance of spin-free nuclei and +relatively weak spin-orbit coupling, as well as the ease +of ZnS colloidal NC synthesis and surface modification +compared to hosts materials such as diamond [5, 15]. +Here, we report the synthesis and characterization of +colloidal ZnS:Cu NCs emitting visible PL dominated by +R-Cu. We study the structural, compositional, and time- +and temperature-dependent optical properties of NCs +synthesized with 0, 0.05, 0.075, and 0.1 mol% Cu:Zn. +The R-Cu emission intensity scales with the copper con- +centration, and the R-Cu emission band exhibits com- +plex temperature- and time-dependent properties that +are generally consistent with observations of R-Cu in +bulk ZnS:Cu [16]. In particular, the R-Cu emission peak +blueshifts with increasing temperature from 19 K to 290 +K, and the R-Cu emission intensity increases with in- +creasing temperature between 150 K and 270 K, a phe- +nomenon known as negative thermal quenching (NTQ). +We propose a single mechanism to explain the blueshift +and the NTQ based on thermally-activated carrier trans- +fer between two manifolds of radiative states. This mech- +anism is consistent with time-resolved PL measurements +showing the presence of two distinct radiative transitions +in the R-Cu band at low temperature. +Drawing from +first-principles calculations, we discuss the role of defect +species, spatial arrangement, and charge state in produc- +ing the manifolds of states responsible for measured R-Cu +characteristics. A detailed understanding of these char- +acteristics can facilitate the realization of protocols for +initialization, readout, and control of the defect’s charge +and spin states for development of a quantum defect ar- +chitecture. +II. +RESULTS AND DISCUSSION +A. 
+Synthesis of ZnS:Cu NCs with R-Cu Emission +ZnS NCs are synthesized using the single-source pre- +cursor approach developed by Zhang et al., [17] in which +zinc diethyldithiocarbamate (Zn(Ddtc)2) is thermally de- +FIG. 1. +ZnS:Cu NC synthesis and structure. +(a) +Schematic of the synthesis of R-Cu emitting ZnS:Cu NCs, +where red represents the application of heat. +Photographs +show the reaction vessel before and after NC formation. (b) +Cu:Zn mol% measured by ICP-OES (black circles) as a func- +tion of the Cu:Zn mol% added to the synthesis pot. +The +component weight of the R-Cu peak when the total PL spec- +trum is decomposed by non-negative matrix factorization (red +squares and right-hand vertical axis) also scales with the +Cu:Zn mol% . +The R2 values for the linear regression fits +(black and red lines) are 0.917 and 0.957, respectively. (c) +TEM image and electron diffraction pattern of a sample of +ZnS:Cu NCs with 0.1 mol% Cu:Zn. (d) Distribution of NC +diameters for samples of 100 NCs measured from TEM images +obtained for each Cu:Zn ratio (0-0.1 mol%). +composed in oleic acid (OA) and oleylamine (OM); see +Figure 1a. In previously reported syntheses of colloidal +ZnS:Cu NCs, the absence of R-Cu emission may result +from unintentional Cl impurities introduced by CuCl2 +precursors, which are known to quench R-Cu in bulk +ZnS:Cu along with Al, In, and Ga impurities [10, 14, 16]. +To avoid the introduction of Cl impurities, we instead +add a fixed volume (0.1 mL) of Cu(CH3COO)2·H2O dis- +solved in ultrapure DI water, with concentrations corre- +sponding to Cu:Zn molar ratios of 0 %, 0.05 %, 0.075 %, +and 0.1 %, to the synthesis pot prior to degassing. In the +case of undoped ZnS NCs, the 0.1 mL addition consists +of DI water only. 
Inductively-coupled plasma - optical +emission spectroscopy (ICP-OES) and PL measurement +results (Figure 1b) show that varying the concentration of +the Cu precursor directly influences the final Cu concen- +tration in the NC samples, as well as the relative intensity +of the red PL emitted by the NCs. The PL measurement +results are discussed further in the next subsection. +A representative TEM image of ZnS:Cu NCs with 0.1 +mol% Cu:Zn (Figure 1c) shows that the samples are com- +posed of 7.2±1.2 nm diameter particles. +The NC size +distribution remains consistent across differently-doped +samples (Figure 1d). Electron diffraction measurements +(inset, Figure 1c) exhibit peaks at 2Θ values that corre- + +0.25 +100 +(a) +(b) +Red PL Component +0.2 +80 +口 +0.15 +KOH +60 +Cu(C2HgO2)2°H,0 +Inject +0.1 +40 +DI water +Zn(Ddtc)2 +Oleylamine +Oleic Acid +0.05 +20 +% +precursor +nanocrysta +degasdegas +decomposition +formation +0 +0 +0 +0.05 +0.1 +Cu:Znmoi%DuringSynthesis +(c) +22031 +10 +(d) +111 +NC Diameter (nm) +4 +0%0.05%0.075%0.1% +Cu:Znmol%DuringSvnthesis3 +FIG. 2. +Room-temperature optical properties. +(a) +PL spectra under continuous-wave excitation at λex=375 nm, +normalized to the PL intensity at λem=442 nm from ZnS NCs +synthesized with 0–0.1 mol% Cu:Zn. (b) Absorption spectra +(black curves) and PLE spectra (colored curves) monitoring +the emission intensity at λem=670 nm as a function of λex, +from ZnS NCs synthesized with 0–0.1 mol% Cu:Zn. +spond to the ⟨111⟩, ⟨220⟩, and ⟨311⟩ planes of sphalerite +(zinc blende), according to PDF# 98-000-04053. +B. +Room-Temperature Optical Characterization +PL emission spectra from NC samples containing the +four different Cu concentrations are shown in Figure 2a. +The intrinsic, background PL peak with emission wave- +length, λem, between 430 nm and 600 nm, is present re- +gardless of Cu concentration. 
This PL feature is char- +acteristic of undoped ZnS NCs and is widely accepted +to arise from radiative transitions between intrinsic de- +fect states inside the ZnS bandgap created by Zn and S +vacancies (VZn and VS) and interstitials (Zni and Si)[18– +20]. Similar PL is also observed from bulk, undoped ZnS, +with features being attributed to both intrinsic defects +and unintentional impurities[21]. +Distinct emission with λem centered at 670 nm is ob- +served in the Cu-doped NCs with a relative intensity that +increases with the Cu:Zn molar ratio. The PL spectra +of Figure 2a are decomposed using nonnegative matrix +factorization (SI Section 1) in order to calculate the rela- +tive strengths of the intrinsic and dopant-induced compo- +nents, yielding the concentration dependence plotted in +Figure 1b. +Absorption spectra and λem=670 nm PLE +spectra for all NC materials are shown in Figure 2b. +From the absorption spectra, we construct Tauc plots (SI +Section 2) and extract bandgap energies between 3.77 eV +and 3.79 eV. +The λem=670 nm PL and PLE spectra in Figure 2 align +well with those reported for R-Cu in bulk ZnS:Cu, which +also peaks at λem=670 nm at room temperature and is at- +tributed to transitions between VS levels and CuZn levels +inside the ZnS bandgap.[16] The PLE spectra of Figure +2b show that the λem=670 nm PL is broadly excited by +wavelengths in the range λex= 330 – 450 nm in all four +samples, consistent with measurements by Shionoya et +al. of R-Cu PLE in bulk ZnS:Cu[10]. The polarization +dependence of the PLE in their report also indicates a +nearest-neighbor configuration of VS and CuZn with C3v +point-group symmetry[10]. R-Cu is quenched when bulk +ZnS:Cu phosphors are fired in atmospheres containing +high sulfur pressure[10], further supporting the role of +VS levels in the PL. 
+Compared to their bulk counterparts, impurity dop- +ing of NC materials can be challenging to achieve and to +confirm.[22] The alignment between the R-Cu PL/PLE +spectra we measure here and those arising from bulk +ZnS:Cu is suggestive of successful Cu doping of the +ZnS:Cu NCs, since there is extensive evidence that R- +Cu in bulk ZnS:Cu is activated by Cu substitutionally +occupying Zn sites. +We additionally carry out studies +in which we deposit NC thin films and treat them with +methanol and methanolic Na2S and Zn(CO2CH3)2·2H2O +solutions known to remove organic ligands and to strip +surface cations[23], and to enrich the NC surface in S2- +or Zn2+, respectively,[24, 25] altering the presence or en- +vironment of Cu cations if they are on the surface. In +all cases, the surface treatments do not quench or en- +hance the R-Cu PL from our NCs, again consistent with +successful Cu doping of their cores (SI Section 3). +C. +Variable Temperature Studies Probing the +Origins of Cu-Induced Sub-Bandgap PL/PLE +NCs are drop-cast onto sapphire substrates and loaded +inside an evacuated cryostat for temperature- and time- +dependent spectroscopic measurements. Figure 3 shows +PL/PLE maps of ZnS NCs without Cu doping (Cu:Zn at +0 mol%) and with Cu doping (Cu:Zn 0.1 mol%), mea- +sured at 19 K and at 290 K. PL from the undoped +ZnS NCs is dominated by intrinsic PL at all tempera- + +1.5 +(a) +0.1% +Normalized PL Intensity +Increasing +Cuaddition +0.075% +0.5 +0.05% +0% +0 +400 +500 +600 +700 +800 +006 +Emission Wavelength,入..(nm) +(b) +ExcitationEnergy (eV) +4.5 +4 +3.5 +3 +2.5 +Cu:Zn 0% +Absorbance (a.u.) +0.05% +0.075% +E +e +0.1% +300 +350 +400 +450 +500 +Excitation Wavelength, ^ox (nm)4 +FIG. 3. +Temperature-dependent PL/PLE (a) PL spec- +tra (top) and PL/PLE maps (below) of films of ZnS NCs +synthesized with 0 mol% (left) and 0.1 mol% Cu:Zn (right), +measured at 19 K and room temperature. 
The PL spectra +extracted from the 19 K PL/PLE maps are photoexcited at +λex=375 nm and λex=320 nm for the ZnS NC and ZnS:Cu +NC films, respectively, to show the clearest peak separation +and spectrally reduce intrinsic PL in the case of ZnS:Cu NCs. +(b) Energy level diagrams showing key defect states respon- +sible for sub-bandgap PL emission in the undoped and doped +NCs. Dashed lines represent shallow surface defect states. Ar- +rows are used to indicate assigned radiative transitions in the +doped NCs, which involve different sub-levels of the CuZn 3d +shell. The t2 level is indicated with a heavier line to suggest +that it may be further split into e and a sub-levels depending +on the CuZn impurity site coordination. +tures. +The 19 K PL spectrum from the undoped ZnS +NCs (λex=375 nm) can be described using three Gaus- +sian peaks, which are labeled α, β, and γ in Figure 3a. +Peaks α and β dominate the PL for λex > 330 nm (corre- +sponding to below-bandgap excitation of the ZnS NCs), +and their peak emission wavelength varies with λex. The +third peak observable in the undoped film, peak γ, re- +mains relatively fixed regardless of λex and is the only +peak observed in this spectral region for λex <330 nm. +For ZnS:Cu NCs, the 19 K PL spectrum (λex=320 nm) +TABLE I. Peak positions and widths for I, II, R-Cu, and α, +β, and γ, from Gaussian fitting of PL data shown in Fig. 3 +Cu:Zn mol% Label λem (nm) Eem (eV) FWHM (eV) +0.1 +I +471 +2.61 +0.13 +0.1 +II +562 +2.19 +0.21 +0.1 +R-Cu +709 +1.74 +0.27 +0 +α +440 +2.81 +0.17 +0 +β +442 +2.65 +1.12 +0 +γ +560 +2.20 +0.30 +shows R-Cu emission, as well as blue and green emission +peaks labeled I and II (Figure 3). The three PL peaks are +observed for λex ranging from 290 – 420 nm. For λex < +330 nm, to the blue of the ZnS bandgap wavelength, the +sub-bandgap intrinsic PL is significantly diminished in +intensity compared to peaks I, II, and R-Cu. 
The R-Cu +peak is distinguishable at all temperatures, and the peak +emission wavelength blueshifts from 709 nm at 19 K to +670 nm at room temperature. This observation is similar +to the reported blueshift in bulk ZnS:Cu from 700 nm at 4 +K to 670 nm at room temperature.[10, 16] Peaks I and II, +with λem= 471 nm and λem= 562 nm, respectively, are +quenched at room temperature. The λem and FWHM +values for PL labeled in Figure 3 are listed in Table I. +Line plots of the spectral data in the PL/PLE maps of +Figure 3a are included in SI Section 5. +Figure 3b shows energy level diagrams containing key +defect levels believed to activate PL in the undoped and +doped NCs. In undoped ZnS, intrinsic PL is assigned to +transitions between Zni, VS, VZn, and Si levels, for which +the relative energy levels shown are agreed upon, but the +exact energies are not known[18, 26]. PL peaks activated +by Cu doping the ZnS NCs, which can be spectrally sep- +arated from intrinsic PL as discussed in this section, are +assigned to radiative transitions in the diagram. R-Cu +emission arises from transitions between states primarily +associated with VS and the CuZn t2 levels[16]. We assign +peak I to a transition between VS levels and the lower- +lying CuZn e levels [8]. This assignment is supported by +our measurement of a 0.87 eV energy difference between +peak I and R-Cu PL, similar to the reported 0.86 eV en- +ergy difference between CuZn t2 and e levels[27]. Peak II +is assigned to transitions between donor levels that are +shallower than VS, attributed here to surface defects, and +the CuZn t2 levels[28]. We note that the state labels and +identifications in Figure 3b are based on an approximate +picture of isolated VS and CuZn in cubic ZnS, whereas +the CuZn-VS is characterized by lowered C3v symmetry +and hybridization between these levels. We discuss this +point in more detail later in the next section, drawing +insight from theoretical calculations. 
+Figure 4a shows how time-resolved emission spec- +troscopy can be used to isolate R-Cu PL from the intrin- +sic background PL, since most of the intrinsic PL occurs +within 250 ns of excitation while the R-Cu PL is longer +lived. The room-temperature PL decay of ZnS:Cu NCs, +excited with a pulsed excitation source at λex= 375 nm + +ZnS NCs +ZnS:Cu NCs +(a) +375nmex. +320nmex. +R-Cu +2 +1.5 +a +Cts +Cts +0 +/105 +/105 +410 +10 +5 +(wu) +370 +330 +3 +Excitation Wavelength. +19K +19K +410 +2.6 +1 +2 +0.8 +370 +1.5 +0.6 +0.4 +330 +0.5 +0.2 +290K +290K +290 +430 +530 +630 +730 +830 +430 +530 +630 +730 +830 +Emission Wavelength, +(nm) +em +(b) +CBM +CBM +Vs +Zn; +Energy +II R-Cu +Vzn +t2 +Cuzn +S +e +VBM +VBM5 +FIG. 4. +Isolation of Cu-Activated PL (a) Time-resolved +emission spectra from ZnS:Cu NC films at 290 K under 375 +nm, 1 kHz pulsed excitation, in which counts from the first +250 ns following the laser pulse (black) are plotted separately +from subsequent counts (red), effectively separating the in- +trinsic background from the R-Cu peak emission. +(b) PL +spectra from ZnS:Cu NC (black trace) and undoped ZnS NC +(grey trace) films, collected at 19 K with continuous wave, 375 +nm excitation. Intensities are normalized at 430 nm. The dif- +ference spectrum (red curve) is almost identical to the time- +gated spectrum from ZnS:Cu NC films under 375 nm, 500 +kHz pulsed excitation (purple dashed curve). +and monitored at λem= 670 nm, is tri-exponential with +decay time constants (τi) of τ1=1.85µs, τ2=8.72µs, and +τ3=26.47µs. +With 95% confidence, we find that these +τi are consistent among all three Cu-doped samples (SI +Section 4). At 19 K, time-resolved emission spectroscopy +separates peaks I and II as well as R-Cu from the intrin- +sic background PL. 
Figure 4b shows that time-gating the +PL from the ZnS:Cu NC samples yields an almost iden- +tical spectral shape to that of calculating the difference +between the normalized, CW PL spectra from the doped +NCs and the undoped NCs. The CW spectra in Figure 4b +are normalized such that the PL intensities collected at +430 nm (the shortest emission wavelength in the measure- +ment) are the same, as PL at this wavelength is expected +to arise predominantly from intrinsic defects. The obser- +vation that nearly identical spectra are obtained, either +by time-gating the doped spectrum or by subtracting the +undoped CW spectrum, strongly supports that peaks I +and II arise from Cu doping, along with the R-Cu peak, +and these peaks coexist with the intrinsic PL in doped +samples for sub-bandgap excitation. +D. +First-principles calculations +R-Cu PL has been proposed to arise from a nearest- +neighbor (NN) complex of CuZn and VS defects, rather +than more distant associations[10]. To confirm the ther- +modynamic stability of the NN CuZn-VS complex, we +use density functional theory (DFT) to calculate the +formation energies, defect levels, and projected density +of states (PDOS) for ground-state configurations of the +complex in several charge states, as well as for the next- +nearest-neighbor (NNN) complex. The results of these +calculations are shown in Figure 5. We find that the for- +mation energy of the NN CuZn-VS complex is lower than +that of the NNN complex. The formation energy calcu- +lations in Figure 5a indicate that the two stable charge +states are either negative (−1) or positive (+1), depend- +ing on the Fermi level, with the neutral (0) configuration +always lying higher in energy. This is in contrast to the +calculation for NN CuZn-VS in an unrelaxed ZnS lattice, +which significantly increases the formation energy of all +charge states, but particularly the negative and neutral +configurations. 
+Figure 5b shows the defect levels and their projections +at k = 0 for each charge state of the NN complex. These +calculations qualitatively agree with the relative arrange- +ment of levels in Figure 3b, with orange lines indicating +the positions of two, higher-energy states derived from +Zn dangling bonds surrounding the VS site, and green +lines indicating ten, lower-energy states derived from the +CuZn d-shell. The total density of states for pure ZnS +and for ZnS containing a neutral CuZn-VS complex are +included in SI Section 9. In the negatively-charged com- +plex, all twelve states are occupied, and the VS-derived +states are strongly mixed with the CuZn-derived states. +In the neutral complex, the VS-derived states are only +partially filled and are no longer mixed with the CuZn- +derived states. In the positively-charged complex, only +the CuZn-derived states are occupied and the VS-derived +states are no longer easily isolated; likely because they +have been pushed far into the conduction band; however, +this may be an artifact of well-known DFT bandgap er- +rors (the estimated bandgap in this calculation is 2.14 +eV, compared to the expected value around 3.6 eV), and +the VS states may still exist within the bandgap. +For the R-Cu transition depicted in Figure 3b to occur, +there must be a hole in the higher-energy CuZn states. +This hole is likely created by photo-ionization of a CuZn +electron into the conduction band based on the large +Stokes shift we observe between peak λem and peak λex +for R-Cu PL. It has also been proposed that this Stokes +shift is a result of lattice relaxation around the excited +complex when a CuZn electron is transferred directly to +a VS state[10, 29]. 
In this case, the excited VS level lies + +(a) +250ns-250μs +290 K +0-250ns +R-Cu +Intrinsic PL +0 +400 +500 +600 +700 +800 +EmissionWavelength,> +(nm) +(b) +2.5 +R-Cu +19 K +Normalized PL Intensity +6 +2 +1.5 +4 +0.5 +500 +600 +700 +800 +900 +Emission Wavelength, +(nm)6 +FIG. 5. +First-principles calculations (a) Formation en- +ergies for nearest- and next-nearest-neighbor (NN and NNN, +respectively) associations of CuZn and VS in ZnS, as a func- +tion of the Fermi level (solid curves). +Charge states with +respect to the ZnS lattice are indicated as -1, 0, and +1 for +the negatively charged, neutral, and positively charged com- +plex, respectively. +The dashed curve shows the formation +energy for the NN complex in an unrelaxed ZnS lattice. All +formation energy calculations are performed under zinc-rich +sulfur-poor thermodynamical stability conditions. (b) Defect +levels at k = 0 for three different charge states of the NN +CuZn-VS complex. Orange lines indicate VS-derived states, +and green lines indicate CuZn-derived states. Solid lines indi- +cate the valence band maximum (0 eV) and conduction band +minimum (2.14 eV). Dotted lines indicate the Fermi level. +above the conduction band minimum immediately after +excitation, and may therefore release an electron to the +conduction band before being lowered into the bandgap +following lattice relaxation. If the excited complex re- +sulting from either of the above processes contains an +electron in a Vs state, R-Cu emission can subsequently +occur. +Otherwise, an electron must be re-captured by +the complex into a Vs state for R-Cu emission to occur, +leading to a longer emission lifetime. Based on this ob- +servation, the electron occupations of the defect levels in +Figure 5b indicate how the charge state prior to excita- +tion determines the possible emission pathways, which +define the emission lifetime and the energy of the R-Cu +PL. +E. 
+R-Cu Emission Dynamics +Figure 6 shows how the spectral and temporal char- +acteristics of the R-Cu PL as a function of temperature +provide detailed insight regarding the emission mecha- +nisms and the states involved. At temperatures from 19 +K to 290 K, we measure the PL emission spectrum to find +the peak λem, and we then measure the corresponding PL +decay curve for that λem. The PL spectra at each tem- +perature are converted to energy units (see Methods) and +fit using Gaussian functions to extract the peak energies +(Eem) and integrated intensities. The emission spectra +at 19 K, 110 K, 150 K, 190 K, and 290 K are plotted +as examples in Figure 6a along with the corresponding +fit results. The spectral data for all measurement tem- +peratures are shown in the pseudocolor plot of the inset, +and fitted spectra for all measurement temperatures not +included in Figure 6a are shown in SI Section 6. +For +the PL decay measurements at each temperature (Fig- +ure 6b), we find that a tri-exponential decay model most +effectively describes the data compared to fitting with +one, two, or four exponential terms or a stretched expo- +nential decay function. The best-fit lifetime components, +τi for i = 1,2,3 at every temperature are plotted in Figure +6c, showing three, well-separated decay lifetimes. +At the lowest measurement temperature of 19 K (Fig- +ure 6d), we acquire PL decay curves across the R-Cu +emission band with 2.5 nm resolution, and we fit the data +to the tri-exponential model with fixed lifetimes based on +the fit results from Figure 6c. Figure 6d shows the PL +amplitudes corresponding to the decay components τ1, +τ2, and τ3 as a function of λem. +The fast component +τ1 likely reflects the tail of one or more peaks outside +the R-Cu emission band, with little spectral dependence. +However, separating the slow (τ3) and fast (τ2) PL con- +tributions this way reveals the presence of two distinct +peaks at 1.73 eV and 1.82 eV. 
The observation of en- +ergetically distinct PL peaks with different lifetimes is +consistent with the co-existence of two separate radia- +tive transitions. +Figure 6e shows the integrated PL intensity over the +R-Cu band and best-fit Eem at every temperature, ex- +tracted from Gaussian fits to the PL data in Figure 6a. +As noted previously, Eem blueshifts as the temperature +increases, and Figure 6e illustrates that the shift oc- +curs non-linearly, with a marked inflection between 100 +K and 200 K and saturation at both higher and lower +temperatures. Meanwhile, the R-Cu emission intensity +varies non-monotonically with temperature; it decreases +with increasing temperature from 19 K to 190 K, then +temporarily increases between 190 K and 210 K, be- +fore decreasing again at higher temperatures. The ini- +tial decrease in intensity is consistent with quenching +through thermally-activated non-radiative recombination +pathways and is typical for defect emission. The tempo- +rary increase in intensity with increasing temperature is +referred to as negative thermal quenching (NTQ) and +is occasionally observed in defect emission; for example, + +(a) +Formation Energy (eV) +6 +0 +5.5 +5 +4.5 ++1 +--NN,unrelaxed +4 +NNN.relaxed +NN,relaxed +3.5 +0 +0.5 +1 +1.5 +2 +Fermi Level (eV) +(b) +(Cuzn-Vs)1- (Cuzn-Vs)0 (Cuzn-Vs)1+ +2.14 +(eV) +Energy ( +cuZn +07 +FIG. 6. +R-Cu Emission Dynamics (a) PL spectra measured at temperatures ranging from 19 K-290 K (data points for five +representative temperatures are shown, with all data plotted in the inset) and Gaussian fits (solid traces). (b) Time-dependent +PL emission following pulsed excitation at λex=375 nm, measured at the peak PL wavelength for temperatures from 19 K to +290 K. (c) PL decay lifetimes extracted from a tri-exponential fit to the time-dependent PL curves in (d) at each measurement +temperature. 
(d) Amplitudes of each tri-exponential decay component at a single temperature (19 K) as a function of emission +energy. (e) Integrated emission intensity (black points) and peak energy (colored points) as a function of temperature, extracted +from the Gaussian fits of (a). Error bars represent 68% confidence intervals from fit results. The solid black curve is a fit to the +intensity data using the model described in the text. Red and blue shaded regions represent the relative temperature-dependent +intensities IA(T) and IB(T) from the best-fit model, and the dashed curve is a sum of the two emission energies resolved in (d), +weighted by their corresponding best-fit emission intensities. (f) Energy level diagram showing two manifolds of states inside +the ZnS bandgap with coupled relaxation processes, where radiative recombination from A to G results in 1.73 eV emission +and radiative recombination from B to G results in 1.82 eV emission. +in the case of the 2.65 eV PL (referred to as the YS1 +peak) from ZnS:I[30]. NTQ has generally been explained +by thermally-activated carrier transfer from lower- to +higher- energy emissive defect states. ZnS:Cu NCs have +been synthesized in water at room temperature with the +same Cu(CH3COO)2 precursor[8] and then subsequently +annealed at 450 ◦C, but the resulting red peak (600 nm at +room temperature) is not resolvable from other emission +peaks when the temperature is less than 220 K, making +it impossible to observe a similar NTQ or blueshift. +In Figure 6f, we propose an empirical model to cap- +ture both the temperature-dependent blueshift and the +NTQ of R-Cu emission. Motivated by the time-resolved +observations of Figure 6d, we include two radiative re- +combination transitions with emission energies at 1.73 eV +(A→G) and 1.82 eV (B→G), corresponding to two dis- +tinct excited-state configurations. 
These radiative tran- +sitions compete with thermally-activated, non-radiative +transitions that generally tend to quench the emission +at elevated temperatures. However, as carriers are ther- +mally excited from state A to state B at temperatures +with thermal energy corresponding to the energy offset +ETR, the faster B→G transition increasingly becomes the +dominant radiative recombination pathway, resulting in +blueshifted PL and temporary NTQ. This mechanism is +consistent with our observation that inflection points in +the PL intensity align with the onset and saturation of +the blueshift in Eem. +To quantify this model, we derive the following ana- +lytical expressions for the temperature-dependent PL in- +tensities, I(A) and I(B), from the radiative transitions +occurring from excited states A and B, respectively: +IA(T) = IA(0) +krA +krA + knrA + kTR +, +(1) +IB(T) = IB(0) +krB +krB + knrB ++ IA(0) +kTRkrB +krB + knrB +. +(2) +Here, krA and krB are the radiative recombination rates +shown in Figure 6f (solid lines), which are independent +of temperature. The terms knrA and knrB are rates for +non-radiative relaxation, and kTR is the rate for non- +radiative transfer between states A and B (dashed lines +in Figure 6f). These non-radiative rates are temperature- +dependent with the form kj = Γj exp(−Ej/kBT), where +Γj is a proportionality constant, Ej is the activation en- +ergy of the transition, and kB is Boltzmann’s constant. +See SI Section 7 for a derivation of these expressions. 
+ +(b) 104 +(c) +200 +19K +50 +.110K +8 +19K +150K +(srl) +150 +103 +72 +6 +190K +290K +T3 +290K +Lifetime +250 +100 +4 +1.5 +Energy (eV) +2 +50 +101 +0 +0 +1.4 +1.6 +1.8 +2 +2.2 +0 +0.5 +0 +100 +200 +300 +Emission Energy (eV) +Time (ms) +Temperature (K) +(d) +(e) +(f) +,=1.134 μs +Component Amplitude +00 +1.82 +B +6 +T2=18.57 μs +Counts/10 +10 + ζ +� +Gπ +T (s, a) +���� +≤ |E(s,a)∼ˆµ +� +w(s, a)Gπ +T (s, a) +� +| ++ Vmax +���E(s,a)∼ρπ +T +� +I +�ρπ +T (s, a) +ˆµ(s, a) > ζ +����� +≤ |E(s,a)∼µ +� +w(s, a)Gπ +T (s, a) +� +| + ζVmaxTV (ˆµ, µ) ++ Vmax +����E(s,a)∼ρπ +T +� +I +�ρπ +T (s, a) +ˆµ(s, a) > ζ +������. +Hence, ignoring statistical error due to finite dataset, we can +upper bound the estimation error |η(T ⋆, π) − η(T, π)| by +γ +1 − γ +� +sup +g∈G +���ℓwπ,T (g, T) +��� + ζVmaxTV (ˆµ, µ) ++ VmaxE(s,a)∼ρπ +T +� +I +�ρπ +T (s, a) +ˆµ(s, a) > ζ +��� +. +(3) +Intuitively, the first term measures the error caused by im- +perfect dynamics T, the second term captures the estimation +error of the behavior distribution, and the last term comes +from truncating the density ratios. +Pessimistic Policy Optimization with Model +Misspecification +In this section, we explicitly consider misspecifications of +the function classes used for representing the value func- +tion and dynamics models (G and T , respectively). Most +prior theoretical work on model-based RL make strong as- +sumptions on the realizability of the dynamics model class. +For example, in the offline setting, Voloshin, Jiang, and Yue +(2021) focus on exact realizability of the dynamics model +(that is, T ⋆ ∈ T ). In the online setting, Jin et al. (2020) pro- +vide bounds where there is a linear regret term due to global +model misspecification. Their bounds require a T ∈ T such +that TV (T(s, a), T ⋆(s, a)) ≤ ϵ for all (s, a), even if the +state-action pair (s, a) is only visited under some poorly per- +forming policies. 
We now show that offline RL tasks can +need much weaker realizability assumptions on the dynam- +ics model class. +Our key observation is that for a given dynamics T and +policy π, computing the density ratio wπ,T is statistically +efficient. Note that to compute wπ,T we do not need any +samples from the true dynamics: instead, we only need to be +able to estimate the state-action density under a dynamics +model T for policy π. This allows us to explicitly utilize the +density ratio to get a relaxed realizability assumption. +Definition 2. The local value function error for a particular + +dynamics model T and policy π is defined as +ϵV (T, π) ≜ inf +g∈G |E(s,a)∼µ[wπ,T (s, a)(Es′∼T (s,a)[(g − V π +T ⋆)(s′)] ++ Es′∼T ⋆(s,a)[(g − V π +T ⋆)(s′)])]|. +The term ϵV measures the local misspecification of the +value function class – that is, the error between the true +value of the policy V π +T ⋆ and the best fitting value function +in the class G – only on the state-action pairs that policy π +visits under a particular potential dynamics model T. In con- +trast, previous results (Jin et al. 2020; Nachum et al. 2019; +Voloshin, Jiang, and Yue 2021) take the global maximum +error over all (reachable) (s, a), which can be much larger +than the local misspecification error ϵV (T, π). +With this local misspecification error, we can establish a +pessimistic estimation of the true reward. Let E be a high +probability event under which the loss function ℓwπ,T (T, g) +is close to its expectation (randomness comes from the +dataset D). In the Appendix, we define this event formally +and prove that Pr(E) ≥ 1 − δ. The following lemma gives +a lower bound on the true reward. Proofs, when omitted, are +in the Appendix. +Lemma 3. Let ι = log(2|G||T ||Π|/δ). For any dynamics +model T and policy π, we define +lb(T, π) = η(T, π) − +1 +1 − γ +� +sup +g∈G +ℓwπ,T (g, T) ++ VmaxE(s,a)∼ρπ +T +� +I +�ρπ +T (s, a) +ˆµ(s, a) > ζ +��� +. 
+(4) +Then, under the event E, we have +η(T ⋆, π) ≥ lb(T, π) − +1 +1 − γ +� +ϵV (T, π) +− 2Vmax +� +ζι/n − ζVmaxTV (ˆµ, µ) +� +. +(5) +We use this to define our offline policy selection Alg. 1. +Algorithm 1: Model-based Offline RL with Local +Misspecification Error +Require: estimated behavior distribution ˆµ, +truncation threshold ζ. +for π ∈ Π, T ∈ T do +Compute wπ,T (s, a) = I +� +ρπ +T (s,a) +ˆµ(s,a) ≤ ζ +� +ρπ +T (s,a) +ˆµ(s,a) . +Compute lb(T, π) by Eq. (4). +end +π ← argmaxπ∈Π maxT ∈T lb(T, π). +In contrast to existing offline model-based algorithms (Yu +et al. 2020; Voloshin, Jiang, and Yue 2021), our algorithm +optimizes the dynamics and policy jointly. For a given dy- +namics model, policy pair, our Alg. 1 computes the trun- +cated density ratio wπ,T which does not require collecting +new samples and then uses this to compute a lower bound +lb(T, π) (Eq. (4)). Finally, it outputs a policy that maximizes +the lower bound. We will shortly show this joint optimiza- +tion can lead to better offline learning. +Parameter ζ controls the truncation of the stationary im- +portance weights. Increasing ζ decreases the last term in the +lower bound objective lb(T, π), but it may also increase the +variance given the finite dataset size. Note that by setting +ζ = log(n) and letting n → ∞ (i.e., with infinite data), the +last term in Eq. (4) and the statistical error converge to zero. +Safe Policy Improvement +We now derive a novel safe policy improvement result, up +to the error terms given below. Intuitively this guarantees +that the policy returned by Alg. 1 will improve over the be- +havior policy when possible, which is an attractive property +in many applied settings. Note that recent work (Voloshin, +Jiang, and Yue 2021; Yu et al. 2020) on model-based of- +fline RL does not provide this guarantee when the dynamics +model class is misspecified. 
For a fixed policy π, define +ϵρ(π) ≜ infT ∈T E(s,a)∼ρπ +T ⋆ [TV (T(s, a), T ⋆(s, a))], (6) +ϵµ(π) ≜ E(s,a)∼ρπ +T ⋆ +� +I +�ρπ +T ⋆(s, a) +ˆµ(s, a) +> ζ/2 +�� +. +(7) +The term ϵρ measures the local misspecification error of the +dynamics model class in being able to represent the dynam- +ics for state-action pairs encountered for policy π. ϵµ rep- +resents that overlap of the dataset for an alternate policy π: +such a quantity is common in much of offline RL. In the fol- +lowing theorem, we prove that the true value of the policy +computed by Alg. 1 is lower bounded by that of the optimal +policy in the function class with some error terms. +Theorem 4. Consider a fixed parameter ζ. Let ˆπ be the pol- +icy computed by Alg. 1 and ˆT = argmaxT lb(T, ˆπ). Let +ι = log(2|G||T ||Π|/δ). Then, with probability at least 1−δ, +we have +η(T ⋆, ˆπ) ≥ sup +π +� +η(T ⋆, π) − 6Vmaxϵρ(π) +(1 − γ)2 +− Vmaxϵµ(π) +1 − γ +� +− ϵV ( ˆT, ˆπ) +1 − γ +− 4Vmax +1 − γ +� +ζι +n − 2ζVmaxTV (ˆµ, µ) +1 − γ +. +(8) +To prove Theorem 4, we prove the tightness of lb(T, π) — +the lower bound maxT lb(T, π) is at least as high as the true +value of the policy with some errors. Consequently, maxi- +mizing the lower bound also maximizes the true value of the +policy. Formally speaking, we have the following Lemma. +Lemma 5. For any policy π ∈ Π, under the event E we have +max +T ∈T lb(T, π) ≥ η(T ⋆, π) − 6Vmaxϵρ(π)/(1 − γ)2 +− +1 +1 − γ +� +Vmaxϵµ(π) − 2Vmax +� +ζι/n − ζVmaxTV (ˆµ, µ) +� +. +In the sequel, we present a proof sketch for Lemma 5. +In this proof sketch, we hide 1/(1 − γ) factors in the big- +O notation. For a fixed policy π, let ˆT be the minimizer of +Eq. (6). We prove Lemma 5 by analyzing the terms in the +definition of lb( ˆT, π) (Eq. (4)) separately. +i. Following the definition of Eq. (6), we can show that +∥ρπ +ˆT − ρπ +T ⋆∥1 +≤ +O(ϵρ(π)). Consequently we get +η( ˆT, π) ≥ η(T ⋆, π) − O(ϵρ(π)). + +ii. Recall that 0 ≤ g(s, a) ≤ Vmax for all g +∈ G. 
+Then for any (s, a) we have supg∈G |Es′∼ ˆT (s,a)g(s′) − +Es′∼T ⋆(s,a)g(s′)]| ≤ VmaxTV( ˆT(s, a), T ⋆(s, a)). Com- +bining the definition of ℓw(g, T), Eq. (6) and statistical +error we get supg∈G ℓwπ,T (g, T) ≤ � +O(ϵρ(π) + 1/√n + +VmaxTV (ˆµ, µ)) under event E. +iii. For the last term regarding distribution mismatch, we +combine Eq. (7) and Lemma 8. We can upper bound this +term by O(ϵρ(π) + ϵµ(π)). +iv. The final term arises due to the potential estimation error +in the behavior policy distribution. +Theorem 4 follows directly from combining Lemma 3 and +Lemma 5. Note that Theorem 4 accounts for estimation er- +ror in the behavior policy, misspecification in the dynamics +model class, and misspecification in the value function class, +the latter two in a more local, tighter form than prior work. +Illustrative Example +To build intuition of where our approach may yield benefits, +we provide an illustrative example where Alg. 1 has better +performance than existing approaches: an MDP whose state +space is partitioned into several parts. The model class is re- +stricted so that every model can only be accurate on one part +of the state space. When each deterministic policy only vis- +its one part of the state space, the local misspecification error +is small — for each policy, there exists a dynamics model +in the set which can accurately estimate the distribution of +states and actions visited under that policy. In contrast, if the +dynamics are learned to fit the whole state space, the estima- +tion error will be large. +More precisely, for a fixed parameter d, consider a MDP +where S = {s0, · · · , sd} ∪ {sg, sb}. There are d actions +denoted by a1, · · · , ad. The true dynamics are deterministic +and given by +T ⋆(s0, ai) = si, +T ⋆(si, aj) = +�sg, +if I [i = j] , +sb, +if I [i ̸= j] , +(9) +T ⋆(sg, ai) = sg, +T ⋆(sb, ai) = sb, ∀i ∈ [d]. +(10) +And the reward is r(s, ai) = I [s = sg] , ∀i ∈ [d]. +The transition function class T is parameterized by θ ∈ +Rd. 
For a fixed θ, the transition for states s1, . . . , sd is +Tθ(si, aj) = +�sg, +w.p. 1 +2 +� +1 + e⊤ +j θ +� +, +sb, +w.p. 1 +2 +� +1 − e⊤ +j θ +� +, +(11) +where ej is the j-th standard basis of Rd. The transitions +for states s0, sg, sb is identical to the true dynamics T ⋆. +But the transition model Tθ in the function class must use +the same parameter θ to approximate the dynamics in states +s1, · · · , sd, which makes it misspecified. +Decoupling learning the dynamics model and policy is +suboptimal. Most prior algorithms first learn a dynamics +model and then do planning with that model. However, note +here that the optimal action induced by MDP planning given +a particular Tθ is suboptimal (assuming a uniformly random +tie-breaking). This is because, for any given θ, that dynam- +ics model will estimate the dynamics of states s1, · · · , sd +as being identical, with identical resulting value functions. +Note this is suboptimality will occur in this example even if +the dataset is large and covers the state–action pairs visited +by any possible policy (ϵµ(π) = 0), the value function class +is tabular and can represent any value function ϵV = 0, the +behavior policy is known or the resulting estimation error is +small (TV (ˆµ, µ) = 0, and ζ = 0). In such a case, Theo- +rem 4 guarantees that with high probability, our algorithm +will learn the optimal policy because there exist couplings +of the dynamics models and optimal policies such that the +local misspecification error ϵρ = 0. This demonstrates that +prior algorithms (including MML (Voloshin, Jiang, and Yue +2021)) that decouple the learning of dynamics and policy +can be suboptimal. We now state this more formally: +Theorem 6. Consider any (possibly stochastic) algorithm +that outputs an estimated dynamics Tθ ∈ T . Let πθ be the +greedy policy w.r.t. Tθ (with ties breaking uniformly at ran- +dom). Then +max +π +η(T ⋆, π) − η(T ⋆, πθ) ≥ (A − 1)γ2 +A(1 − γ) . 
+(12) +As a side point, we also show that the off-policy estima- +tion error in Voloshin, Jiang, and Yue (2021) is large when +the dynamics model class is misspecified in Proposition 7. +We defer this result to the Appendix. +Experiments +While our primary contribution is theoretical, we now inves- +tigate how our method can be used for offline model-based +policy selection with dynamics model misspecification. We +first empirically evaluate our method on Linear-Quadratic +Regulator (LQR), a commonly used environment in optimal +control theory (Bertsekas et al. 2000), in order to assess: Can +Algorithm 1 return the optimal policy when we have both +model and distribution mismatch? We also evaluate our ap- +proach using D4RL (Fu et al. 2020), a standard offline RL +benchmark for continuous control tasks. Here we consider: +Given policies and dynamics pairs obtained using state-of- +the-art offline model-based RL methods with ensemble dy- +namics, does Alg. 1 allow picking the best policy, outper- +forming previous methods? +Linear-Quadratic Regulator (LQR) +LQR is defined by a linear transition dynamics st+1 = +Ast + Bat + η, where st ∈ Rn and at ∈ Rm are state and +action at time step t, respectively. η ∼ N(0, σ2I) is ran- +dom noise. LQR has a quadratic reward function R(s, a) = +−(sT Qs + aT Ra) with Q ∈ Rn×n and R ∈ Rm×m be- +ing positive semi-definite matrices, Q, R ⪰ 0. The op- +timal controller to maximize the sum of future rewards +�H +t=1 −(sT +t Qst+aT +t Rat) until the end of horizon H has the +form at = −Kst (K ∈ Rm×n) (Bertsekas et al. 2000). The +value function is also a quadratic function, V (s) = sT Us+q +for some constant q and positive semi-definite matrix U ⪰ 0. +In the experiment, the state space is [−1, 1]. +Misspecified transition classes. 
Consider a 1D version of +LQR with A(x) = (1 + x/10), B(x) = (−0.5 − x/10), + +-0.6 +-0.4 +-0.2 +0.0 +0.2 +0.4 +0.6 +K +-12 +-9 +-6 +-3 +0 +Return +Returns of different policies under true environment +Ours +MML +1 +2 +3 +4 +5 +Rank +0.0 +0.5 +1.0 +1.5 +2.0 +2.5 +3.0 +3.5 +4.0 +Negative of lower bound +(0.00,-0.25) +(0.00,0.00) +(0.00,0.25) +(0.20,0.25) +(0.20,-0.25) +Ranking imposed by Eq 6 on policy-model pair +(T, ) +Model Loss+Distribution Shift +0.1 +0.2 +0.3 +0.4 +MBLB +MML +MOPO +D4RL IQM +Normalized Score +Figure 1: Left: Visualization of true policy value η(T ⋆, π). Our algorithm picks the optimal policy, whereas MML picks a +suboptimal policy. Middle: Visualization of negative lower bounds lb(T, π) for different policies and models (indexed by the +values of (v, u)). Right: We show the interquartile mean (IQM) scores of two model-based lower bounds (MML and MBLB) +and a recent model-based policy learning algorithm (MOPO) on D4RL. +Q = 1, R = 1 and noise η ∼ N(0, 0.05). Our true dy- +namics is given by x∗ = 6, and the corresponding optimal +policy has K = −1.1. Function classes used by Alg. 1 are +finite and computed as follows: (i) the value function class +G contains the value functions of 1D LQR with parameters +x ∈ {2, 4, 10} and K ∈ {−1.1, −0.9, −0.7}; (ii) the transi- +tion class T is misspecified. We use the following transition +class Tu ∈ T parametrized by u, +Tu = +�st+1 = A(x∗)st − B(x∗)at, +st ∈ [u, u + 1], +st+1 = st, +otherwise, +with u ∈ {−0.75, −0.5, −0.25, 0, 0.25}. In other words, +the capacity of the transition class is limited – each func- +tion can only model the true dynamics of a part of the +states; (iii) the policy class is given by πv parameterized +by v, and πv(s) = −1.1(s − v) + N(0, 0.01) with v ∈ +{−0.6, −0.4, −0.2, 0, 0.2, 0.4, 0.6}. Intuitively, πv tries to +push the state toward s = v. +Since the state and action spaces are one dimensional, we +can compute the density ratio wπ,T efficiently by discretiza- +tion. 
The implementation details are deferred to Appendix. +Baseline. We compare our algorithm to minimizing MML +loss as described in the OPO algorithm of Voloshin, Jiang, +and Yue (2021, Algorithm 2). MML strictly outperformed +VAML (Farahmand, Barreto, and Nikovski 2017) as shown +in the experiments of (Voloshin, Jiang, and Yue 2021); +hence, we only compare to MML in our experiments. +Results. Figure 1 (Left) shows the return of different poli- +cies under the true environment. Our method picks the op- +timal policy for the true model, whereas MML picks the +wrong policy. In Figure 1 (Middle), we also visualize dif- +ferent terms in the definition of lb(T, π) (Eq. (5)). Note that +the model loss for different policy is different (model loss for +(v, u) = (0, 0) is significantly larger than (0.0.−0.25), even +if the dynamics are the same). This is because the model loss +is evaluated with a different density ratio. +This highlights the main benefit of our method over the +baseline. Since the model class is misspecified, maximizing +over the weight function w in the MML loss results in an +unrealistically large loss value for some models. However, +if the chosen policy does not visit the part of the state space +with a large error, there is no need to incur a high penalty. +D4RL +D4RL (Fu et al. 2020) is an offline RL standardized bench- +mark designed and commonly used to evaluate the progress +of offline RL algorithms. This benchmark is standard for +evaluating offline policy learning algorithms. Here, we use +a state-of-the-art policy learning algorithm MOPO (Yu et al. +2020) to propose a set of policy-transition model tuples – +for N policy hyperparameters and K transition models, we +can get M × K tuples: {(π1, T1), (π1, T2), ..., (πN, TK)}. +The MOPO algorithm learns an ensemble of transition mod- +els and randomly chooses one to sample trajectories during +each episode of training. 
Instead, we choose one transition +model to generate trajectories for the policy throughout the +entire training. In our experiment, we choose M = 1 and +K = 5, and train each tuple for 5 random seeds on Hopper +and HalfCheetah tasks (see Appendix). We then compute the +model-based lower bound for each (πi, Tj), and select the +optimal policy that has the highest lower bound. We learn the +dynamics using 300k iterations and we train each policy us- +ing 100k gradient iterations steps with SAC (Haarnoja et al. +2018) as the policy gradient algorithm, imitating MOPO (Yu +et al. 2020) policy gradient update. +MML. +Voloshin, Jiang, and Yue (2021) recommended +two practical implementations for computing MML lower +bounds. The implementation parametrizes w(s, a)V (s′) +jointly via a new function h(s, a, s′). We refer readers to +Prop 3.5 from Voloshin, Jiang, and Yue (2021) for a detailed +explanation. We describe how we parametrize this function +as follows: +• Linear: Voloshin, Jiang, and Yue (2021) showed that if +T, V, µ are all from the linear function classes, then a +model T that minimizes MML loss is both unique and +identifiable. This provides a linear parametrization of +h(s, a, s′) = ψ(s, a, s′)T θ, where ψ is a basis function. +We choose ψ to be either a squared basis function or a +polynomial basis function with degree 2. 
+• Kernel: Using a radial basis function (RBF) over S × + +Dataset Type +Env +MOPO +MML +(Squared) +MML +(Polynomial) +MML +(RKHS) +MBLB +(Linear) +MBLB +(Quad) +medium +hopper +175.4 +(95.3) +379.4 +(466.4) +375.6 +(459.5) +375.0 +(459.9) +591.7 +(523.1) +808.5 +(502.7) +med-expert +hopper +183.8 +(94.4) +160.9 +(131.5) +116.5 +(148.4) +61.4 +(35.0) +261.1 +(157.9) +242.5 +(134.0) +expert +hopper +80.4 +(63.4) +93.8 +(87.9) +61.6 +(61.9) +70.0 +(56.2) +118.2 +(61.6) +121.0 +(72.5) +medium +halfcheetah +599.8 +(668.4) +1967.6 +(1707.5) +2625.1 +(937.2) +3858.2 +(1231.1) +3290.4 +(1753.1) +2484.2 +(1526.8) +med-expert +halfcheetah +-486.6 +(48.1) +-188.5 +(137.2) +-77.0 +(252.5) +-343.2 +(225.2) +207.4 +(509.5) +192.8 +(432.0) +Table 2: We report the mean and (standard deviation) of selected policy’s simulator environment performance across 5 random +seeds. MML and MBLB are used as model-selection procedures where they select the best policy for each seed. Our method is +choosing the most near-optimal policy across the datasets. +A × S and computing K((s, a, s′), (˜s, ˜a, ˜s′)), Voloshin, +Jiang, and Yue (2021) showed that there exists a closed- +form solution to compute the maxima of the MML loss +(RKHS). Here, there is no need for any gradient update, +we only sample s′ from T. +MBLB (Ours). +For a continuous control task, we compute +our model-based lower bound (MBLB) as follows: +Compute η(T, π). Although it is reasonable to directly use a +value function V π +T trained during policy learning to compute +η(T, π), Paine et al. (2020); Kumar et al. (2021) points out +how this value function often severely over-estimates the ac- +tual discounted return. Therefore, we estimate the expected +value of policy π using the generalized advantage estima- +tor (GAE) (Schulman et al. 2016). 
For a sequence of tran- +sitions {st, at, r(st, at), st+1}t∈[0,N], it is defined as: At = +�t+N +t′=t (γλ)t′−t(r(st′, at′) + γVφ(st′+1) − Vφ(st′)), with λ +a fixed hyperparameter and Vφ the value function estimator +at the previous optimization iteration. Then, to estimate the +value function, we solve the non-linear regression problem +minimizeφ +�t+N +t′=t (Vφ(st′)− ˆVt′)2 where ˆVt = At+Vφ(st′). +We also provide a comparison to using the standard TD-1 +Fitted Q Evaluation (FQE) (Le, Voloshin, and Yue 2019) in- +stead in Table A1 in the Appendix. We find that using GAE +provides better policy evaluation estimations. +Behavior density modeling. We use a state-of-the-art nor- +malizing flow probability model to estimate the density of +state-action pairs (Papamakarios et al. 2021). For ρπ +T , we +sample 10,000 trajectories from T, π, and estimate the cor- +responding density; for the behavior distribution µ, we use +the given dataset D. We empirically decide the number of +training epochs that will give the model the best fit. +Compute supg∈G |ℓwπ,T (g, T)|. We parametrize g either as +a linear function of state: g(s) = mT s, or a quadratic func- +tion of the state: g(s) = sT Ms + b. We use gradient ascent +on ℓwπ,T (g, T) to maximize this objective. +Results. We report the results in Table 2. There is gen- +eral overlap across seeds for the performance between vari- +ous methods, but our approach has the best average perfor- +mance or is within the standard deviation of the best. We also +show that for different choices of how we parameterize the +w(s, a)V (s′) distribution (MML) and how we choose the +family of g test function (MBLB), we are selecting differ- +ent final policies. However, overall, MBLB can pick better- +performing final policies with two different parametrizations +while MML is choosing lower-performing policies with its +three parametrizations. 
We find that our approach of select- +ing among the set of policies computed from each of the +models used by MOPO consistently outperforms the policy +produced by MOPO in the considered tasks. +To summarize these results, we report the interquartile +mean (IQM) scores of each method in Figure 1 (Right). IQM +is an outlier robust metric proposed by Agarwal et al. (2021) +to compare deep RL algorithms. We create the plot by sam- +pling with replacement over all runs on all datasets 50000 +times. Though there is significant overlap, our method gen- +erally outperforms policies learned from MOPO. +Conclusion +There are many directions for future work. The current +lb(T, π) implementation with density ratio wπ,T (s, a) is not +differentiable: an interesting question is to make this differ- +entiable so that we can directly optimize a policy. Another +interesting question would be to construct estimators for the +local misspecification errors ϵρ, ϵµ and ϵV , which could be +used to refine the model class to optimize performance. +To conclude, this paper studies model-based offline rein- +forcement learning with local model misspecification errors, +and proves a novel safe policy improvement theorem. Our +theoretical analysis shows the benefit of this tighter analy- +sis and approach. We illustrate the advantage of our method +over prior work in a small linear quadratic example and +also demonstrate that it is competitive or has stronger per- +formance than recent model-based offline RL methods on +policy selection in a set of D4RL tasks. + +Acknowledgment +Research reported in this paper was sponsored in part by +NSF grant #2112926, the DEVCOM Army Research Lab- +oratory under Cooperative Agreement W911NF-17-2-0196 +(ARL IoBT CRA) and a Stanford Hoffman-Yee grant. 
The +views and conclusions contained in this document are those +of the authors and should not be interpreted as representing +the official policies, either expressed or implied, of the Army +Research Laboratory or the U.S.Government. The U.S. Gov- +ernment is authorized to reproduce and distribute reprints for +Government purposes notwithstanding any copyright nota- +tion herein. +References +Agarwal, R.; Schwarzer, M.; Castro, P. S.; Courville, A. C.; +and Bellemare, M. 2021. Deep reinforcement learning at the +edge of the statistical precipice. Advances in neural infor- +mation processing systems, 34: 29304–29320. +Argenson, A.; and Dulac-Arnold, G. 2020. Model-based of- +fline planning. arXiv preprint arXiv:2008.05556. +Bertsekas, D. P.; et al. 2000. Dynamic programming and +optimal control: Vol. 1. Athena scientific Belmont. +Chen, J.; and Jiang, N. 2019. Information-Theoretic Consid- +erations in Batch Reinforcement Learning. In International +Conference on Machine Learning, 1042–1051. +Curi, S.; Berkenkamp, F.; and Krause, A. 2020. Efficient +Model-Based Reinforcement Learning through Optimistic +Policy Search and Planning. Advances in Neural Informa- +tion Processing Systems, 33. +Delgrange, F.; Nowe, A.; and P´erez, G. A. 2022. Distilla- +tion of RL Policies with Formal Guarantees via Variational +Abstraction of Markov Decision Processes. +Farahmand, A.-m.; Barreto, A.; and Nikovski, D. 2017. +Value-aware loss function for model-based reinforcement +learning. In Artificial Intelligence and Statistics, 1486–1494. +PMLR. +Farahmand, A. M.; Munos, R.; and Szepesv´ari, C. 2010. Er- +ror propagation for approximate policy and value iteration. +In Advances in Neural Information Processing Systems. +Fu, J.; Kumar, A.; Nachum, O.; Tucker, G.; and Levine, S. +2020. +D4rl: Datasets for deep data-driven reinforcement +learning. arXiv preprint arXiv:2004.07219. +Fu, J.; and Levine, S. 2021. 
+Offline Model-Based Opti- +mization via Normalized Maximum Likelihood Estimation. +arXiv preprint arXiv:2102.07970. +Fujimoto, S.; van Hoof, H.; Meger, D.; et al. 2018. +Ad- +dressing function approximation error in actor-critic meth- +ods. Proceedings of Machine Learning Research, 80. +Gelada, C.; Kumar, S.; Buckman, J.; Nachum, O.; and Belle- +mare, M. G. 2019. Deepmdp: Learning continuous latent +space models for representation learning. In International +Conference on Machine Learning, 2170–2179. PMLR. +Haarnoja, T.; Zhou, A.; Abbeel, P.; and Levine, S. 2018. +Soft actor-critic: Off-policy maximum entropy deep rein- +forcement learning with a stochastic actor. arXiv preprint +arXiv:1801.01290. +Jiang, N.; and Huang, J. 2020. Minimax confidence inter- +val for off-policy evaluation and policy optimization. arXiv +preprint arXiv:2002.02081. +Jin, C.; Yang, Z.; Wang, Z.; and Jordan, M. I. 2020. Provably +efficient reinforcement learning with linear function approx- +imation. In Conference on Learning Theory, 2137–2143. +Kakade, S.; and Langford, J. 2002. Approximately Optimal +Approximate Reinforcement Learning. In Proceedings of +the Nineteenth International Conference on Machine Learn- +ing, 267–274. Morgan Kaufmann Publishers Inc. +Kallus, N.; and Uehara, M. 2020. Double Reinforcement +Learning for Efficient Off-Policy Evaluation in Markov De- +cision Processes. J. Mach. Learn. Res., 21: 167–1. +Kidambi, R.; Rajeswaran, A.; Netrapalli, P.; and Joachims, +T. 2020. Morel: Model-based offline reinforcement learning. +arXiv preprint arXiv:2005.05951. +Kumar, A.; Fu, J.; Soh, M.; Tucker, G.; and Levine, S. 2019. +Stabilizing Off-Policy Q-Learning via Bootstrapping Error +Reduction. Advances in Neural Information Processing Sys- +tems, 32: 11784–11794. +Kumar, A.; Singh, A.; Tian, S.; Finn, C.; and Levine, S. +2021. A Workflow for Offline Model-Free Robotic Rein- +forcement Learning. In 5th Annual Conference on Robot +Learning. 
+Kumar, A.; Zhou, A.; Tucker, G.; and Levine, S. 2020. Con- +servative Q-Learning for Offline Reinforcement Learning. +In Larochelle, H.; Ranzato, M.; Hadsell, R.; Balcan, M. F.; +and Lin, H., eds., Advances in Neural Information Process- +ing Systems, volume 33, 1179–1191. Curran Associates, Inc. +Le, H.; Voloshin, C.; and Yue, Y. 2019. Batch policy learning +under constraints. In International Conference on Machine +Learning, 3703–3712. PMLR. +Liu, Q.; Li, L.; Tang, Z.; and Zhou, D. 2018a. Breaking the +curse of horizon: infinite-horizon off-policy estimation. In +Proceedings of the 32nd International Conference on Neural +Information Processing Systems, 5361–5371. +Liu, Y.; Bacon, P.-L.; and Brunskill, E. 2020. Understanding +the curse of horizon in off-policy evaluation via conditional +importance sampling. In International Conference on Ma- +chine Learning, 6184–6193. PMLR. +Liu, Y.; Gottesman, O.; Raghu, A.; Komorowski, M.; Faisal, +A.; Doshi-Velez, F.; and Brunskill, E. 2018b. Representa- +tion Balancing MDPs for Off-Policy Policy Evaluation. Ad- +vances in neural information processing systems. +Liu, Y.; Swaminathan, A.; Agarwal, A.; and Brunskill, E. +2020. +Provably Good Batch Off-Policy Reinforcement +Learning Without Great Exploration. Advances in Neural +Information Processing Systems, 33. +Malik, A.; Kuleshov, V.; Song, J.; Nemer, D.; Seymour, H.; +and Ermon, S. 2019. +Calibrated model-based deep rein- +forcement learning. In International Conference on Machine +Learning, 4314–4323. PMLR. +Matsushima, T.; Furuta, H.; Matsuo, Y.; Nachum, O.; and +Gu, S. 2020. +Deployment-efficient reinforcement learn- +ing via model-based offline optimization. +arXiv preprint +arXiv:2006.03647. + +Munos, R. 2003. Error bounds for approximate policy itera- +tion. In ICML, volume 3, 560–567. +Nachum, O.; Chow, Y.; Dai, B.; and Li, L. 2019. DualDICE: +Behavior-Agnostic Estimation of Discounted Stationary +Distribution Corrections. +Paine, T. 
L.; Paduraru, C.; Michi, A.; Gulcehre, C.; Zolna, +K.; Novikov, A.; Wang, Z.; and de Freitas, N. 2020. Hyper- +parameter selection for offline reinforcement learning. arXiv +preprint arXiv:2007.09055. +Papamakarios, G.; Nalisnick, E. T.; Rezende, D. J.; Mo- +hamed, S.; and Lakshminarayanan, B. 2021. Normalizing +Flows for Probabilistic Modeling and Inference. J. Mach. +Learn. Res., 22(57): 1–64. +Quillen, D.; Jang, E.; Nachum, O.; Finn, C.; Ibarz, J.; and +Levine, S. 2018. Deep reinforcement learning for vision- +based robotic grasping: A simulated comparative evaluation +of off-policy methods. +In 2018 IEEE International Con- +ference on Robotics and Automation (ICRA), 6284–6291. +IEEE. +Schulman, J.; Moritz, P.; Levine, S.; Jordan, M.; and Abbeel, +P. 2016. High-dimensional continuous control using gener- +alized advantage estimation. In International Conference on +Learning Representations. +Singh, A.; Yu, A.; Yang, J.; Zhang, J.; Kumar, A.; and +Levine, S. 2020. COG: Connecting New Skills to Past Expe- +rience with Offline Reinforcement Learning. arXiv preprint +arXiv:2010.14500. +Swaminathan, A.; and Joachims, T. 2015. +Batch learn- +ing from logged bandit feedback through counterfactual risk +minimization. The Journal of Machine Learning Research, +16(1): 1731–1755. +Swazinna, P.; Udluft, S.; and Runkler, T. 2020. Overcoming +Model Bias for Robust Offline Deep Reinforcement Learn- +ing. arXiv preprint arXiv:2008.05533. +Thomas, P.; Theocharous, G.; and Ghavamzadeh, M. 2015. +High confidence policy improvement. In International Con- +ference on Machine Learning, 2380–2388. PMLR. +Thomas, P. S.; da Silva, B. C.; Barto, A. G.; Giguere, S.; +Brun, Y.; and Brunskill, E. 2019. +Preventing undesirable +behavior of intelligent machines. Science, 366(6468): 999– +1004. +Thomas, P. S.; Theocharous, G.; Ghavamzadeh, M.; Du- +rugkar, I.; and Brunskill, E. 2017. 
Predictive Off-Policy Pol- +icy Evaluation for Nonstationary Decision Problems, with +Applications to Digital Marketing. In AAAI, 4740–4745. +Uehara, M.; Huang, J.; and Jiang, N. 2020. Minimax weight +and q-function learning for off-policy evaluation. +In In- +ternational Conference on Machine Learning, 9659–9668. +PMLR. +van Hasselt, H. P.; Hessel, M.; and Aslanides, J. 2019. When +to use parametric models in reinforcement learning? In Wal- +lach, H.; Larochelle, H.; Beygelzimer, A.; d'Alch´e-Buc, F.; +Fox, E.; and Garnett, R., eds., Advances in Neural Informa- +tion Processing Systems, volume 32. Curran Associates, Inc. +Voloshin, C.; Jiang, N.; and Yue, Y. 2021. Minimax Model +Learning. In International Conference on Artificial Intelli- +gence and Statistics, 1612–1620. PMLR. +Xie, T.; and Jiang, N. 2020. +Batch value-function +approximation with only realizability. +arXiv preprint +arXiv:2008.04990. +Xie, T.; Ma, Y.; and Wang, Y. 2019. Towards optimal off- +policy evaluation for reinforcement learning with marginal- +ized importance sampling. Advances in neural information +processing systems. +Yin, M.; and Wang, Y.-X. 2020. +Asymptotically effi- +cient off-policy evaluation for tabular reinforcement learn- +ing. In International Conference on Artificial Intelligence +and Statistics, 3948–3958. PMLR. +Yu, T.; Kumar, A.; Rafailov, R.; Rajeswaran, A.; Levine, +S.; and Finn, C. 2021. +Combo: Conservative of- +fline model-based policy optimization. +arXiv preprint +arXiv:2102.08363. +Yu, T.; Thomas, G.; Yu, L.; Ermon, S.; Zou, J.; Levine, S.; +Finn, C.; and Ma, T. 2020. MOPO: Model-based Offline +Policy Optimization. arXiv preprint arXiv:2005.13239. +Zhan, X.; Zhu, X.; and Xu, H. 2021. +Model-Based Of- +fline Planning with Trajectory Pruning. +arXiv preprint +arXiv:2105.07351. +Zhang, R.; Dai, B.; Li, L.; and Schuurmans, D. 2019. Gen- +DICE: Generalized Offline Estimation of Stationary Values. +In International Conference on Learning Representations. 
+Zhang, S.; Liu, B.; and Whiteson, S. 2020. Gradientdice: +Rethinking generalized offline estimation of stationary val- +ues. +In International Conference on Machine Learning, +11194–11203. PMLR. + +Missing Proofs +High Probability Events +In this section, we introduce concentration inequalities and define the high probability events. +Define the following quantities +L(π, g, T) = E(s,a,s′)∼µ +� +wπ,T (s, a)(Ex∼T (s,a)[g(x)] − Ex∼T ⋆(s,a)[g(x)]) +� +, +(13) +l(π, g, T) = E(s,a,s′)∼D[wπ,T (s, a)(f g +T (s, a) − g(s′))]. +(14) +Recall that ι = log(2|G||T ||Π|/δ). Consider the event +E = +� +|L(π, g, T) − l(π, g, T)| ≤ 2Vmax +� +ζι +n , +∀π ∈ Π, g ∈ G, T ∈ T +� +. +(15) +In the following, we show that +Pr (E) ≥ 1 − δ. +(16) +Recall that D = {(si, ai, s′ +i)}n +i=1 where (si, ai, s′ +i) ∼ µ are i.i.d. samples from distribution µ. For fixed π ∈ Π, g ∈ G, T ∈ T , +we have E[ˆl(π, g, T)] = l(π, g, T). Meanwhile, note that +|wπ,T (s, a)(f g +T (s, a) − g(s′))| ≤ ζVmax, +(17) +E(s,a,s′)∼µ[wπ,T (s, a)2(f g +T (s, a) − g(s′))2] +(18) +≤ E(s,a,s′)∼ρπ +T [wπ,T (s, a)(f g +T (s, a) − g(s′))2] ≤ V 2 +maxζ. +(19) +By Bernstein inequality, with probability at least 1 − δ/(|G||T ||Π|), +|L(π, g, T) − l(π, g, T)| ≤ +� +2V 2 +maxζ log(2|G||T ||Π|/δ) +n ++ ζVmax +3n +log(2|G||T ||Π|/δ) +(20) +Recall that ι = log(2|G||T ||Π|/δ). When n ≥ ζ we have +|L(π, g, T) − l(π, g, T)| ≤ 2Vmax +� +ζι +n . +(21) +Note that when n < ζ, E trivially holds. As a result, applying union bound we prove Eq. (16). +Proof of Lemma 3 +Proof. In the following, we consider a fixed policy π and dynamics T ∈ T . We use w to denote wπ,T when the context is clear. +By basic algebra we get +���E(s,a)∼ρπ +T [Gπ +T (s, a)] +��� +(22) +≤ +����E(s,a)∼ρπ +T +� +I +�ρπ +T (s, a) +ˆµ(s, a) ≤ ζ +� +Gπ +T (s, a) +����� + E(s,a)∼ρπ +T +� +I +�ρπ +T (s, a) +ˆµ(s, a) > ζ +� +|Gπ +T (s, a)| +� +(23) +≤ +��E(s,a)∼ˆµ[w(s, a)Gπ +T (s, a)] +�� + VmaxE(s,a)∼ρπ +T +� +I +�ρπ +T (s, a) +ˆµ(s, a) > ζ +�� +. 
+(24) +Note that +E(s,a)∼ˆµ[w(s, a)Gπ +T (s, a)] = +� +s,a +ˆµ(s, a)w(s, a)Gπ +T (s, a) +(25) += +� +s,a +(ˆµ(s, a) − µ(s, a) + µ(s, a))w(s, a)Gπ +T (s, a) +(26) += +� +s,a +µ(s, a)w(s, a)Gπ +T (s, a) + +� +s,a +(ˆµ(s, a) − µ(s, a))w(s, a)Gπ +T (s, a) +(27) +≤ E(s,a)∼µ[w(s, a)Gπ +T (s, a)] + +� +s,a +|ˆµ(s, a) − µ(s, a)|ζVmax +(28) +≤ E(s,a)∼µ[w(s, a)Gπ +T (s, a)] + ζVmaxTV (ˆµ, µ) . +(29) + +Continuing Eq. (24) we get +���E(s,a)∼ρπ +T [Gπ +T (s, a)] +��� +(30) +≤ +��E(s,a)∼µ[w(s, a)Gπ +T (s, a)] +�� + VmaxE(s,a)∼ρπ +T +� +I +�ρπ +T (s, a) +ˆµ(s, a) > ζ +�� ++ ζVmaxTV (ˆµ, µ) . +(31) +Consequently, in the following we prove +��E(s,a)∼µ[w(s, a)Gπ +T (s, a)] +�� ≤ sup +g∈G +ℓw(g, T) + ϵV (T, π) + 2Vmax +� +ζι +n . +Let Lw(g, T) = +��E(s,a,s′)∼µ +� +w(s, a)(Ex∼T (s,a)[g(x)] − Ex∼T ⋆(s,a)[g(x)]) +��� be the population error. Recall that under the high +probability event E in Eq. (15), for any g ∈ G and T ∈ T +|Lw(g, T) − ℓw(g, T)| ≤ 2Vmax +� +ζι +n . +(32) +Now by the definition of Gπ +T (s, a), for any g ∈ G we have +��E(s,a)∼µ[w(s, a)Gπ +T (s, a)] +�� +(33) += +��E(s,a)∼µ +� +w(s, a) +� +Es′∼T (s,a)[V π +T ⋆(s′)] − Es′∼T ⋆(s,a)[V π +T ⋆(s′)] +���� +(34) +≤ +��E(s,a)∼µ +� +w(s, a) +� +Es′∼T (s,a)[g(s′)] − Es′∼T ⋆(s,a)[g(s′)] +���� +(35) ++ +��E(s,a)∼µ +� +w(s, a) +� +Es′∼T (s,a)[g(s′) − V π +T ⋆(s′)] + Es′∼T ⋆(s,a)[g(s′) − V π +T ⋆(s′)] +����. +(36) +Define +ˆg = argmin +g∈G +��E(s,a)∼µ +� +w(s, a) +� +Es′∼T (s,a)[g(s′) − V π +T ⋆(s′)] + Es′∼T ⋆(s,a)[g(s′) − V π +T ⋆(s′)] +����. +Since g is arbitrarily, continuing Eq. (36) and recalling Definition 2 we get +��E(s,a)∼µ[w(s, a)Gπ +T (s, a)] +�� +(37) +≤ +��E(s,a)∼µ +� +w(s, a) +� +Es′∼T (s,a)[ˆg(s′)] − Es′∼T ⋆(s,a)[ˆg(s′)] +���� + ϵV (T, π) +(38) +≤ sup +g∈G +��E(s,a)∼µ +� +w(s, a) +� +Es′∼T (s,a)[g(s′)] − Es′∼T ⋆(s,a)[g(s′)] +���� + ϵV (T, π). +(39) +Combining Eq. (39) and Eq. (32) we get, +��E(s,a)∼µ[w(s, a)Gπ +T (s, a)] +�� ≤ sup +g∈G +Lw(g, T) + ϵV (T, π) +(40) +≤ sup +g∈G +ℓw(g, T) + ϵV (T, π) + 2Vmax +� +ζι +n . 
+(41) +Now plugging in Eq. (31) we get, +���E(s,a)∼ρπ +T [Gπ +T (s, a)] +��� +≤ sup +g∈G +ℓw(g, T) + ϵV (T, π) + 2Vmax +� +ζι +n + VmaxE(s,a)∼ρπ +T +� +I +�ρπ +T (s, a) +ˆµ(s, a) > ζ +�� ++ ζVmaxTV (ˆµ, µ) . +Finally, combining with simulation lemma (Lemma 1) we finish the proof. +Proof of Lemma 5 +Proof of Lemma 5. Consider a fixed π ∈ Π. When the context is clear, we use ϵρ and ϵµ to denote ϵρ(π) and ϵµ(π) respectively. +Consider the dynamics +ˆT = argmin +T ∈T +E(s,a)∼ρπ +T ⋆ [TV (T(s, a), T ⋆(s, a))]. +(42) +By the definition of ϵρ we get +E(s,a)∼ρπ +T ⋆ +� +TV +� +ˆT(s, a), T ⋆(s, a) +�� +≤ ϵρ. + +Applying Lemma 9 we get +��ρπ +ˆT − ρπ +T ⋆ +�� +1 ≤ +ϵρ +(1 − γ). +(43) +The rest of the proof is organized in the following way. We bound the three terms in RHS of Eq. (4) respectively as follows +η( ˆT, π) ≥ η(T ⋆, π) − Vmax +1 − γ ϵρ, +(44) +sup +g∈G +ℓw(g, ˆT) ≤ 2Vmaxϵρ +1 − γ ++ 2Vmax +� +ζι +n + ζVmaxTV (ˆµ, µ) , +(45) +E(s,a)∼ρπ +ˆ +T +� +I +� +ρπ +ˆT (s, a) +ˆµ(s, a) > ζ +�� +≤ ϵµ + +3ϵρ +(1 − γ). +(46) +Then we combine these inequalities together to prove Lemma 5. +Step 1: Proving Eq. (44). Note that for every T and π, η(T, π) = +1 +1−γ ⟨ρπ +T , r⟩ where r is the reward function. Then we have +η(T ⋆, π) − η( ˆT, π) = +1 +1 − γ +� +ρπ +T ⋆ − ρπ +ˆT , r +� +≤ +1 +1 − γ +��ρπ +T ⋆ − ρπ +ˆT +�� +1 ∥r∥∞ . +(47) +Combining with Eq. (43) we get Eq. (44). +Step 2: Proving Eq. (45). For any fixed function g ∈ G. Let w = wπ, ˆT be a shorthand. Define +Lw(g, T) = +��E(s,a,s′)∼µ[w(s, a)(f g +T (s, a) − g(s′))] +�� +to be the population error. 
Then we have +Lw(g, ˆT) += +���E(s,a)∼µ +� +w(s, a) +� +Es′∼ ˆT (s,a)[g(s′)] − Es′∼T ⋆(s,a)[g(s′)] +����� +≤ +���E(s,a)∼ˆµ +� +w(s, a) +� +Es′∼ ˆT (s,a)[g(s′)] − Es′∼T ⋆(s,a)[g(s′)] +����� + ζVmaxTV (ˆµ, µ) += +�����E(s,a)∼ρπ +ˆ +T +� +I +� +ρπ +ˆT (s, a) +ˆµ(s, a) ≤ ζ +� � +Es′∼ ˆT (s,a)[g(s′)] − Es′∼T ⋆(s,a)[g(s′)] +������� + ζVmaxTV (ˆµ, µ) +≤ VmaxE(s,a)∼ρπ +ˆ +T +� +I +� +ρπ +ˆT (s, a) +ˆµ(s, a) ≤ ζ +� +TV +� +ˆT(s, a), T ⋆(s, a) +�� ++ ζVmaxTV (ˆµ, µ) +≤ VmaxE(s,a)∼ρπ +T ⋆ +� +TV +� +ˆT(s, a), T ⋆(s, a) +�� ++ Vmaxϵρ +1 − γ + ζVmaxTV (ˆµ, µ) +(By Eq. (43)) +≤ Vmax +� +ϵρ + +ϵρ +1 − γ +� ++ ζVmaxTV (ˆµ, µ) ≤ 2Vmaxϵρ +1 − γ ++ ζVmaxTV (ˆµ, µ) . +Under event E we have +ℓw(g, ˆT) ≤ Lw(g, ˆT) + 2Vmax +� +ζι +n . +(48) +Because g is arbitrary, we get Eq. (45). +Step 3: Proving Eq. (46). Note that +E(s,a)∼ρπ +ˆ +T +� +I +�ρˆπ +T (s, a) +ˆµ(s, a) > ζ +�� +(49) += E(s,a)∼ρπ +ˆ +T +� +I +� +ρπ +ˆT (s, a) +ρπ +T ⋆(s, a) +ρπ +T ⋆(s, a) +ˆµ(s, a) +> ζ +�� +(50) +≤ E(s,a)∼ρπ +ˆ +T +� +I +� +ρπ +ˆT (s, a) +ρπ +T ⋆(s, a) > 2 +�� ++ E(s,a)∼ρπ +ˆ +T +� +I +�ρπ +T ⋆(s, a) +ˆµ(s, a) +> ζ/2 +�� +. +(51) + +With the help of Lemma 8, we can upper bound the first term of Eq. (51) by the total variation between ρπ +ˆT and ρπ +T ⋆. Combining +Lemma 8 and Eq. (43) we get +E(s,a)∼ρπ +ˆ +T +� +I +� ρˆπ +T (s, a) +ρπ +T ⋆(s, a) > 2 +�� +≤ +2ϵρ +1 − γ . +(52) +On the other hand, by combining Eq. (43) and the definition of ϵµ we get +E(s,a)∼ρπ +ˆ +T +� +I +�ρπ +T ⋆(s, a) +ˆµ(s, a) +> ζ/2 +�� +≤ E(s,a)∼ρπ +T ⋆ +� +I +�ρπ +T ⋆(s, a) +ˆµ(s, a) +> ζ/2 +�� ++ +ϵρ +1 − γ ≤ ϵµ + +ϵρ +1 − γ . +Consequently, we get Eq. (46). +Now we stitch Eq. (43), Eq. (44) and Eq. (45) together. Combining with the definition of lb( ˆT, π) in Eq. 
(4), we have +lb( ˆT, π) = η( ˆT, π) − +1 +1 − γ +� +sup +g∈G +���ℓwπ,T (g, ˆT) +��� + VmaxE(s,a)∼ρπ +T +� +I +� +ρπ +ˆT (s, a) +ˆµ(s, a) > ζ +�� ++ 2ζVmaxTV (ˆµ, µ) +� +≥ η(T ⋆, π) − Vmaxϵρ +1 − γ − 2Vmaxϵρ +(1 − γ)2 − 2Vmax +1 − γ +� +ζι +n − Vmax +1 − γ +� 3ϵρ +1 − γ + ϵµ +� +− 2ζVmaxTV (ˆµ, µ) +1 − γ +≥ η(T ⋆, π) − 6Vmaxϵρ +(1 − γ)2 − Vmaxϵµ +1 − γ − 2Vmax +1 − γ +� +ζι +n − 2ζVmaxTV (ˆµ, µ) +1 − γ +. +Note that ˆT ∈ T , we have +max +T ∈T lb(T, π) ≥ lb( ˆT, π), +(53) +which finishes the proof. +Proof of Theorem 4 +Proof of Theorem 4. Let ˆT, ˆπ ← argmaxT ∈T ,π∈Π lb(T, π) be the dynamics and policy that maximizes the lower bound. Note +that ˆπ is the output of Algorithm 1. +Now under the event E, by Lemma 5, for any policy π we have +max +T ∈T lb(T, π) ≥ η(T ⋆, π) − 6Vmaxϵρ(π) +(1 − γ)2 +− Vmaxϵµ(π) +1 − γ +− 2Vmax +1 − γ +� +ζι +n − 2ζVmaxTV (ˆµ, µ) +1 − γ +. +(54) +On the other hand, under the event E, by Lemma 3 we get +η(T ⋆, π) ≥ lb( ˆT, ˆπ) − ϵV ( ˆT, ˆπ) +1 − γ +− 2Vmax +1 − γ +� +ζι +n . +(55) +By the optimality of ˆT, ˆπ, we have lb( ˆT, ˆπ) ≥ supT ∈T lb(T, π) for any π. As a result, combining with Eq. (54) and Eq. (55) +we get +η(T ⋆, ˆπ) ≥ lb( ˆT, ˆπ) − ϵV ( ˆT, ˆπ) +1 − γ +− 2Vmax +1 − γ +� +ζι +n +(56) +≥ sup +π∈Π +sup +T ∈T +lb(T, π) − ϵV ( ˆT, ˆπ) +1 − γ +− 2Vmax +1 − γ +� +ζι +n +(57) +≥ sup +π +� +η(T ⋆, π) − 6Vmaxϵρ(π) +(1 − γ)2 +− Vmaxϵµ(π) +1 − γ +� +− ϵV ( ˆT, ˆπ) +1 − γ +− 4Vmax +1 − γ +� +ζι +n − 2ζVmaxTV (ˆµ, µ) +1 − γ +. +(58) +Proof of Theorem 6 +Proof of Theorem 6. Note that for any fixed θ ∈ Rd, the transition function for state s1, · · · , sd are identical. As a result, +Qπ +Tθ(si, aj) = Qπ +Tθ(si′, aj), ∀i, i′ ∈ [d] for any policy π. Recall that πθ is the optimal policy of Tθ (with ties breaking uniformly +at random). Therefore, πθ(s0) = 1/A and πθ(si) = πθ(si′), ∀i, i′ ∈ [d]. +By the definition of the ground-truth dynamics T ⋆ in Eqs. (9)-(10), we have Qπθ +T ⋆(si, aj) = I [i = j] +γ +1−γ . 
Therefore, +η(T ⋆, πθ) = γ +A +d +� +i=1 +Qπθ +T ⋆(si, πθ(si)) ≤ γ +A max +a +d +� +i=1 +Qπθ +T ⋆(si, a) ≤ +γ2 +A(1 − γ). +(59) + +Since maxπ η(T ⋆, π) = +γ2 +1−γ , we have +max +π +η(T ⋆, π) − η(T ⋆, πθ) ≥ (A − 1)γ2 +A(1 − γ) . +OPE Error of MML +In this section, we show that the off-policy estimation error in Voloshin, Jiang, and Yue (2021) can be large when the dynamics +model class is misspecified in Proposition 7. +The MML algorithm requires an density ratio class W : S × A → R+ and prove that when wπ,T ∈ W and V π +T ⋆ ∈ G, +|η(T, π) − η(T ⋆, π)| ≤ γ min +T ∈T +max +w∈W,g∈G |ℓw(g, T)|. +(60) +Unfortunately, this is suboptimal since the error may not converge to zero even given infinite data: +Proposition 7. Consider the set the dynamics class T = {Tθ : θ ∈ Sd−1, θi ≥ 0, ∀i ∈ [d]}. Let Π = {πx : x ∈ [d]} where +πx(si) = ax for 0 ≤ i ≤ d and πx(sg) = πx(sb) = a1. Let W be the density ratio class induced by π running on {T ⋆} ∪ T . +Even with G = {V πx +T ⋆ : x ∈ [d]} and infinite number of data, we have +min +T ∈T +max +w∈W,g∈G |ℓw(g, T)| ≥ +γ +8(1 − γ). +(61) +In contrast, the error terms in Theorem 4 converge to 0 when ζ > poly(d, 1/(1 − γ)) and n → ∞ in the same setting. +Proof of Proposition 7. Recall that we set the dynamics class T = {Tθ : θ ∈ Sd−1}. Let Π = {πx : x ∈ [d]} where +πx(si) = ax for 0 ≤ i ≤ d and πx(sg) = πx(sb) = a1. Let W be the density ratio induced by π. For any x ∈ [d], we can +compute +ρπx +T ⋆(s0, ai) = (1 − γ)I [i = x] , +ρπx +T ⋆(si, aj) = γ(1 − γ)I [i = x, j = x] , +(62) +ρπx +T ⋆(sg, aj) = γ2(1 − γ)I [j = 1] , +ρπx +T ⋆(sb, aj) = 0. +(63) +Let µ be uniform distribution over 3d + d2 state action pairs. Then we can define W = {wx : x ∈ [d]} where wx(s, a) ≜ +1 +1−γ +ρπx +T ⋆(s,a) +µ(s,a) . +Now for any fixed θ ∈ Sd−1, θ ≥ 0, consider +max +w∈W,g∈G |ℓw(g, Tθ)|. +(64) +Let x = argmini θi. We claim that +ℓwx(V πx +T ⋆ , Tθ) ≥ +γ +8(1 − γ). 
+Indeed, with infinite data we have +ℓwx(V πx +T ⋆ , Tθ) = +��E(s,a)∼µ +� +wx(s, a) +� +Es′∼T (s,a)[V πx +T ⋆ (s′)] − Es′∼T ⋆(s,a)[V πx +T ⋆ (s′)] +���� += +1 +1 − γ +���E(s,a)∼ρπx +T ⋆ +�� +Es′∼T (s,a)[V πx +T ⋆ (s′)] − Es′∼T ⋆(s,a)[V πx +T ⋆ (s′)] +�����. +Recall that Tθ = T ⋆ for states s0, sg, sb. As a result, we continue the equation by +1 +1 − γ +���E(s,a)∼ρπx +T ⋆ +�� +Es′∼T (s,a)[V πx +T ⋆ (s′)] − Es′∼T ⋆(s,a)[V πx +T ⋆ (s′)] +����� += γ +��Es′∼T (sx,ax)[V πx +T ⋆ (s′)] − Es′∼T ⋆(sx,ax)[V πx +T ⋆ (s′)] +�� +(by the definition of ρ) += γ +���� +1 +2(1 + θx)V πx +T ⋆ (sg) + 1 +2(1 − θx)V πx +T ⋆ (sb) − V πx +T ⋆ (sg) +���� +(by the definition of Tθ) += γ +2 (1 − θx)(V πx +T ⋆ (sg) − V πx +T ⋆ (sb)). +By basic algebra, V πx +T ⋆ (sg) = (1 − γ)−1 and V πx +T ⋆ (sb) = 0. As a result, we get +ℓwx(V πx +T ⋆ , Tθ) ≥ +γ +2(1 − γ)(1 − θx). +(65) +Recall that x = argmini θi. Since θ ∈ Sd−1 and θi ≥ 0, ∀i, we have 1 = �d +i=1 θ2 +i ≥ dθ2 +x. As a result, when d > 2 we have +θx ≤ 1/ +√ +2. Therefore +ℓwx(V πx +T ⋆ , Tθ) ≥ +γ +2(1 − γ)(1 − θx) ≥ +γ +8(1 − γ). +(66) + +Helper Lemmas +In this section, we present several helper lemmas used in Appendix . +Lemma 8. For two distribution p, q over x ∈ X, if we have ∥p − q∥1 ≤ ϵ, then for any ζ > 1, +Ex∼p +� +I +�p(x) +q(x) > ζ +�� +≤ +ζ +ζ − 1ϵ. +Proof. Define E(x) = I +� +p(x) +q(x) > ζ +� +. Note that under event E(x) we have +p(x) > q(x)ζ =⇒ p(x) − q(x) > q(x)(ζ − 1). +(67) +As a result, +ϵ ≥ ∥p − q∥1 ≥ +� +|p(x) − q(x)|E(x) dx +(68) +≥ +� +(ζ − 1)q(x)E(x) dx = Ex∼q[E(x)](ζ − 1) +(69) +≥ (Ex∼p[E(x)] − ϵ)(ζ − 1). +(70) +By algebraic manipulation we get Ex∼p[E(x)] ≤ +ζ +ζ−1ϵ. +Lemma 9. Consider a fixed policy π and two dynamics model T, ¯T. Suppose +E(s,a)∼ρπ +T +� +TV +� +T(s, a), ¯T(s, a) +�� +≤ ϵ, +we get +��ρπ +T − ρπ +¯T +�� +1 ≤ +1 +1 − γ ϵ. +(71) +Proof. First of all let G, ¯G be the transition kernel from S × A to S × A induced by T, π and ¯T, π respectively. 
Then for any +distribution ρ ∈ ∆(S × A) we have +��Gρ − ¯Gρ +�� +1 ≤ E(s,a)∼ρ +� +TV +� ¯T(s, a), T(s, a) +�� +. +(72) +Let ρh (or ¯ρh) be the state-action distribution on step h under dynamics T (or ¯T). Then we have +ρh − ¯ρh = +� +Gh − ¯Gh� +ρ0 = +h−1 +� +h′=0 +¯Gh−h′−1� +G − ¯G +� +Gh′ρ0. +(73) +As a result, +∥ρh − ¯ρh∥1 ≤ +h−1 +� +h′=0 +��� ¯Gh−h′−1� +G − ¯G +� +Gh′ρ0 +��� +1 +(74) +≤ +h−1 +� +h′=0 +��� +� +G − ¯G +� +Gh′ρ0 +��� +1 ≤ +h−1 +� +h′=0 +E(s,a)∼ρh′ +� +TV +� ¯T(s, a), T(s, a) +�� +. +(75) +It follows that +��ρπ +T − ρπ +¯T +�� +1 ≤ (1 − γ) +∞ +� +h=0 +γh ∥ρh − ¯ρh∥1 +(76) +≤(1 − γ) +∞ +� +h=0 +γh +h−1 +� +h′=0 +E(s,a)∼ρh′ +� +TV +� ¯T(s, a), T(s, a) +�� +(77) +≤(1 − γ) +∞ +� +h=0 +γh +1 − γ E(s,a)∼ρh +� +TV +� ¯T(s, a), T(s, a) +�� +(78) += +∞ +� +h=0 +γhE(s,a)∼ρh +� +TV +� ¯T(s, a), T(s, a) +�� +(79) += +1 +1 − γ E(s,a)∼ρπ +T +� +TV +� ¯T(s, a), T(s, a) +�� +. +(80) + +LQR Experimental Details +Data generation +The +offline +dataset +is +generated +by +running +several +πv +under +the +true +dynamics +with +v +∈ +{−1, −0.75, −0.5, −0.25, 0, 0.25, 0.5, 0.75} and added noise N(0, 0.5) to the policy. As a result, the behavior dataset +covers most of the state-action space. The dataset contains 2000 trajectories with length 20 from each policy. +Implementation +We compute the density ratio by approximating the behavior distribution µ and the state-action distribution ρπ +T respectively. By +discretizing the state-action space into 10 × 10 bins uniformly, the distribution µ(s, a) is approximated by the frequency of the +corresponding bin. For ρπ +T , we first collect 2000 trajectories of policy π under T and compute the distribution similarly. Because +all the function classes are finite, we enumerate over the function classes to compute lb(T, π) for every pair of dynamics and +policy. +Hyperparameters +In the experiments, we use the following hyperparameters. +• Cutoff threshold in Line 3 of Alg. 1: ζ = 50. +• Random seeds for three runs: 1, 2, 3. 
+• State noise: η ∼ N(0, 0.05). +• Policy noise: N(0, 0.01). +• Discount factor: γ = 0.9. +• Mean of initial state: 0.5. +• Noise added to initial state: 0.2. +• Number of trajectories per policy: 2000. +We do not require parameter tuning for optimization procedures. We tried cutoff threshold with ζ ∈ {10, 20, 50} and number +of trajectories in {20, 500, 2000}. Smaller cutoff leads to an over-pessimistic lower bound, and fewer trajectories introduce +variance to the final result. +Computing resources +These experiments run on a machine with 2 CPUs, 4GB RAM, and Ubuntu 20.04. We don’t require GPU resources. We use +Python 3.9.5 and numpy 1.20.2. +D4RL Experimental Details +Tasks +Hopper. The Hopper task is to make a hopper with three joints and four body parts hop forward as fast as possible. The state +space is 11-dimension, the action is a 3-dimensional continuous space. +HalfCheetah. The HalfCheetah task is to make a 2D robot with 7 rigid links, including 2 legs and a torso run forward as fast +as possible. The state space is 17-dimension, the action is a 6-dimensional continuous space. +Model Choice and Hyperparameters +For all the dynamics, each model is parametrized as a 4-layer feedforward neural network with 200 hidden units. For the +SAC (Haarnoja et al. 2018) updates (serving as the policy gradient updates subroutine), the function approximations used for +the policy and value function are 2-layer feedforward neural networks with 256 hidden units. +The hyperparameter choices for behavior density modeling are based on the training progress of the normalizing flow model. +We pre-select a few (less than 10) combinations of hyperparameters and pick the set that gives us the lowest training loss. +Usually, this is not the best practice. However, the small number of combinations (non-exhaustive search) and small model size +reduced our concern for training set overfitting. +MOPO (Yu et al. 2020): +• Batch size: 100. +• Rollout horizon: 5. +• Lambda: 1. 
+MBLB: +• Random seeds for five runs: 1, 2, 3, 4, 5. + +• Number of trajectories to sample: 100. +• Rollout horizon: 5. +• Batch size: 32. +• Cutoff threshold in Line 3 of Alg. 1: ζ = 5. +• Discount factor γ: 0.99. +• GAE λ: 0.95. +• g function latent size: 8. +MML: +• Random seeds for five runs: 1, 2, 3, 4, 5. +• Batch size: 32. +• Basis function class: square, polynomial +• Ratio-Value function parametrization: linear, reproducing kernel hilbert space (RKHS) +For MML, we first need to make a decision on how to parametrize h(s, a, s′). If we choose a linear parametrization such as +h(s, a, s′) = ψ(s, a, s′)T θ, we need to decide what ψ is. There are two obvious choices: ψ(x) = [x, x2, 1] (square basis func- +tion), or a polynomial basis function with degree 2: given x = [x1, x2, ..., xd], ψ(x) = [x2 +1, x1x2, x1x3, ..., x2 +2, x2x3, ..., x2 +d], +which can be efficiently computed as the upper triangular entries of xxT . If we choose the ratio-value function parametrization +to be RKHS, then we use radial basis function (RBF) as K((s, a, s′), (˜s, ˜a, ˜s′)). +Computing resources +These experiments run on a machine with 4 CPUs, 10GB RAM, and Ubuntu 20.04. We don’t require GPU resources. We use +Python 3.9.5 and numpy 1.20.2. +Algorithms +We describe the MML and MBLB algorithms in this section. Algorithm 2 describes how we compute MBLB. Note that we +compute three components of lower bound explicitly. Algorithm 3 describes how we compute MML with linear parametrization. +Algorithm 4 describes how we compute MML with RKHS parametrization. +Algorithm 2: MBLB: Model-based Lower Bound +Input: offline RL data D; set of dynamics, policy pairs +[(π1, T1), ..., (πK, TK)], Vmax, γ, ζ. 
+Output: optimal policy π∗ +ˆµ(·, ·) = trainFlow (D) +scores = [] +for i ← 1...K do +Qπi = trainFQE (Sample (D, Ti, πi), πi) +ρTi +πi(·, ·) = trainFlow (Sample (D, Ti, πi)) +η = E(s,a)∼D[Qπi(s, πi(s))] +Initialize (θ) +L = 0; ∆ = 0 +for (s, a, s′) ∈ D do +w = max(min( +ρ +Ti +πi(s,a) +ˆµ(s,a) , ζ), 0) +ℓ = −|w · (Ex∼Ti(s)[gθ(x)] − gθ(s′))| +θ = θ + ∇θℓ +∆ = ∆ − Vmax · I +� +ρ +Ti +πi(s,a) +ˆµ(s,a) > ζ +� +L = L + ℓ +end +score = +1 +|D|(η + +1 +1−γ (∆ + L)) +scores ← score +end +i = argmax(scores) +return πi + +Algorithm 3: MML-Linear: Minimax Model Learning Bound +Input: offline RL data D; set of dynamics, policy pairs +[(π1, T1), ..., (πK, TK)]. +Output: optimal policy π∗ +scores = [] +for i ← 1...K do +Initialize (θ) +L = 0 +for (s, a, s′) ∈ D do +ℓ = −(Ex∼Ti(s)[ψ(s, a, x)T θ] − ψ(s, a, s′)T θ) +θ = θ + ∇θℓ +L = L + ℓ +end +score = +L +|D| +scores ← score +end +i = argmax(scores) +return πi +Algorithm 4: MML-RKHS: Minimax Model Learning Bound +Input: offline RL data D; set of dynamics, policy pairs +[(π1, T1), ..., (πK, TK)], kernel K. +Output: optimal policy π∗ +scores = [] +for i ← 1...K do +L = 0 +for (s, a, s′), (˜s, ˜a, ˜s′) ∈ D do +ℓ1 = Ex∼T (s),˜x∼T (˜s)[K((s, a, x), (˜s, ˜a, ˜x))] +ℓ2 = −2Ex∼T (s)[K((s, a, x), (˜s, ˜a, ˜s′))] +ℓ3 = K((s, a, s′), (˜s, ˜a, ˜s′)) +L = L + ℓ1 + ℓ2 + ℓ3 +end +score = +L +|D| +scores ← score +end +i = argmax(scores) +return πi +D4RL Additional Experiments +Ablation Study +We conduct an ablation study in Table A1 where we evaluate the final performance of the policies selected using either FQE +with TD-1 estimation or FQE with GAE estimation. We observe that using GAE for offline policy selection allows for picking +better policies on average. +MBLB with RKHS +In this section, we derive the closed-form solution to supg∈G ℓw(g, T) when the test function g belongs to a reproducing kernel +Hilbert space (RKHS), and empirically evaluate the MBLB method with RKHS parameterization. 
+Let K : S ×S → R be a symmetric and positive definite kernel and HK its corresponding RKHS with inner product ⟨·, ·⟩HK. +Then we have the following lemma. +Lemma 10. When G = {g ∈ HK : ⟨g, g⟩HK ≤ 1}, we have +sup +g∈G +ℓw(g, T)2 = Es,a,s′∼D,x∼T (s,a)E˜s,˜a,˜s′∼D,˜x∼T (˜s,˜a) [w(s, a)w(˜s, ˜a)(K(x, ˜x) + K(s′, ˜s′) − K(x, ˜s′) − K(˜x, s′)] +(81) +Proof. Let Kx ≜ K(x, ·) ∈ HK. By the reproducing property, we have ⟨Kx, Ky⟩HK = K(x, y) and ⟨Kx, g⟩HK = g(x). As a + +Dataset Type +Environment +FQE +(TD-1) +FQE +(GAE) +medium +hopper +507.8 +(549.6) +533.5 +(532.6) +med-expert +hopper +149.3 +(146.2) +261.1 +(157.9) +expert +hopper +39.0 +(34.6) +120.7 +(78.7) +medium +halfcheetah +1802.5 +(1011.9) +2117.4 +(1215.6) +med-expert +halfcheetah +302.1 +(605.2) +394.9 +(632.0) +Table A1: We report the mean and (standard deviation) of the selected policy’s environment performance across 3 random seeds +using different variants of FQE. +result, +sup +g∈G +ℓw(g, T)2 = +sup +g:⟨g,g⟩HK ≤1 +Es,a,s′∼D,x∼T (s,a)[w(s, a)(⟨Kx, g⟩HK − ⟨Ks′, g⟩HK)]2 +(82) += +sup +g:⟨g,g⟩HK ≤1 +� +Es,a,s′∼D,x∼T (s,a)[w(s, a)(Kx − Ks′)], g +�2 +HK +(83) += ∥Es,a,s′∼D,x∼T (s,a)[w(s, a)(Kx − Ks′)]∥2 +HK +(Cauchy-Schwarz) += +� +Es,a,s′∼D,x∼T (s,a)[w(s, a)(Kx − Ks′)], E˜s,˜a,˜s′∼D,˜x∼T (˜s,˜a)[w(˜s, ˜a)(K˜x − K˜s′)] +� +HK +(84) += Es,a,s′∼D,x∼T (s,a)E˜s,˜a,˜s′∼D,˜x∼T (˜s,˜a)[⟨w(s, a)(Kx − Ks′), w(˜s, ˜a)(K˜x − K˜s′)⟩HK] +(85) += Es,a,s′∼D,x∼T (s,a)E˜s,˜a,˜s′∼D,˜x∼T (˜s,˜a)[w(s, a)w(˜s, ˜a)(K(x, ˜x) + K(s′, ˜s′) − K(x, ˜s′) − K(˜x, s′)]. +(86) +Table A2 presents the performance of the MBLB algorithm with RKHS parameterization. On most of the environments, +MBLB-RKHS performs better than/comparable with MML-RKHS. However, MBLB-Quad consistently outperforms MBLB- +RKHS on all the environments. We suspect that MBLB-RKHS could outperform MBLB-Quad with different choices of kernels +because the quadratic parameterization can be seen as a special case of RKHS parameterization (with quadratic kernels). 
+Dataset Type +Env +MOPO +MML +(Squared) +MML +(Polynomial) +MML +(RKHS) +MBLB +(Linear) +MBLB +(Quad) +MBLB +(RKHS) +medium +hopper +175.4 +(95.3) +379.4 +(466.4) +375.6 +(459.5) +375.0 +(459.9) +591.7 +(523.1) +808.5 +(502.7) +317.8 +(476.4) +med-expert +hopper +183.8 +(94.4) +160.9 +(131.5) +116.5 +(148.4) +61.4 +(35.0) +261.1 +(157.9) +242.5 +(134.0) +208.1 +(144.3) +expert +hopper +80.4 +(63.4) +93.8 +(87.9) +61.6 +(61.9) +70.0 +(56.2) +118.2 +(61.6) +121.0 +(72.5) +120.9 +(61.8) +medium +halfcheetah +599.8 +(668.4) +1967.6 +(1707.5) +2625.1 +(937.2) +3858.2 +(1231.1) +3290.4 +(1753.1) +2484.2 +(1526.8) +2229.7 +(1949.8) +med-expert +halfcheetah +-486.6 +(48.1) +-188.5 +(137.2) +-77.0 +(252.5) +-343.2 +(225.2) +207.4 +(509.5) +192.8 +(432.0) +-2.1 +(690.6) +Table A2: We report the mean and (standard deviation) of selected policy’s simulator environment performance across 5 random +seeds. MML and MBLB are used as model-selection procedures where they select the best policy for each seed. Our method is +choosing the most near-optimal policy across the datasets. + +0.0 +0.2 +0.4 +0.6 +0.8 +1.0 +Normalized Score (τ) +0.00 +0.25 +0.50 +0.75 +1.00 +Fraction of runs with score > τ +MBLB +MML +MOPO +Figure A1: Performance profile between three methods. 
+ diff --git a/DdE2T4oBgHgl3EQfoQiw/content/2301.04017v1.pdf b/DdE2T4oBgHgl3EQfoQiw/content/2301.04017v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a8077f5969d9e657f0a1f6255960fc9a6a1af341 --- /dev/null +++ b/DdE2T4oBgHgl3EQfoQiw/content/2301.04017v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd45e0ea00df38d25d220134f77827964036bbd33ec4fe1dc8801e5856a789bd +size 1498985 diff --git a/DdE2T4oBgHgl3EQfoQiw/vector_store/index.faiss b/DdE2T4oBgHgl3EQfoQiw/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..03f41a3183eff8cfdbc8346d8833b7ac4decb056 --- /dev/null +++ b/DdE2T4oBgHgl3EQfoQiw/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea14c078524703282769e10b5b22eff99e4b3f7d4e6ab3473cf960e19ec8cb8e +size 3932205 diff --git a/DdE2T4oBgHgl3EQfoQiw/vector_store/index.pkl b/DdE2T4oBgHgl3EQfoQiw/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..9f23d795f8cd0380a387fe60ca61dae6a8a23121 --- /dev/null +++ b/DdE2T4oBgHgl3EQfoQiw/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d65f20b4d3a3d2dac74406b34118a592d316949aade63844aca04702c4d8f293 +size 178979 diff --git a/EdFKT4oBgHgl3EQfZy6F/vector_store/index.faiss b/EdFKT4oBgHgl3EQfZy6F/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..0667a742c893c3d8b25e1a0c55ce43346cd4a268 --- /dev/null +++ b/EdFKT4oBgHgl3EQfZy6F/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fb468a6af382278afaddde6f469b0f09608f6d1349fb8fd76a258529655891e +size 2162733 diff --git a/EdFKT4oBgHgl3EQfZy6F/vector_store/index.pkl b/EdFKT4oBgHgl3EQfZy6F/vector_store/index.pkl new file mode 100644 index 0000000000000000000000000000000000000000..17de02fb8096bc92ea2d8ea48f0e6b0d606025ff --- /dev/null +++ 
b/EdFKT4oBgHgl3EQfZy6F/vector_store/index.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:246d92fa65f64c42814f1d4bde839de4f54a7918db6cf0dd903fda2abb9643ba +size 84081 diff --git a/FNFJT4oBgHgl3EQfCyyx/content/tmp_files/2301.11431v1.pdf.txt b/FNFJT4oBgHgl3EQfCyyx/content/tmp_files/2301.11431v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..f72626fa63efbc775ce4deb216fceed731be5292 --- /dev/null +++ b/FNFJT4oBgHgl3EQfCyyx/content/tmp_files/2301.11431v1.pdf.txt @@ -0,0 +1,1411 @@ +Semidefinite Relaxations for Robust Multiview Triangulation +Linus H¨arenstam-Nielsen1, Niclas Zeller2, Daniel Cremers1 +1Technical University of Munich, 2Karlsruhe University of Applied Sciences +linus.nielsen@tum.de, niclas.zeller@h-ka.de, cremers@tum.de +Abstract +We propose the first convex relaxation for multiview tri- +angulation that is robust to both noise and outliers. To this +end, we extend existing semidefinite relaxation approaches +to loss functions that include a truncated least squares cost +to account for outliers. We propose two formulations, one +based on epipolar constraints and one based on the frac- +tional reprojection equations. The first is lower dimensional +and remains tight under moderate noise and outlier levels, +while the second is higher dimensional and therefore slower +but remains tight even under extreme noise and outlier lev- +els. We demonstrate through extensive experiments that the +proposed approach allows us to compute provably optimal +reconstructions and that empirically the relaxations remain +tight even under significant noise and a large percentage of +outliers. +1. Introduction +Triangulation refers to the problem of recovering the 3D +location of a set of points from their observed 2D locations +in two or more images under known camera transforma- +tions. 
+Since the 2D projections are typically noisy (due +to lens distortions or inaccurate feature point localization), +the optimal solution is often phrased as a non-convex opti- +mization problem. While solutions are mostly computed us- +ing faster but sub-optimal local optimization methods, there +have been some efforts to compute globally optimal triangu- +lations [1, 4]. While these works show that one can obtain +globally optimal solutions for triangulation problems with +noisy input, their practical value remains limited as they are +not well adapted to the challenges of real-world data where +even a single outlier can deteriorate the result. +Despite their often slower runtime, globally optimal +methods offer several advantages: Firstly, in safety-critical +systems it may be required to complement the computed so- +lution with some guarantee that it really is the best solution +or at least within a bound of the optimal solution. Secondly, +in many offline applications runtime is actually not critical +and then one may want to trade off better accuracy for extra +(a) 22 views, no outliers +(b) 22 views, 19 outliers +Figure 1. +Example of a triangulated point from the Reichstag +dataset. Blue point: ground truth. Red point: non-robust global +optimum found by the relaxation found by [1] (Eq. (T)). Green +point: robust global optimum found by our proposed relaxation in +Eq. (RT). +runtime. Thirdly, globally optimal solutions of real-world +problems can serve as ground truth for assessing the perfor- +mance of local optimization methods. +In this work, we revisit the problem of computing prov- +ably optimal triangulations in the presense of outliers. To +this end, we develop two possible convex relaxations for +the truncated least squares cost function so as to combine +the robustness with the capacity to compute globally opti- +mal solutions. 
Our main contributions can be summarized +as follows: +• We extend the convex triangulation methods from [1] +and [4] with a truncated least squares cost function and +derive the corresponding convex relaxations. +• We show that the relaxations are always tight in the +noise-free and outlier-free case by explicitly construct- +ing the globally optimal Lagrange multipliers (which +1 +arXiv:2301.11431v1 [cs.CV] 26 Jan 2023 + +furthermore satisfy the corank 1, restricted slater and +non-branch point criteria required for local stability +with respect to noise). +• We validate empirically that both relaxations remain +tight even under large amounts of noise and high out- +lier ratios. +To the best of our knowledge, this is the first example of +a successful semidefinite relaxation of a robust estimation +problem with reprojection errors. +2. Related work +Triangulation is a core subroutine for structure from mo- +tion and therefore has been studied extensively. For two +views there are many solution variants, including comput- +ing the roots of a degree 6 polynomial [9] for the repro- +jection error, and [15] for the angular error. Multiview tri- +angulation is typically performed using non-optimal meth- +ods such as local search or the linear method from [9]. +Robust triangulation is typically tackled using RANSAC +[13, 16, 20] where a 2-view solver is used repeatedly for +randomly sampled pairs of views until an inlier set can be +established. +Semidefinite relaxations have been used to obtain certi- +fiably optimal algorithms for many computer vision prob- +lems. Examples include semidefinite relaxations for parti- +tioning, grouping and restoration [14], for minimizing re- +projection error [12], for multiview triangulation [1, 4], for +essential matrix estimation [28], for hand-eye calibration +[7, 22, 23], for robust point cloud registration [24, 26, 27], +and for 3D shape from 2D landmarks [25]. 
+The work +[26] also considers outlier-robust estimation applied to rota- +tion averaging, mesh registration, absolute pose registration +and category-level object pose+shape estimation. Solving +semidefinite relaxations is typically slow and memory in- +tensive, stemming from the fact that the number of vari- +ables is the square of the number of variables in the orig- +inal problem. However there has been recent interest in +developing solvers that can scale to larger problems. In- +cluding [6] which uses a reformulation in terms of eigen- +value optimization based on [10] which can take advantage +of GPUs, and [26] which uses efficient non-global solvers +for speeding up the convergence of the global solver. +In a limited number of cases, semidefinite relaxations +can be shown to always solve the original problem when +excluding degenerate configurations. +Including the dual +quaternion formulation of hand-eye calibration [7] and 2- +view triangulation using epipolar constraints [1]. In both +cases the problem has two quadratic constraints one of +which equals zero. Another case is the rotation alignment +problem which has a closed form solution in terms of an +eigenvalue decomposition (quaternion formulation) or sin- +gular value decomposition (rotation matrix formulation). +Outlier-robust estimation is inapproximable in general +[2], so one typically has to rely on empirical experiments +to validate how stable the relaxation is. Though it is some +times possible to find sets of special cases where the relax- +ation can be shown to be always tight (or non-tight), as in +the recent work [19] for robust rotation alignment of point +clouds. +3. Notation and preliminaries +For t, s ∈ R3 we write [t]× for the 3×3 skew-symmetric +matrix such that t × s = [t]×s. Sk is the set of k × k +real symmetric matrices. (a; b) denotes the vertical con- +catenation of vectors a and b and for a collection of vec- +tors a1, . . . 
, an the subscript-free version denotes the cor- +responding stacked vector a = (a1; . . . ; an). +We use a +bar to denote the homogeneous version of a vector, that is +¯a := (a; 1). When dimensionality is understood we define +ei to be the ith unit vector and Ei = eieT +i . For a vector of +monomials m = (m1; . . . ; md) we define em +mi as the unit +vector whose only non-zero entry corresponds to the index +of mi in m, meaning em +mi = ei ∈ Rd. For a vector x ∈ Rk +we define: +Mx := +� I +−x +−xT +∥x∥2 +� +∈ Sk+1 +(1) +such that for y ∈ Rk we have ¯yT Mx¯y = ∥x − y∥2. The +operator ⊗ denotes the Kronecker product, and ⊕ denotes +the tensor sum. For example, for 2 × 2 matrices A and B: +A ⊕ B = +�A +0 +0 +B +� +, A ⊗ B = +�a11B +a12B +a21B +a22B +� +. +(2) +3.1. Semidefinite relaxations +As a general strategy, we aim to solve the triangu- +lation problem by relaxing a Quadratically Constrained +Quadratic Program, which has the following form: +min +z∈Rd +zT Mz +s.t. +zT Ez = 1 +zT Aiz = 0, +i = 1, . . . , k. +(3) +This is a very general formulation with applications in com- +puter vision but it is NP-hard to solve in most cases, so an +imperfect method is typically necessary. One such strat- +egy is to lift the problem from Rd to Sd by introducing a +new variable Z = zzT and using the fact that zT Mz = +tr(MzzT ) = tr(MZ) to arrive at: +min +Z∈Sd +tr(MZ) +s.t. +tr(EZ) = 1 +tr(AiZ) = 0, +i = 1, . . . , k +Z ≽ 0. +(4) +2 + +Eq. (4) is a relaxation of Eq. (3) since if z satisfies the con- +straints of Eq. (3) we always have that Z = zzT satisfies +the constraints of Eq. (4) with the same objective value. +However, the converse is unfortunately not always true. In +particular, if ˆZ is optimal for Eq. (4) we can obtain a cor- +responding solution ˆz for Eq. (3) with the same objective +value if and only if ˆZ is rank one. In this case we have +ˆZ = ˆzˆzT and we then say that the relaxation is tight. +The main advantage of working with the relaxation +Eq. 
(4) as opposed to the original problem Eq. (3) is that the +relaxation is a convex optimization problem, in particular +a semidefinite program, for which a variety of polynomial- +time solvers are available, including [3, 17]. We can verify +whether a potential solution to Eq. (3) is optimal by comput- +ing the corresponding Lagrange multipliers, as summarized +in the following fact: +Fact 1. If ˆz ∈ Rd satisfies the constraints of Eq. (3) (primal +feasibility) and there are Lagrange multipliers ˆλ ∈ R, ˆξ ∈ +Rk and a corresponding multiplier matrix S(ˆλ, ˆξ) = M + +�k +i=1 ˆξiAi − ˆλE satisfying: +i) Dual feasibility: S(ˆλ, ˆξ) ≽ 0 +ii) Complementarity: S(ˆλ, ˆξ)ˆz = 0 +then the relaxation Eq. (4) is tight and ˆz is optimal for +Eq. (3). +If the relaxation is not tight we can at best expect an +optimal ˆZ to generate an approximation of the optimal ˆz. +Therefore, a key metric to consider when applying a re- +laxation is the percentage of encountered problem cases +in which it remains tight. Fortunately, [5] shows that the +relaxation is well behaved for problems that are close in +parameter-space to solutions where the multiplier matrix +has corank 11, which we will show later occurs in the noise- +free case. We restate the main result in loose terms here: +Fact 2. If we, in addition to the conditions in Fact 1, have +that S(λ, µ) is corank 1 and ACQ (which is a smoothness +condition, see [5] Definition 3.1) holds, then the relaxation +Eq. (4) is locally stable, meaning that it will remain tight +also for perturbed objective functions M + ε ˜ +M for small +enough ε. +The practical usefulness of Fact 2 comes from the con- +sideration that it’s often possible to show that the relaxation +is tight and the stability conditions hold for noise-free mea- +surements. This means that there is some surrounding re- +gion of noisy measurements for which the relaxation is tight +as well. 
There is also a version of Fact 2 which covers per- +turbations to the constraints, however, we will not make use +of it here. +1corank(A) = n - rank(A) for an n × n matrix A. +4. Relaxations for multiview triangulation +Given n views of a point X from cameras located at Pi = +(Ri, ti) ∈ SE(3) with intrinsic matrices Ki ∈ R3×3, and +with, possibly noisy, observations denoted as ˜xi ∈ R2, the +n-view triangulation problem with reprojection error is de- +fined as: +min +X∈R3 +n +� +i=1 +∥˜xi − π(Ki, Pi, X)∥2 +(5) +where π(Ki, Pi, X) is the reprojection of the point X ∈ R3 +to camera i. This is a nonconvex problem but it is not yet in +QCQP form as in Eq. (3) since π(Ki, Pi, X) is not quadratic +in X. In the next section we will recap two ways in which +it can be converted to a QCQP, from which we can generate +the corresponding semidefinite relaxations. +4.1. Triangulation with epipolar constraints +As described in [1] we can reformulate Eq. (5) as a poly- +nomial optmization problem of degree 2 by reparametrizing +X in terms of it’s n reprojections xi subject to the epipolar +constraints: +min +xi∈R2 +n +� +i=1 +∥xi − ˜xi∥2 +s.t. +¯xT +i Fij ¯xj = 0 +i, j = 1, . . . , n +i ̸= j +(6) +where Fij = K−T +i +[tij]×RijK−1 +j +is the fundamental matrix +corresponding to the relative transformation between poses +i and j. Since the estimated reprojections xi all satisfy the +epipolar constraints, the solution of Eq. (5) can be recovered +exactly from Eq. (6) using the linear method from [9]. +Using the parametrization z = (x; 1) = ¯x the semidefi- +nite relaxation of Eq. (6) is: +min +Z∈S2n+1 +tr(M˜xZ) +s.t. +tr(En+1Z) = 1 +tr( ¯FijZ) = 0, +i = 1, . . . , k +Z ≽ 0 +(T) +where ¯Fij +∈ +S2n+1 is defined such that ¯xT ¯Fij ¯x += +¯xT +i Fij ¯xj. It was shown already in both [1] and [5] that +Eq. (T) is a locally stable relaxation for noise-free measure- +ments, whenever the views are not co-planar. 
In particu- +lar, since noise-free observations ˜x by definition satisfy the +constraints of the original problem Eq. (T), the solution is +obtained by setting z = (˜x; 1), and since M˜x is positive +semidefinite and corank 1, the conditions of Fact 1 are sat- +isfied by setting all Lagrange multipliers to zero, such that +ˆS = M˜x. +3 + +(a) 3 views +(b) 5 views +(c) 7 views +(d) 3 views, 1 outlier +(e) 5 views, 2 outliers +(f) 7 views, 4 outliers +Figure 2. Examples of simulated triangulation problems from Sec. 5.1 with σ = 50px for various number of views and outliers. Blue +point: ground truth, Red point: non-robust global optimum found by the relaxation found by [4] (Eq. (TF)). Green point: robust global +optimum found by our proposed relaxation in Eq. (RTF). With no outliers the robust and non-robust methods give the same result. +4.2. Triangulation with fraction constraints +As an alternative to Eq. (6) we can also solve Eq. (5) by +explicitly parametrizing the 3D point X in homogeneous +coordinates and multiplying out the fractional equations: +min +¯ +X∈R4,xi∈R2 +n +� +i=1 +∥xi − ˜xi∥2 +s.t. +¯XT ¯X = 1 +xk +i bT +i ¯X − aT +ik ¯X = 0 +i = 1, . . . , n +k = 1, 2 +(7) +Where ai1, ai2 and bi are given by the rows of the corre- +sponding camera matrix Ki +� +RT +i +−RT +i ti +� +. A naive ap- +proach to relaxing Eq. (7) would be to use the parametriza- +tion z = (x; ¯X), but unfortunately, as shown in [4], this +leads to a problem whose optimal value is always zero. To +circumvent this issue, [4] proposes parametrizing the prob- +lem in terms of all possible products between the elements +of x and X. They also show through experiments that while +the resulting relaxation has more parameters and constraints +than Eq. (T), it is also tight in a significantly wider range of +cases, leading to a tradeoff between reliability and compu- +tation time. 
+4 + +We will use a similar relaxation, though we will skip +the initial change of variables to get a slightly different +but equivalent formulation which can be extended to the +robust case more conveniently. We start by setting z = +(x ⊗ ¯X; ¯X) = ¯x ⊗ ¯X and then we multiply each repro- +jection constraint in Eq. (7) with zj to get 8n + 4 quadratic +constraints: +(xk +i bT +i ¯X − aT +ik ¯X)zj = zT (e¯x +xk +i ⊗ bi − e¯x +1 ⊗ aik)eT +j z += 0 +(8) +Note that in Eq. (8) we have made use of the unit vector no- +tation from Sec. 3, meaning in particular e¯x +xk +i = e2i+k and +e¯x +1 = e2n+1. We also need to indoduce constraints to pre- +serve the fact that z comes from a (2n + 1) × 4 kronecker +product. When Z = zzT is rank one, it turns out that this +condition is equivalent to Z being composed of 2n+1 sym- +metric 4×4 blocks, see [4] for more details. We will denote +this constraint as Z ∈ kron(2n + 1, 4). The relaxation of +can now be written as2: +min +Z∈S8n+4 ++ +tr(Z(M˜x ⊗ I4)) +s.t. +tr(Z(08n×8n ⊕ I4)) = 1 +Z ∈ kron(2n + 1, 4) +tr(Z(e¯x +xk +i ⊗ bi − e¯x +1 ⊗ aik)eT +j ) = 0 +i = 1, . . . n, +k = 1, 2 +j = 1, . . . , 8n + 4. +(TF) +We have now introduced two relaxations for the multiview +triangulation problem. In the next two sections we will ex- +tend each to the robust case. +4.3. Robust triangulation with epipolar constraints +Now that we have introduced the two main relaxations +of Eq. (6) we move to the the main contribution of this pa- +per, which is to introduce the corresponding truncated least +squares (TLS) extensions. Similarly to [26] we will use the +fact that the TLS cost function can be written as a minimiza- +tion problem by introducing a binary decision variable for +each residual +ρi(r2 +i ) = min(r2 +i , ci) = +min +θi∈{0,1} θir2 +i + (1 − θi)ci +(9) +where ci > 0 is the square of the inlier threshold. Meaning +that the TLS extension of Eq. (6) can be written as: +min +xi∈R2,θi∈R +n +� +i=1 +� +θi∥xi − ˜xi∥2 + (1 − θi)ci +� +s.t. 
+¯xT +i Fij ¯xj = 0, +θ2 +i − θi = 0. +i, j = 1, . . . , n +i ̸= j. +(10) +2The cost functions in Eq. (7) and Eq. (TF) are equivalent, since ( ¯ +X ⊗ +¯x)T (M˜x ⊗ I4)( ¯ +X ⊗ ¯x) = (¯xT M˜x¯x) ¯ +XT ¯ +X. +However this cost function is a 3rd degree polynomial in +the variables as it contains terms like θi∥xi∥2, so we can’t +apply the relaxation directly. But we can obtain a 2nd order +formulation by noting that θ2 +i = θi implies θi∥xi − ˜xi∥2 = +∥θixi − θi˜xi∥2 and making the substitution yi = θixi: +min +yi∈R2,θi∈R +n +� +i=1 +� +∥yi − θi˜xi∥2 + (1 − θi)ci +� +s.t. +(yi; θi)T Fij(yj; θj) = 0 +θ2 +i − θi = 0 +θiyi = yi +i, j = 1, . . . , n, +i ̸= j. +(11) +The last set of constraints θiyi = yi is redundant but we’ve +found that it is necessary for the relaxation to remain tight +in the presence of noise. We can recover the solution to +Eq. (10) from Eq. (11) by triangulating the estimated inliers +and setting each xi to be the reprojection of the resulting +point onto view i. +Using the parametrization z = (y; θ; 1) the semidefinite +relaxation of Eq. (11) is: +min +Z∈S3n+1 ++ +tr(M c +˜xZ) +s.t. +tr( ¯FijZ) = 0 +Zθi,θi − Z1,θi = 0 +Zθi,yi − Z1,yi = 0 +tr(E3n+1Z) = 1 +i, j = 1, . . . , n +i ̸= j +(RT) +where M c +˜x is the robust extension of M˜x, defined as: +M c +˜x = +� +� +I +−B(˜x) +0 +−B(˜x)T +diag(∥˜xi∥2) +−c +0 +−cT +�n +i=0 ci +� +� , +B(˜x) = +� +� +� +� +� +˜x1 +0 +. . . +0 +0 +˜x2 +. . . +0 +... +... +... +0 +0 +0 +0 +˜xn +� +� +� +� +� . +(12) +and Zmi,mj is the entry of Z corresponding to the index of +the monomials mi and mj in z. As shown in [2] solving +Eq. (11) in the presence of outliers is NP hard even in the +noise-free case. However, in the noise-free and outlier-free +case we can show that the relaxation is tight with a corank 1 +multiplier matrix, meaning that the relaxation is also locally +stable with respect to noise, assuming ACQ holds: +Theorem 1. Assuming ACQ holds, the relaxation Eq. 
(RT) +is tight locally stable for noise-free and outlier-free mea- +surements ˜xi, i = 1, . . . , n. +5 + +Proof. Partiton the lagrange multipliers as ξ = (ϕ; µ; η), +where ϕij ∈ R µi ∈ R2 and η ∈ R corresponds to the +constraints (yi; θi)T Fij(yj; θj) = 0, θiyi = yi and θ2 +i = θi +respectively. Then we have: +S(λ, ϕ, µ, η) = +F(ϕ) + +� +� +I +−B(˜xi − µi) +−µ +∗ +diag(∥˜xi∥2 + 2ηi) +− 1 +2c − η +∗ +∗ +�n +i=1 ci − λ +� +� . (13) +Where F(ϕ) = � +ij ϕij ¯Fij. Now let ˆλ = ˆϕij = ˆµi = 0 +and ˆηi = 1 +2ci to get: +ˆS = S(ˆλ, ˆϕ, ˆµ, ˆη) = S(0, 0, 0, 1 +2c) = +� +� +I +−B(˜xi) +0 +∗ +diag(∥˜xi∥2 + ci) +−c +∗ +∗ +�n +i=1 ci +� +� . +(14) +This way, with ˆz = (˜x; 1n; 1) we have ˆSˆz = 0. And fur- +thermore, for arbitrary x, θ, α: +(x; θ; α)T ˆS(x; θ; α) = += +n +� +i=0 +� +∥xi∥2 − 2θi˜xi + θ2 +i (∥˜xi∥2+ci) − 2ciθiα + ciα2 +� += +n +� +i=0 +� +∥xi − θi˜xi∥2 + ci(α − θi)2 +� +≥ 0 +so ˆS is positive semidefinite. +So the relaxation is tight +by Fact 1. +And since the only nonzero solution to +(x; θ; α)T ˆS(x; θ; α) = 0 up to scale is (x; θ; α) = ˆz we +have that ˆS is corank 1. So, assuming ACQ holds, the re- +laxation is locally stable by Fact 2. +In the following section we will furthermore introduce a +higher order relaxation which can handle higher noise and +outlier levels. +4.4. Robust triangulation with fraction constraints +Since the fractional constraints in Eq. (TF) are more sta- +ble with respect to noise than the epipolar constraints in +Eq. (T), we might also expect that extending Eq. (TF) to +handle outliers will result in a relaxation which is more sta- +ble than Eq. (RT). In this section we will show how the +robust extension can be formulated, and as we will see in +Sec. 5 it is indeed significantly more stable with respect to +both noise and outliers. +In order to extend Eq. (TF) to handle outliers we will +proceed in a similar manner as in the case with epipolar +constraints. 
Starting by writing the cost function in terms of +Problem +Relaxation +Robust +Constraints +Variables +Eq. (6) +Eq. (T) + +1 +2n2 − 1 +2n + 1 +2n + 1 +Eq. (11) +Eq. (RT) + +1 +2n2 + 2.5n + 1 +3n + 1 +Eq. (7) +Eq. (TF) + +28n2 + 14n + 1 +8n + 4 +Eq. (15) +Eq. (RTF) + +51n2 + 65n + 1 +12n + 4 +Table 1. Summary of relaxations for the triangulation problem and +its robust extension. +the 2nd order variables yi = θixi: +min +¯ +X∈R4,xi∈R2 +n +� +i=1 +� +∥yi − θi˜xi∥2 + (1 − θi)ci +� +s.t. +¯XT ¯X = 1 +yk +i bT +i ¯X − aT +ik ¯X = 0 +θ2 +i − θi = 0 +θiyi = yi +i = 1, . . . , n +k = 1, 2. +(15) +For convenience we will denote the vertical concatenation +of y and θ as (y; θ) = yθ. For the relaxation we will then +use the parametrization z = (yθ⊗ ¯X; ¯X) = ¯yθ⊗ ¯X and gen- +erate redundant constraints from θ2 +i − θi = 0 and θiyi = yi +by multiplying each equation by ¯Xs ¯Xt for s, t = 1, . . . , 4. +Resulting in the following relaxation: +min +Z∈S12n+4 ++ +tr(Z((M c +˜x ⊗ I4) ⊕ 04×4)) +s.t. +tr(Z(012n×12n ⊕ I4)) = 1 +Z ∈ kron(3n + 1, 4) +tr(Z(e¯yθ +yk +i ⊗ bi − e¯yθ +θi ⊗ aik)eT +j ) = 0 +Z ¯ +Xsθi, ¯ +Xtθi − Z ¯ +Xs, ¯ +Xtθi = 0 +Z ¯ +Xsθi, ¯ +Xtyi − Z ¯ +Xs, ¯ +Xtyi = 0 +i = 1, . . . n, +k = 1, 2 +s, t = 1, . . . , 4 +j = 1, . . . , 12n + 4. +(RTF) +Similarly to the epipolar case we are able to show that the +relaxation is tight in the noise and outlier-free case by ex- +plicitly constructing the globally optimal Lagrange multi- +pliers. Using the same approach we are also able to show +part of the criteria required for local stability with respect to +noise in the outlier-free case. See Appendix A for details. +With this we have 4 relaxations for the triangulation +problem corresponding to the non-robust and robust case +with the epipolar and the fractional parametrization. We +summarize the relaxations and their number of variables and +constraints in Tab. 1. 
+6 + +0 +20 +40 +60 +80 +100 +noise level ( ) +20 +40 +60 +80 +100 +% tight relaxations +3 views +0 outliers +1 outliers +2 outliers +3 outliers +4 outliers +5 outliers +robust epipolar (RT) +robust fractional (RTF) +0 +20 +40 +60 +80 +100 +noise level ( ) +20 +40 +60 +80 +100 +% tight relaxations +5 views +0 +20 +40 +60 +80 +100 +noise level ( ) +20 +40 +60 +80 +100 +% tight relaxations +7 views +0 +20 +40 +60 +80 +100 +noise level ( ) +10 +2 +10 +1 +100 +101 +average error +3 views +0 +20 +40 +60 +80 +100 +noise level ( ) +10 +2 +10 +1 +100 +101 +average error +5 views +0 +20 +40 +60 +80 +100 +noise level ( ) +10 +2 +10 +1 +100 +101 +average error +7 views +Figure 3. Average number of tight relaxation (top) and estimation error (bottom) for 3, 5 and 7 views for the robust epipolar relaxation +Eq. (RT) and the robust fractional relaxation Eq. (RTF). We generate experiments for various noise levels and number of outliers as +described in Sec. 5.1. +4.5. Rounding in the non-tight case +For non-tight cases the optimal ˆZ will have rank of at +least 2, which means we can’t recover the optimal solution +ˆz for the original problem Eq. (3). However we can still +construct an approximate solution through a rounding pro- +cedure. We start by setting ˆz to be the eigenvector corre- +sponding to the minimal eigenvalue, normalized such that +ˆzT Eˆz = 1 as in Eq. (3). We then apply a different pro- +cedure for each problem depending on the constraints. For +Eq. (T) we triangulate the resulting ˆxi (which in this case +will generally not satisfy the epipolar constraints) using the +linear method from [9] after rounding the smallest singu- +lar value of the data matrix to 0. For Eq. (RT) we do the +same except that we first determine the inlier parameters ˆθi +by rounding the corresponding entries of ˆz to 0 or 1. For +Eq. (TF) and Eq. 
(RTF) we compute the best-fitting tensor +product decomposition of ˆz using a singular value decom- +position, as described in [21] and use the same method as in +the epipolar case for determining the inlier parameters. +5. Experiments +We +implement +all +relaxations +using +CVXPY +[8] +with +the +solver +MOSEK +[3] +using +the +setting +0 +20 +40 +60 +80 +100 +noise level ( ) +0 +20 +40 +60 +80 +100 +% tight relaxations +25 views +0 outliers +10 outliers +20 outliers +25 outliers +0 +20 +40 +60 +80 +100 +noise level ( ) +0 +20 +40 +60 +80 +100 +% tight relaxations +30 views +0 +20 +40 +60 +80 +100 +noise level ( ) +10 +2 +10 +1 +100 +average error +25 views +0 +20 +40 +60 +80 +100 +noise level ( ) +10 +2 +10 +1 +100 +average error +30 views +Figure 4. Average number of tight relaxation (top) and estimation +error(bottom) for 25 and 30 views using Eq. (RT). +MSK DPAR INTPNT CO TOL REL GAP += +10−14 for +the simulated experiments and 10−10 for the Reichstag +7 + +5 +10 +15 +20 +25 +30 +number of views +10 +1 +100 +101 +solver time (s) +epipolar (T) +fractional (TF) +robust epipolar (RT) +robust fractional (RTF) +Figure 5. Average computation time for each solver, averaged over +all noise levels and number of outliers. +experiments, all other parameters are left on their defaults. +We find that working in units of pixels results in poorly +conditioned solutions, leading to ˆz not satisfying the +constraints to high accuracy even in cases where the +problem is known to be tight. To avoid this issue we use +the change of variables xi → +1 +W xi and adjust the intrinsics +accordingly. Since the scaling is the same for each point +the optimal solution remains unchanged, but we get much +closer to rank one solutions in practice due to the improved +numerical stability. +5.1. 
Simulated experiments +We simulate triangulation problems as initially proposed +in [18] by placing n cameras on a sphere of radius 2 and +sample a point to be triangulated from the unit cube, see +Fig. 2 for some examples. The same setup was also used for +experiments in [1, 4]. For the reprojection model we simu- +late a pinhole camera with dimensions with width W = +2108 and height H = 1162, focal length f = 1012.0027 +and principal point p = (1054, 581). We simulate noisy +observations by adding Gaussian noise with standard devi- +ation σ to the ground truth image coordinates. When gener- +ating an outlier we select a view at random and replace the +measurement with a random point in the image. +We run the experiment for each method at various dif- +ferent noise levels and number of outliers. For each noise +level we run Eq. (RT) 375 times and Eq. (RTF) 60 times +for n = 3, 5 and 7 views and in each case add up to n − 2 +outliers. The percentage of tight relaxations and the estima- +tion error can be seen in Fig. 3. We also run Eq. (RT) 30 +times each for n = 25 and 30 with 0, 10, 20 and 25 out- +liers, the results of which can be seen in Fig. 4. We don’t +run Eq. (RTF) for these cases since we run into memory +limitations with MOSEK. +From Fig. 3 we can see that in general the fractional +relaxation in Eq. (15) is significantly more stable than the +epipolar relaxation Eq. (RT). In fact, across all experiments +the fractional relaxation is tight in 99.8% of cases. How- +ever, we can also note that the epipolar relaxation remains +viable for lower noise levels, for instance in the case with +n = 7 the relaxations perform similarly in terms of aver- +age estimation error up until σ ≈ 60px and 3 outliers, after +which the percentage of tight relaxations drop drastically. +As can be seen from the average solver timings in Fig. 
5 +0 +1 +2 +3 +4 +5 +number of outliers +75 +80 +85 +90 +95 +100 +% tight relaxations +3, 5, 7 views +3 views +5 views +7 views +25 views +30 views +robust epipolar (RT) +robust fractional (RTF) +0 +5 +10 +15 +20 +25 +number of outliers +0 +20 +40 +60 +80 +100 +% tight relaxations +25, 30 views +0 +1 +2 +3 +4 +5 +number of outliers +10 +2 +10 +1 +100 +101 +error (m) +3, 5, 7 views +0 +5 +10 +15 +20 +25 +number of outliers +10 +2 +10 +1 +100 +101 +error (m) +25, 30 views +Figure 6. Average number of tight relaxation (top) and estimation +error(bottom) for Eq. (RT) and Eq. (RTF) on the Reichstag dataset +as descriped in section Sec. 5.2. +the fractional relaxations is also over one order of magni- +tude slower than the epipolar relaxation, meaning that it +might be preferable to use Eq. (RT) in cases where the qual- +ity of observations is known to be high. +5.2. Reichstag dataset +We also validate our relaxations on the Reichstag dataset +from [11]. The dataset consits of 75 views of roughly 18k +3D points. We use the ground truth correspondences es- +timated by structure from motion as detailed in [11] and +generate each triangulation problem by selecting n views +which all observe a common point. We then add up to n−2 +outliers by replacing the ground truth observations with a +randomly selected keypoints in the same image. See Fig. 1 +for an example point with n = 22 views and 19 outliers. +For n = 3, 5 and 7 views we run Eq. (RT) 375 times and +Eq. (RTF) 60 times for each possible number of outliers. +And similarly we run Eq. (RT) 120 times for n = 25 and 30 +views. The results are summarized in Fig. 6. +Similarly to the simulated experiments we can note that +the percentage of tight relaxations (and mean error) de- +creases (and increases) steadily as more outliers are added, +with a sharp drop when the number of inliers gets close +to 2, with the fractional method outperforming the epipo- +lar method. +6. 
Conclusion +We proposed a global optimization framework robust +multiview triangulation. To this end we derive semidefi- +nite relaxations for triangulation losses that incorporate a +truncated quadratic cost making them robust to both noise +and outliers. On synthetic and real data we confirm that +provably optimal triangulations can be computed and relax- +ations remain empirically tight despite significant amounts +of noise and outliers. +References +[1] Chris Aholt, Sameer Agarwal, and Rekha Thomas. A qcqp +approach to triangulation. In European Conference on Com- +8 + +puter Vision, pages 654–667. Springer, 2012. 1, 2, 3, 8 +[2] Pasquale Antonante, Vasileios Tzoumas, Heng Yang, and +Luca Carlone. Outlier-robust estimation: Hardness, mini- +mally tuned algorithms, and applications. IEEE Transactions +on Robotics, 38(1):281–301, 2022. 2, 5 +[3] MOSEK ApS. The MOSEK optimization toolbox for Python +manual. Version 10.0., 2022. 3, 7 +[4] Diego Cifuentes. A convex relaxation to compute the nearest +structured rank deficient matrix. SIAM Journal on Matrix +Analysis and Applications, 42(2):708–729, 2021. 1, 2, 4, 5, +8, 10 +[5] Diego Cifuentes, Sameer Agarwal, Pablo A Parrilo, and +Rekha R Thomas. On the local stability of semidefinite relax- +ations. Mathematical Programming, 193(2):629–663, 2022. +3, 10 +[6] Sumanth Dathathri, +Krishnamurthy Dvijotham, +Alexey +Kurakin, Aditi Raghunathan, Jonathan Uesato, Rudy R +Bunel, Shreya Shankar, Jacob Steinhardt, Ian Goodfellow, +Percy S Liang, et al. Enabling certification of verification- +agnostic networks via memory-efficient semidefinite pro- +gramming. Advances in Neural Information Processing Sys- +tems, 33:5318–5331, 2020. 2 +[7] Amit Dekel, Linus Harenstam-Nielsen, and Sergio Cac- +camo. Optimal least-squares solution to the hand-eye cali- +bration problem. In Proceedings of the IEEE/CVF Confer- +ence on Computer Vision and Pattern Recognition (CVPR), +June 2020. 2 +[8] Steven Diamond and Stephen Boyd. 
CVXPY: A Python- +embedded modeling language for convex optimization. Jour- +nal of Machine Learning Research, 17(83):1–5, 2016. 7 +[9] Richard I. Hartley and Peter Sturm. Triangulation. Computer +Vision and Image Understanding, 68(2):146–157, 1997. 2, +3, 7 +[10] Christoph Helmberg and Franz Rendl. +A spectral bundle +method for semidefinite programming. SIAM Journal on Op- +timization, 10(3):673–696, 2000. 2 +[11] Yuhe Jin, Dmytro Mishkin, Anastasiia Mishchuk, Jiri Matas, +Pascal Fua, Kwang Moo Yi, and Eduard Trulls. +Image +Matching across Wide Baselines: From Paper to Practice. +International Journal of Computer Vision, 2020. 8 +[12] Fredrik Kahl and Didier Henrion. Globally optimal estimates +for geometric reconstruction problems. International Jour- +nal of Computer Vision, 74(1):3–15, 2007. 2 +[13] Lai Kang, Lingda Wu, and Yee-Hong Yang. Robust multi- +view L2 triangulation via optimal inlier selection and 3D +structure refinement. Pattern Recognition, 47(9):2974–2992, +September 2014. 2 +[14] Jens Keuchel, Christoph Schnorr, Christian Schellewald, and +Daniel Cremers. +Binary partitioning, perceptual group- +ing, and restoration with semidefinite programming. IEEE +Transactions on Pattern Analysis and Machine Intelligence, +25(11):1364–1379, 2003. 2 +[15] Seong Hun Lee and Javier Civera. Closed-form optimal two- +view triangulation based on angular errors. +pages 2681– +2689, 10 2019. 2 +[16] Seong Hun Lee and Javier Civera. Robust uncertainty-aware +multiview triangulation. CoRR, abs/2008.01258, 2020. 2 +[17] Brendan O’Donoghue, Eric Chu, Neal Parikh, and Stephen +Boyd. Conic optimization via operator splitting and homoge- +neous self-dual embedding. Journal of Optimization Theory +and Applications, 169(3):1042–1068, June 2016. 3 +[18] Carl Olsson, Fredrik Kahl, and Richard Hartley. +Projec- +tive least-squares: Global solutions with local optimization. +In 2009 IEEE Conference on Computer Vision and Pattern +Recognition, pages 1216–1223, 2009. 
8 +[19] Liangzu Peng, Mahyar Fazlyab, and Ren´e Vidal. Semidefi- +nite relaxations of truncated least-squares in robust rotation +search: Tight or not. In European Conference on Computer +Vision, pages 673–691. Springer, 2022. 2 +[20] Johannes L. Sch¨onberger and Jan-Michael Frahm. Structure- +from-motion revisited. +In Proceedings of the IEEE/CVF +Conference on Computer Vision and Pattern Recognition +(CVPR), pages 4104–4113, 2016. 2 +[21] C. F. Van Loan and N. Pitsianis. Approximation with Kro- +necker Products, pages 293–314. Springer Netherlands, Dor- +drecht, 1993. 7 +[22] Emmett Wise, Matthew Giamou, Soroush Khoubyarian, Ab- +hinav Grover, and Jonathan Kelly. +Certifiably optimal +monocular hand-eye calibration. In 2020 IEEE International +Conference on Multisensor Fusion and Integration for Intel- +ligent Systems (MFI), pages 271–278. IEEE, 2020. 2 +[23] Thomas Wodtko, Markus Horn, Michael Buchholz, and +Klaus Dietmayer. Globally optimal multi-scale monocular +hand-eye calibration using dual quaternions. In 2021 Inter- +national Conference on 3D Vision (3DV), pages 249–257. +IEEE, 2021. 2 +[24] Heng Yang and Luca Carlone. A quaternion-based certifi- +ably optimal solution to the wahba problem with outliers. In +Proceedings of the IEEE/CVF International Conference on +Computer Vision (ICCV), October 2019. 2 +[25] Heng Yang and Luca Carlone. In perfect shape: Certifiably +optimal 3d shape reconstruction from 2d landmarks. In Pro- +ceedings of the IEEE/CVF Conference on Computer Vision +and Pattern Recognition (CVPR), June 2020. 2 +[26] Heng Yang and Luca Carlone. Certifiably optimal outlier- +robust geometric perception: Semidefinite relaxations and +scalable global optimization. IEEE Transactions on Pattern +Analysis and Machine Intelligence, 2022. 2, 5 +[27] Heng Yang, Jingnan Shi, and Luca Carlone. Teaser: Fast +and certifiable point cloud registration. IEEE Transactions +on Robotics, 37(2):314–333, 2020. 2 +[28] Ji Zhao. 
An efficient solution to non-minimal case essential +matrix estimation. IEEE Transactions on Pattern Analysis +and Machine Intelligence, 2020. 2 +9 + +A. Local stability of fractional method +In this section we will prove two of the criteria required +for local stability for the robust fractional method Eq. (RTF) +for noise-free and outlier-free measurements. Local stabil- +ity for the non-robust case was shown already in [4] but we +will provide an alternate proof here in our notation, since +it will lead into the extension to the robust case. For this +we will need the stronger version of Fact 2, which we will +restate here loosely (see [5] Theorem 4.5 for more details). +Using the definition A(ξ) = �k +i=1 ξiAi: +Fact 3. If we, in addition to the conditions in Fact 1, have +that: +(i) (ACQ) ACQ holds +(ii) (smoothness) The the constraint set is smooth with re- +spect to pertubations to the constraints +(iii) (non-branch point) The nullspace of the multiplier ma- +trix and the tangent space of the constrant-set at the +optimum don’t intersect nontrivially: ker( ˆS) ∩ Tˆz = +{0} +(iv) (restricted slater) There exists ξ′, λ′ such that A(ξ′)− +λ′E is positive definite on the subspace of vectors z⊥ +for which ˆSz⊥ = 0 and ˆzT z⊥ ̸= 0. In other words the +part of the nullspace of ˆS which is orthogonal to the +solution ˆz. +The +tangent +space +in +(iii) +is +given +by +Tˆz += +ker(ˆzT A1; . . . ; ˆzT Ak; ˆzT E). +A.1. Non-robust version +We will show (iii-iv) for a version of Eq. (TF) with some- +what less constraints, noting that if we show (iii-iv) for the +problem with less constraints we can then add in the remain- +ing constraints back in and set the corresponding multipliers +to zero to show that (iii-iv) holds for the original problem as +well. Note however that since we don’t show (i-ii) the full +proof is incomplete and is left for future work. +Theorem 2. Assuming (i-ii) holds, the fractional relax- +ation Eq. 
(TF) is tight and locally stable for noise-free and +outlier-free measurements ˜xi, i = 1, . . . , n. +Proof. We start by partitioning the Lagrange multipliers as +ξ = (ϕ; α). Where ϕ = (ϕ1; . . . ; ϕ2n), and each ϕi ∈ R4 +contains the multipliers corresponding to ith reprojection +constraint multiplied by the entries of ¯X (recall that there +are two reprojection constraints per observation). Note that +in the original formulation we also multiply by all the en- +tries of x ⊗ ¯X as well, but as we will see these are not +necessary for the proof to hold. And α corresponds to the +kronecker product constraints. +Since the observations ˜x are noise free we can denote the +corresponding unique3 3D point in homogeneous coordi- +nates as ˆX ∈ R4, normalized such that ∥ ˆX∥ = 1. It will be +convenient to introduce the reparametrization u = ˜x which +is the same as the observation vector, except partitioned +such that u = (u1; . . . ; u2n), ui ∈ R, i.e. u2i+k = ˜xik +for i = 1, . . . , n, k = 1, 2. The primal optimum is then ob- +tained at ˆz = ¯u ⊗ ˆX, which is verified by setting ˆξ = ˆλ = 0 +to get ˆSˆz = (M˜x ⊗ I4)(¯u ⊗ ˆX) = (M˜x¯u) ⊗ ˆX = 0. +We then note that, due to the properties of the kronecker +product4 and that M˜x is positive semidefintie with corank 1, +we have that M˜x ⊗ I4 is positive semidefinite with corank +4. So the conditions of Fact 1 are satisfied. +Since the nullspace ker( ˆS) is 4-dimensional and contains +the four orthogonal vectors ˆz = ¯u ⊗ ˆX and ˆzl = ¯u ⊗ ˆXl +where ˆXT ˆXl = 0, ˆXT +l ˆXk = 0 for k ̸= l = 1, 2, 3 we can +parametrize z⊥ from (iv) as z⊥ = ¯u⊗ ˆX⊥ where ˆXT +⊥ ˆX = 0. +For (iii) we need to show that the vectors that span +ker( ˆS) are not in Tˆz, i.e. for any z ∈ ker( ˆS) either that +ˆzT Aiz ̸= 0 for some constraint i, or that ˆzT Ez ̸= 0. 
This is +the case since ˆzT Eˆz = 1 ̸= 0 and, letting Kijst be the kro- +necker constraint matrix corresponding to index st of block +ij, ˆzT Kijstzl = uiuj( ˆXs ˆXlt − ˆXt ˆXls) is nonzero for at +least some index ijst unless u = 0 or ˆX and ˆXl are paral- +lel, which is not the case by construction. +To show (iv), we set α′ = λ′ = 0 and ϕ′ +i = uibi − ai, +and verify that with z⊥ as above: +zT +⊥A(ϕ′, 0)z⊥ = +2n +� +i=1 +ˆXT +⊥ϕ′ +i(uibi − ai) ˆX⊥ += +2n +� +i=1 +((uibi − ai)T ˆX⊥)2 > 0 +(16) +where the final strict inequality follows from the fact that +each term is strictly positive as (uibi − ai)T ˆX = 0 by the +original constraints and ˆX⊥ is orthogonal to ˆX. +We note that, while not all constraints used in Eq. (TF) +are required for (iii-iv) to hold, we have found some cases +where adding the additional constraints results in a tighter +relaxation in the presence of noise, so we used the full set +of constraints in our experiments. +A.2. Robust version +We now move on to the robust fractional method +Theorem 3. Assuming (i-ii) holds, the fractional relax- +ation Eq. (RTF) is tight and locally stable for noise-free and +outlier-free measurements ˜xi, i = 1, . . . , n. +3assuming the observations are not degenerate, e.g. not all on a line. +4For matrices A ∈ Sn, B ∈ Sm with eigenvalues αi, βj the eigen- +values of the kronecker product A ⊗ B are given by the products of the +eigenvalues αiβj for i = 1, . . . , n, j = 1, . . . , m. +10 + +Proof. Partition +the +Lagrange +multipliers +as +ξ += +(ϕ; µ; η; α), where as in Theorem 2 ϕ corresponds to the +reprojection constraints and α corresponds to the kronecker +constraints. We let µ ∈ R32n correspond to the constraints +¯Xs ¯Xt(yikθi − yik) = 0 for s, t = 1, 2, 3, 4, k = 1, 2 +and i = 1, . . . , n. +And finally we similarly have that +η ∈ R16n = (η1; . . . ; ηn), ηi ∈ R16 corresponds to the +constraints ¯Xs ¯Xt(θ2 +i − θi) = 0. 
For each view i we collect +the corresponding subset of η into a 4×4 matrix Hi defined +such that ¯XT Hi ¯X = �4 +s,t=1 ηist ¯Xs ¯Xt. +To verify the global optimum we start by setting ˆz = +¯uθ ⊗ ˆX where uθ = (˜x; 1n). We then note that the con- +straint matrices for for the ηi-constraints can be written as a +kronecker product to get: +S(0, 0, η, 0) = M c +˜x ⊗ I4 + +n +� +i=1 +Ti ⊗ Hi +(17) +where each Ti ∈ S3n+1 is defined such that ¯yT +θ Ti¯yθ = θ2 +i − +θi for arbitrary yθ as in Sec. 4.4. We then set ˆη such that +ˆHi = ciI4 and ˆϕ = ˆµ = ˆα = ˆλ = 0 to get: +ˆS = S(0, 0, ˆη, 0) = (M c +˜x + +n +� +i=1 +ciTi) ⊗ I4. +(18) +Now, by the same argument as in Theorem 1 the matrix +M c +˜x + �n +i=1 ciTi is positive semidefinite with corank 1, so +ˆS is positive semidefinite with corank 4. Meaning that the +conditions of Fact 1 are satisfied. (iii) also follows using +the same argument based on the kronecker constraints as in +Theorem 2. +Finally, for (iv) we note that ker( ˆS) is spanned by ˆz and +ˆzl = ¯uθ ⊗ ˆXl, l = 1, 2, 3, so by setting µ′ = η′ = α′ = +λ′ = 0 and ϕ′ +i = uibi − ai restricted slater for ˆS follows in +the same way as in Eq. (16). +11 + diff --git a/FNFJT4oBgHgl3EQfCyyx/content/tmp_files/load_file.txt b/FNFJT4oBgHgl3EQfCyyx/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..fb98c00410b679b5c3f04309e7cefc8e4de7bc6f --- /dev/null +++ b/FNFJT4oBgHgl3EQfCyyx/content/tmp_files/load_file.txt @@ -0,0 +1,804 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf,len=803 +page_content='Semidefinite Relaxations for Robust Multiview Triangulation Linus H¨arenstam-Nielsen1, Niclas Zeller2, Daniel Cremers1 1Technical University of Munich, 2Karlsruhe University of Applied Sciences linus.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='nielsen@tum.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='de, niclas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='zeller@h-ka.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='de, cremers@tum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='de Abstract We propose the first convex relaxation for multiview tri- angulation that is robust to both noise and outliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' To this end, we extend existing semidefinite relaxation approaches to loss functions that include a truncated least squares cost to account for outliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We propose two formulations, one based on epipolar constraints and one based on the frac- tional reprojection equations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' The first is lower dimensional and remains tight under moderate noise and outlier levels, while the second is higher dimensional and therefore slower but remains tight even under extreme noise and outlier lev- els.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We demonstrate through extensive experiments that the proposed approach allows us to compute provably optimal reconstructions and that empirically the relaxations remain tight even under significant noise and a large percentage of outliers.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Introduction Triangulation refers to the problem of recovering the 3D location of a set of points from their observed 2D locations in two or more images under known camera transforma- tions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Since the 2D projections are typically noisy (due to lens distortions or inaccurate feature point localization), the optimal solution is often phrased as a non-convex opti- mization problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' While solutions are mostly computed us- ing faster but sub-optimal local optimization methods, there have been some efforts to compute globally optimal triangu- lations [1, 4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' While these works show that one can obtain globally optimal solutions for triangulation problems with noisy input, their practical value remains limited as they are not well adapted to the challenges of real-world data where even a single outlier can deteriorate the result.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Despite their often slower runtime, globally optimal methods offer several advantages: Firstly, in safety-critical systems it may be required to complement the computed so- lution with some guarantee that it really is the best solution or at least within a bound of the optimal solution.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Secondly, in many offline applications runtime is actually not critical and then one may want to trade off better accuracy for extra (a) 22 views, no outliers (b) 22 views, 19 outliers Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Example of a triangulated point from the Reichstag dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Blue point: ground truth.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Red point: non-robust global optimum found by the relaxation found by [1] (Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (T)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Green point: robust global optimum found by our proposed relaxation in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RT).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' runtime.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Thirdly, globally optimal solutions of real-world problems can serve as ground truth for assessing the perfor- mance of local optimization methods.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In this work, we revisit the problem of computing prov- ably optimal triangulations in the presense of outliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' To this end, we develop two possible convex relaxations for the truncated least squares cost function so as to combine the robustness with the capacity to compute globally opti- mal solutions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Our main contributions can be summarized as follows: We extend the convex triangulation methods from [1] and [4] with a truncated least squares cost function and derive the corresponding convex relaxations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We show that the relaxations are always tight in the noise-free and outlier-free case by explicitly construct- ing the globally optimal Lagrange multipliers (which 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='11431v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='CV] 26 Jan 2023 furthermore satisfy the corank 1, restricted slater and non-branch point criteria required for local stability with respect to noise).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We validate empirically that both relaxations remain tight even under large amounts of noise and high out- lier ratios.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' To the best of our knowledge, this is the first example of a successful semidefinite relaxation of a robust estimation problem with reprojection errors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Related work Triangulation is a core subroutine for structure from mo- tion and therefore has been studied extensively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' For two views there are many solution variants, including comput- ing the roots of a degree 6 polynomial [9] for the repro- jection error, and [15] for the angular error.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Multiview tri- angulation is typically performed using non-optimal meth- ods such as local search or the linear method from [9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Robust triangulation is typically tackled using RANSAC [13, 16, 20] where a 2-view solver is used repeatedly for randomly sampled pairs of views until an inlier set can be established.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Semidefinite relaxations have been used to obtain certi- fiably optimal algorithms for many computer vision prob- lems.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Examples include semidefinite relaxations for parti- tioning, grouping and restoration [14], for minimizing re- projection error [12], for multiview triangulation [1, 4], for essential matrix estimation [28], for hand-eye calibration [7, 22, 23], for robust point cloud registration [24, 26, 27], and for 3D shape from 2D landmarks [25].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' The work [26] also considers outlier-robust estimation applied to rota- tion averaging, mesh registration, absolute pose registration and category-level object pose+shape estimation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Solving semidefinite relaxations is typically slow and memory in- tensive, stemming from the fact that the number of vari- ables is the square of the number of variables in the orig- inal problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' However there has been recent interest in developing solvers that can scale to larger problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In- cluding [6] which uses a reformulation in terms of eigen- value optimization based on [10] which can take advantage of GPUs, and [26] which uses efficient non-global solvers for speeding up the convergence of the global solver.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In a limited number of cases, semidefinite relaxations can be shown to always solve the original problem when excluding degenerate configurations.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Including the dual quaternion formulation of hand-eye calibration [7] and 2- view triangulation using epipolar constraints [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In both cases the problem has two quadratic constraints one of which equals zero.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Another case is the rotation alignment problem which has a closed form solution in terms of an eigenvalue decomposition (quaternion formulation) or sin- gular value decomposition (rotation matrix formulation).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Outlier-robust estimation is inapproximable in general [2], so one typically has to rely on empirical experiments to validate how stable the relaxation is.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Though it is some times possible to find sets of special cases where the relax- ation can be shown to be always tight (or non-tight), as in the recent work [19] for robust rotation alignment of point clouds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Notation and preliminaries For t, s ∈ R3 we write [t]× for the 3×3 skew-symmetric matrix such that t × s = [t]×s.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Sk is the set of k × k real symmetric matrices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (a;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' b) denotes the vertical con- catenation of vectors a and b and for a collection of vec- tors a1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , an the subscript-free version denotes the cor- responding stacked vector a = (a1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' an).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We use a bar to denote the homogeneous version of a vector, that is ¯a := (a;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' When dimensionality is understood we define ei to be the ith unit vector and Ei = eieT i .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' For a vector of monomials m = (m1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' md) we define em mi as the unit vector whose only non-zero entry corresponds to the index of mi in m, meaning em mi = ei ∈ Rd.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' For a vector x ∈ Rk we define: Mx := � I −x −xT ∥x∥2 � ∈ Sk+1 (1) such that for y ∈ Rk we have ¯yT Mx¯y = ∥x − y∥2.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' The operator ⊗ denotes the Kronecker product, and ⊕ denotes the tensor sum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' For example, for 2 × 2 matrices A and B: A ⊕ B = �A 0 0 B � , A ⊗ B = �a11B a12B a21B a22B � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (2) 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Semidefinite relaxations As a general strategy, we aim to solve the triangu- lation problem by relaxing a Quadratically Constrained Quadratic Program, which has the following form: min z∈Rd zT Mz s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' zT Ez = 1 zT Aiz = 0, i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , k.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (3) This is a very general formulation with applications in com- puter vision but it is NP-hard to solve in most cases, so an imperfect method is typically necessary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' One such strat- egy is to lift the problem from Rd to Sd by introducing a new variable Z = zzT and using the fact that zT Mz = tr(MzzT ) = tr(MZ) to arrive at: min Z∈Sd tr(MZ) s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' tr(EZ) = 1 tr(AiZ) = 0, i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , k Z ≽ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (4) 2 Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (4) is a relaxation of Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (3) since if z satisfies the con- straints of Eq.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (3) we always have that Z = zzT satisfies the constraints of Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (4) with the same objective value.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' However, the converse is unfortunately not always true.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In particular, if ˆZ is optimal for Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (4) we can obtain a cor- responding solution ˆz for Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (3) with the same objective value if and only if ˆZ is rank one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In this case we have ˆZ = ˆzˆzT and we then say that the relaxation is tight.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' The main advantage of working with the relaxation Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (4) as opposed to the original problem Eq.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (3) is that the relaxation is a convex optimization problem, in particular a semidefinite program, for which a variety of polynomial- time solvers are available, including [3, 17].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We can verify whether a potential solution to Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (3) is optimal by comput- ing the corresponding Lagrange multipliers, as summarized in the following fact: Fact 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' If ˆz ∈ Rd satisfies the constraints of Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (3) (primal feasibility) and there are Lagrange multipliers ˆλ ∈ R, ˆξ ∈ Rk and a corresponding multiplier matrix S(ˆλ, ˆξ) = M + �k i=1 ˆξiAi − ˆλE satisfying: i) Dual feasibility: S(ˆλ, ˆξ) ≽ 0 ii) Complementarity: S(ˆλ, ˆξ)ˆz = 0 then the relaxation Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (4) is tight and ˆz is optimal for Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' If the relaxation is not tight we can at best expect an optimal ˆZ to generate an approximation of the optimal ˆz.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Therefore, a key metric to consider when applying a re- laxation is the percentage of encountered problem cases in which it remains tight.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Fortunately, [5] shows that the relaxation is well behaved for problems that are close in parameter-space to solutions where the multiplier matrix has corank 11, which we will show later occurs in the noise- free case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We restate the main result in loose terms here: Fact 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' If we, in addition to the conditions in Fact 1, have that S(λ, µ) is corank 1 and ACQ (which is a smoothness condition, see [5] Definition 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='1) holds, then the relaxation Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (4) is locally stable, meaning that it will remain tight also for perturbed objective functions M + ε ˜ M for small enough ε.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' The practical usefulness of Fact 2 comes from the con- sideration that it’s often possible to show that the relaxation is tight and the stability conditions hold for noise-free mea- surements.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' This means that there is some surrounding re- gion of noisy measurements for which the relaxation is tight as well.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' There is also a version of Fact 2 which covers per- turbations to the constraints, however, we will not make use of it here.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 1corank(A) = n - rank(A) for an n × n matrix A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Relaxations for multiview triangulation Given n views of a point X from cameras located at Pi = (Ri, ti) ∈ SE(3) with intrinsic matrices Ki ∈ R3×3, and with, possibly noisy, observations denoted as ˜xi ∈ R2, the n-view triangulation problem with reprojection error is de- fined as: min X∈R3 n � i=1 ∥˜xi − π(Ki, Pi, X)∥2 (5) where π(Ki, Pi, X) is the reprojection of the point X ∈ R3 to camera i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' This is a nonconvex problem but it is not yet in QCQP form as in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (3) since π(Ki, Pi, X) is not quadratic in X.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In the next section we will recap two ways in which it can be converted to a QCQP, from which we can generate the corresponding semidefinite relaxations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Triangulation with epipolar constraints As described in [1] we can reformulate Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (5) as a poly- nomial optmization problem of degree 2 by reparametrizing X in terms of it’s n reprojections xi subject to the epipolar constraints: min xi∈R2 n � i=1 ∥xi − ˜xi∥2 s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ¯xT i Fij ¯xj = 0 i, j = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , n i ̸= j (6) where Fij = K−T i [tij]×RijK−1 j is the fundamental matrix corresponding to the relative transformation between poses i and j.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Since the estimated reprojections xi all satisfy the epipolar constraints, the solution of Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (5) can be recovered exactly from Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (6) using the linear method from [9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Using the parametrization z = (x;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 1) = ¯x the semidefi- nite relaxation of Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (6) is: min Z∈S2n+1 tr(M˜xZ) s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' tr(En+1Z) = 1 tr( ¯FijZ) = 0, i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , k Z ≽ 0 (T) where ¯Fij ∈ S2n+1 is defined such that ¯xT ¯Fij ¯x = ¯xT i Fij ¯xj.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' It was shown already in both [1] and [5] that Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (T) is a locally stable relaxation for noise-free measure- ments, whenever the views are not co-planar.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In particu- lar, since noise-free observations ˜x by definition satisfy the constraints of the original problem Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (T), the solution is obtained by setting z = (˜x;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 1), and since M˜x is positive semidefinite and corank 1, the conditions of Fact 1 are sat- isfied by setting all Lagrange multipliers to zero, such that ˆS = M˜x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 3 (a) 3 views (b) 5 views (c) 7 views (d) 3 views, 1 outlier (e) 5 views, 2 outliers (f) 7 views, 4 outliers Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Examples of simulated triangulation problems from Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='1 with σ = 50px for various number of views and outliers.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Blue point: ground truth, Red point: non-robust global optimum found by the relaxation found by [4] (Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (TF)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Green point: robust global optimum found by our proposed relaxation in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RTF).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' With no outliers the robust and non-robust methods give the same result.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Triangulation with fraction constraints As an alternative to Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (6) we can also solve Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (5) by explicitly parametrizing the 3D point X in homogeneous coordinates and multiplying out the fractional equations: min ¯ X∈R4,xi∈R2 n � i=1 ∥xi − ˜xi∥2 s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='t.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ¯XT ¯X = 1 xk i bT i ¯X − aT ik ¯X = 0 i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , n k = 1, 2 (7) Where ai1, ai2 and bi are given by the rows of the corre- sponding camera matrix Ki � RT i −RT i ti � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' A naive ap- proach to relaxing Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (7) would be to use the parametriza- tion z = (x;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ¯X), but unfortunately, as shown in [4], this leads to a problem whose optimal value is always zero.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' To circumvent this issue, [4] proposes parametrizing the prob- lem in terms of all possible products between the elements of x and X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' They also show through experiments that while the resulting relaxation has more parameters and constraints than Eq.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (T), it is also tight in a significantly wider range of cases, leading to a tradeoff between reliability and compu- tation time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 4 We will use a similar relaxation, though we will skip the initial change of variables to get a slightly different but equivalent formulation which can be extended to the robust case more conveniently.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We start by setting z = (x ⊗ ¯X;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ¯X) = ¯x ⊗ ¯X and then we multiply each repro- jection constraint in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (7) with zj to get 8n + 4 quadratic constraints: (xk i bT i ¯X − aT ik ¯X)zj = zT (e¯x xk i ⊗ bi − e¯x 1 ⊗ aik)eT j z = 0 (8) Note that in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (8) we have made use of the unit vector no- tation from Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 3, meaning in particular e¯x xk i = e2i+k and e¯x 1 = e2n+1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We also need to indoduce constraints to pre- serve the fact that z comes from a (2n + 1) × 4 kronecker product.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' When Z = zzT is rank one, it turns out that this condition is equivalent to Z being composed of 2n+1 sym- metric 4×4 blocks, see [4] for more details.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We will denote this constraint as Z ∈ kron(2n + 1, 4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' The relaxation of can now be written as2: min Z∈S8n+4 + tr(Z(M˜x ⊗ I4)) s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' tr(Z(08n×8n ⊕ I4)) = 1 Z ∈ kron(2n + 1, 4) tr(Z(e¯x xk i ⊗ bi − e¯x 1 ⊗ aik)eT j ) = 0 i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' n, k = 1, 2 j = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , 8n + 4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (TF) We have now introduced two relaxations for the multiview triangulation problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In the next two sections we will ex- tend each to the robust case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Robust triangulation with epipolar constraints Now that we have introduced the two main relaxations of Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (6) we move to the the main contribution of this pa- per, which is to introduce the corresponding truncated least squares (TLS) extensions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Similarly to [26] we will use the fact that the TLS cost function can be written as a minimiza- tion problem by introducing a binary decision variable for each residual ρi(r2 i ) = min(r2 i , ci) = min θi∈{0,1} θir2 i + (1 − θi)ci (9) where ci > 0 is the square of the inlier threshold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Meaning that the TLS extension of Eq.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (6) can be written as: min xi∈R2,θi∈R n � i=1 � θi∥xi − ˜xi∥2 + (1 − θi)ci � s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ¯xT i Fij ¯xj = 0, θ2 i − θi = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' i, j = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , n i ̸= j.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (10) 2The cost functions in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (7) and Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (TF) are equivalent, since ( ¯ X ⊗ ¯x)T (M˜x ⊗ I4)( ¯ X ⊗ ¯x) = (¯xT M˜x¯x) ¯ XT ¯ X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' However this cost function is a 3rd degree polynomial in the variables as it contains terms like θi∥xi∥2, so we can’t apply the relaxation directly.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' But we can obtain a 2nd order formulation by noting that θ2 i = θi implies θi∥xi − ˜xi∥2 = ∥θixi − θi˜xi∥2 and making the substitution yi = θixi: min yi∈R2,θi∈R n � i=1 � ∥yi − θi˜xi∥2 + (1 − θi)ci � s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (yi;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' θi)T Fij(yj;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' θj) = 0 θ2 i − θi = 0 θiyi = yi i, j = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , n, i ̸= j.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (11) The last set of constraints θiyi = yi is redundant but we’ve found that it is necessary for the relaxation to remain tight in the presence of noise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We can recover the solution to Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (10) from Eq.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (11) by triangulating the estimated inliers and setting each xi to be the reprojection of the resulting point onto view i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Using the parametrization z = (y;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' θ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 1) the semidefinite relaxation of Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (11) is: min Z∈S3n+1 + tr(M c ˜xZ) s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' tr( ¯FijZ) = 0 Zθi,θi − Z1,θi = 0 Zθi,yi − Z1,yi = 0 tr(E3n+1Z) = 1 i, j = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , n i ̸= j (RT) where M c ˜x is the robust extension of M˜x, defined as: M c ˜x = � � I −B(˜x) 0 −B(˜x)T diag(∥˜xi∥2) −c 0 −cT �n i=0 ci � � , B(˜x) = � � � � � ˜x1 0 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 0 0 ˜x2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 0 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 0 0 0 0 ˜xn � � � � � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (12) and Zmi,mj is the entry of Z corresponding to the index of the monomials mi and mj in z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' As shown in [2] solving Eq.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (11) in the presence of outliers is NP hard even in the noise-free case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' However, in the noise-free and outlier-free case we can show that the relaxation is tight with a corank 1 multiplier matrix, meaning that the relaxation is also locally stable with respect to noise, assuming ACQ holds: Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Assuming ACQ holds, the relaxation Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RT) is tight locally stable for noise-free and outlier-free mea- surements ˜xi, i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 5 Proof.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Partiton the lagrange multipliers as ξ = (ϕ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' µ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' η), where ϕij ∈ R µi ∈ R2 and η ∈ R corresponds to the constraints (yi;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' θi)T Fij(yj;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' θj) = 0, θiyi = yi and θ2 i = θi respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Then we have: S(λ, ϕ, µ, η) = F(ϕ) + � � I −B(˜xi − µi) −µ ∗ diag(∥˜xi∥2 + 2ηi) − 1 2c − η ∗ ∗ �n i=1 ci − λ � � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (13) Where F(ϕ) = � ij ϕij ¯Fij.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Now let ˆλ = ˆϕij = ˆµi = 0 and ˆηi = 1 2ci to get: ˆS = S(ˆλ, ˆϕ, ˆµ, ˆη) = S(0, 0, 0, 1 2c) = � � I −B(˜xi) 0 ∗ diag(∥˜xi∥2 + ci) −c ∗ ∗ �n i=1 ci � � .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (14) This way, with ˆz = (˜x;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 1n;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 1) we have ˆSˆz = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' And fur- thermore, for arbitrary x, θ, α: (x;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' θ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' α)T ˆS(x;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' θ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' α) = = n � i=0 � ∥xi∥2 − 2θi˜xi + θ2 i (∥˜xi∥2+ci) − 2ciθiα + ciα2 � = n � i=0 � ∥xi − θi˜xi∥2 + ci(α − θi)2 � ≥ 0 so ˆS is positive semidefinite.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' So the relaxation is tight by Fact 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' And since the only nonzero solution to (x;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' θ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' α)T ˆS(x;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' θ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' α) = 0 up to scale is (x;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' θ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' α) = ˆz we have that ˆS is corank 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' So, assuming ACQ holds, the re- laxation is locally stable by Fact 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In the following section we will furthermore introduce a higher order relaxation which can handle higher noise and outlier levels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Robust triangulation with fraction constraints Since the fractional constraints in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (TF) are more sta- ble with respect to noise than the epipolar constraints in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (T), we might also expect that extending Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (TF) to handle outliers will result in a relaxation which is more sta- ble than Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RT).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In this section we will show how the robust extension can be formulated, and as we will see in Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 5 it is indeed significantly more stable with respect to both noise and outliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In order to extend Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (TF) to handle outliers we will proceed in a similar manner as in the case with epipolar constraints.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Starting by writing the cost function in terms of Problem Relaxation Robust Constraints Variables Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (6) Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (T) \x17 1 2n2 − 1 2n + 1 2n + 1 Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (11) Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RT) \x13 1 2n2 + 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='5n + 1 3n + 1 Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (7) Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (TF) \x17 28n2 + 14n + 1 8n + 4 Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (15) Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RTF) \x13 51n2 + 65n + 1 12n + 4 Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Summary of relaxations for the triangulation problem and its robust extension.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' the 2nd order variables yi = θixi: min ¯ X∈R4,xi∈R2 n � i=1 � ∥yi − θi˜xi∥2 + (1 − θi)ci � s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ¯XT ¯X = 1 yk i bT i ¯X − aT ik ¯X = 0 θ2 i − θi = 0 θiyi = yi i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , n k = 1, 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (15) For convenience we will denote the vertical concatenation of y and θ as (y;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' θ) = yθ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' For the relaxation we will then use the parametrization z = (yθ⊗ ¯X;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ¯X) = ¯yθ⊗ ¯X and gen- erate redundant constraints from θ2 i − θi = 0 and θiyi = yi by multiplying each equation by ¯Xs ¯Xt for s, t = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Resulting in the following relaxation: min Z∈S12n+4 + tr(Z((M c ˜x ⊗ I4) ⊕ 04×4)) s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' tr(Z(012n×12n ⊕ I4)) = 1 Z ∈ kron(3n + 1, 4) tr(Z(e¯yθ yk i ⊗ bi − e¯yθ θi ⊗ aik)eT j ) = 0 Z ¯ Xsθi, ¯ Xtθi − Z ¯ Xs, ¯ Xtθi = 0 Z ¯ Xsθi, ¯ Xtyi − Z ¯ Xs, ¯ Xtyi = 0 i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' n, k = 1, 2 s, t = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , 4 j = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , 12n + 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RTF) Similarly to the epipolar case we are able to show that the relaxation is tight in the noise and outlier-free case by ex- plicitly constructing the globally optimal Lagrange multi- pliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Using the same approach we are also able to show part of the criteria required for local stability with respect to noise in the outlier-free case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' See Appendix A for details.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' With this we have 4 relaxations for the triangulation problem corresponding to the non-robust and robust case with the epipolar and the fractional parametrization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We summarize the relaxations and their number of variables and constraints in Tab.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 1.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='noise level ( ) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='% tight relaxations ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='3 views ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0 outliers ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='1 outliers ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='2 outliers ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='3 outliers ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='4 outliers ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='5 outliers ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='robust epipolar (RT) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='robust fractional (RTF) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} 
+page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='noise level ( ) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='% tight relaxations ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='5 views ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='60 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='noise level ( ) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='% tight relaxations ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='7 views ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='60 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='noise level ( ) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='101 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='average error ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='3 views ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='noise level ( ) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='101 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='average error ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='5 views ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='noise level ( ) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='101 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='average error ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='7 views ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='Figure 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Average number of tight relaxation (top) and estimation error (bottom) for 3, 5 and 7 views for the robust epipolar relaxation Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RT) and the robust fractional relaxation Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RTF).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We generate experiments for various noise levels and number of outliers as described in Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Rounding in the non-tight case For non-tight cases the optimal ˆZ will have rank of at least 2, which means we can’t recover the optimal solution ˆz for the original problem Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (3).' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' However we can still construct an approximate solution through a rounding pro- cedure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We start by setting ˆz to be the eigenvector corre- sponding to the minimal eigenvalue, normalized such that ˆzT Eˆz = 1 as in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We then apply a different pro- cedure for each problem depending on the constraints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' For Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (T) we triangulate the resulting ˆxi (which in this case will generally not satisfy the epipolar constraints) using the linear method from [9] after rounding the smallest singu- lar value of the data matrix to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' For Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RT) we do the same except that we first determine the inlier parameters ˆθi by rounding the corresponding entries of ˆz to 0 or 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' For Eq.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (TF) and Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RTF) we compute the best-fitting tensor product decomposition of ˆz using a singular value decom- position, as described in [21] and use the same method as in the epipolar case for determining the inlier parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Experiments ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='We ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='implement ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='all ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='relaxations ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='using ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='CVXPY ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='[8] ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='with ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='the ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='solver ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='MOSEK ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='[3] ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='using ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='the ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='setting ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='noise level ( ) ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='% tight relaxations ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='25 views ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0 outliers ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='10 outliers ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 outliers ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='25 outliers ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='noise level ( ) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='% tight relaxations ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='30 views ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='noise level ( ) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='average error ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='25 views ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0 ' metadata={'source': 
'/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='40 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='60 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='80 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='noise level ( ) ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='2 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='10 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='1 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='100 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='average error ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='30 views ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='Figure 4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Average number of tight relaxation (top) and estimation error(bottom) for 25 and 30 views using Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RT).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' MSK DPAR INTPNT CO TOL REL GAP = 10−14 for the simulated experiments and 10−10 for the Reichstag 7 5 10 15 20 25 30 number of views 10 1 100 101 solver time (s) epipolar (T) fractional (TF) robust epipolar (RT) robust fractional (RTF) Figure 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Average computation time for each solver, averaged over all noise levels and number of outliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' experiments, all other parameters are left on their defaults.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We find that working in units of pixels results in poorly conditioned solutions, leading to ˆz not satisfying the constraints to high accuracy even in cases where the problem is known to be tight.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' To avoid this issue we use the change of variables xi → 1 W xi and adjust the intrinsics accordingly.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Since the scaling is the same for each point the optimal solution remains unchanged, but we get much closer to rank one solutions in practice due to the improved numerical stability.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Simulated experiments We simulate triangulation problems as initially proposed in [18] by placing n cameras on a sphere of radius 2 and sample a point to be triangulated from the unit cube, see Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 for some examples.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' The same setup was also used for experiments in [1, 4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' For the reprojection model we simu- late a pinhole camera with dimensions with width W = 2108 and height H = 1162, focal length f = 1012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0027 and principal point p = (1054, 581).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We simulate noisy observations by adding Gaussian noise with standard devi- ation σ to the ground truth image coordinates.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' When gener- ating an outlier we select a view at random and replace the measurement with a random point in the image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We run the experiment for each method at various dif- ferent noise levels and number of outliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' For each noise level we run Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RT) 375 times and Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RTF) 60 times for n = 3, 5 and 7 views and in each case add up to n − 2 outliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' The percentage of tight relaxations and the estima- tion error can be seen in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We also run Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RT) 30 times each for n = 25 and 30 with 0, 10, 20 and 25 out- liers, the results of which can be seen in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We don’t run Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RTF) for these cases since we run into memory limitations with MOSEK.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' From Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 3 we can see that in general the fractional relaxation in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (15) is significantly more stable than the epipolar relaxation Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RT).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In fact, across all experiments the fractional relaxation is tight in 99.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='8% of cases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' How- ever, we can also note that the epipolar relaxation remains viable for lower noise levels, for instance in the case with n = 7 the relaxations perform similarly in terms of aver- age estimation error up until σ ≈ 60px and 3 outliers, after which the percentage of tight relaxations drop drastically.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' As can be seen from the average solver timings in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 5 0 1 2 3 4 5 number of outliers 75 80 85 90 95 100 % tight relaxations 3, 5, 7 views 3 views 5 views 7 views 25 views 30 views robust epipolar (RT) robust fractional (RTF) 0 5 10 15 20 25 number of outliers 0 20 40 60 80 100 % tight relaxations 25, 30 views 0 1 2 3 4 5 number of outliers 10 2 10 1 100 101 error (m) 3, 5, 7 views 0 5 10 15 20 25 number of outliers 10 2 10 1 100 101 error (m) 25, 30 views Figure 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Average number of tight relaxation (top) and estimation error(bottom) for Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RT) and Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RTF) on the Reichstag dataset as descriped in section Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' the fractional relaxations is also over one order of magni- tude slower than the epipolar relaxation, meaning that it might be preferable to use Eq.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RT) in cases where the qual- ity of observations is known to be high.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Reichstag dataset We also validate our relaxations on the Reichstag dataset from [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' The dataset consits of 75 views of roughly 18k 3D points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We use the ground truth correspondences es- timated by structure from motion as detailed in [11] and generate each triangulation problem by selecting n views which all observe a common point.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We then add up to n−2 outliers by replacing the ground truth observations with a randomly selected keypoints in the same image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' See Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 1 for an example point with n = 22 views and 19 outliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' For n = 3, 5 and 7 views we run Eq.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RT) 375 times and Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RTF) 60 times for each possible number of outliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' And similarly we run Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RT) 120 times for n = 25 and 30 views.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' The results are summarized in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Similarly to the simulated experiments we can note that the percentage of tight relaxations (and mean error) de- creases (and increases) steadily as more outliers are added, with a sharp drop when the number of inliers gets close to 2, with the fractional method outperforming the epipo- lar method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Conclusion We proposed a global optimization framework robust multiview triangulation.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' To this end we derive semidefi- nite relaxations for triangulation losses that incorporate a truncated quadratic cost making them robust to both noise and outliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' On synthetic and real data we confirm that provably optimal triangulations can be computed and relax- ations remain empirically tight despite significant amounts of noise and outliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' References [1] Chris Aholt, Sameer Agarwal, and Rekha Thomas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' A qcqp approach to triangulation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In European Conference on Com- 8 puter Vision, pages 654–667.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Springer, 2012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 1, 2, 3, 8 [2] Pasquale Antonante, Vasileios Tzoumas, Heng Yang, and Luca Carlone.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Outlier-robust estimation: Hardness, mini- mally tuned algorithms, and applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' IEEE Transactions on Robotics, 38(1):281–301, 2022.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2, 5 [3] MOSEK ApS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' The MOSEK optimization toolbox for Python manual.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Version 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=', 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 3, 7 [4] Diego Cifuentes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' A convex relaxation to compute the nearest structured rank deficient matrix.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' SIAM Journal on Matrix Analysis and Applications, 42(2):708–729, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 1, 2, 4, 5, 8, 10 [5] Diego Cifuentes, Sameer Agarwal, Pablo A Parrilo, and Rekha R Thomas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' On the local stability of semidefinite relax- ations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Mathematical Programming, 193(2):629–663, 2022.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 3, 10 [6] Sumanth Dathathri, Krishnamurthy Dvijotham, Alexey Kurakin, Aditi Raghunathan, Jonathan Uesato, Rudy R Bunel, Shreya Shankar, Jacob Steinhardt, Ian Goodfellow, Percy S Liang, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Enabling certification of verification- agnostic networks via memory-efficient semidefinite pro- gramming.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Advances in Neural Information Processing Sys- tems, 33:5318–5331, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [7] Amit Dekel, Linus Harenstam-Nielsen, and Sergio Cac- camo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Optimal least-squares solution to the hand-eye cali- bration problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In Proceedings of the IEEE/CVF Confer- ence on Computer Vision and Pattern Recognition (CVPR), June 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [8] Steven Diamond and Stephen Boyd.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' CVXPY: A Python- embedded modeling language for convex optimization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Jour- nal of Machine Learning Research, 17(83):1–5, 2016.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 7 [9] Richard I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Hartley and Peter Sturm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Triangulation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Computer Vision and Image Understanding, 68(2):146–157, 1997.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2, 3, 7 [10] Christoph Helmberg and Franz Rendl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' A spectral bundle method for semidefinite programming.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' SIAM Journal on Op- timization, 10(3):673–696, 2000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [11] Yuhe Jin, Dmytro Mishkin, Anastasiia Mishchuk, Jiri Matas, Pascal Fua, Kwang Moo Yi, and Eduard Trulls.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Image Matching across Wide Baselines: From Paper to Practice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' International Journal of Computer Vision, 2020.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 8 [12] Fredrik Kahl and Didier Henrion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Globally optimal estimates for geometric reconstruction problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' International Jour- nal of Computer Vision, 74(1):3–15, 2007.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [13] Lai Kang, Lingda Wu, and Yee-Hong Yang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Robust multi- view L2 triangulation via optimal inlier selection and 3D structure refinement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Pattern Recognition, 47(9):2974–2992, September 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [14] Jens Keuchel, Christoph Schnorr, Christian Schellewald, and Daniel Cremers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Binary partitioning, perceptual group- ing, and restoration with semidefinite programming.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' IEEE Transactions on Pattern Analysis and Machine Intelligence, 25(11):1364–1379, 2003.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [15] Seong Hun Lee and Javier Civera.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Closed-form optimal two- view triangulation based on angular errors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' pages 2681– 2689, 10 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [16] Seong Hun Lee and Javier Civera.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Robust uncertainty-aware multiview triangulation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' CoRR, abs/2008.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='01258, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [17] Brendan O’Donoghue, Eric Chu, Neal Parikh, and Stephen Boyd.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Conic optimization via operator splitting and homoge- neous self-dual embedding.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Journal of Optimization Theory and Applications, 169(3):1042–1068, June 2016.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 3 [18] Carl Olsson, Fredrik Kahl, and Richard Hartley.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Projec- tive least-squares: Global solutions with local optimization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 1216–1223, 2009.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 8 [19] Liangzu Peng, Mahyar Fazlyab, and Ren´e Vidal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Semidefi- nite relaxations of truncated least-squares in robust rotation search: Tight or not.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In European Conference on Computer Vision, pages 673–691.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Springer, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [20] Johannes L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Sch¨onberger and Jan-Michael Frahm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Structure- from-motion revisited.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4104–4113, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [21] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Van Loan and N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Pitsianis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Approximation with Kro- necker Products, pages 293–314.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Springer Netherlands, Dor- drecht, 1993.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 7 [22] Emmett Wise, Matthew Giamou, Soroush Khoubyarian, Ab- hinav Grover, and Jonathan Kelly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Certifiably optimal monocular hand-eye calibration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In 2020 IEEE International Conference on Multisensor Fusion and Integration for Intel- ligent Systems (MFI), pages 271–278.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' IEEE, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [23] Thomas Wodtko, Markus Horn, Michael Buchholz, and Klaus Dietmayer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Globally optimal multi-scale monocular hand-eye calibration using dual quaternions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In 2021 Inter- national Conference on 3D Vision (3DV), pages 249–257.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' IEEE, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [24] Heng Yang and Luca Carlone.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' A quaternion-based certifi- ably optimal solution to the wahba problem with outliers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), October 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [25] Heng Yang and Luca Carlone.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In perfect shape: Certifiably optimal 3d shape reconstruction from 2d landmarks.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In Pro- ceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [26] Heng Yang and Luca Carlone.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Certifiably optimal outlier- robust geometric perception: Semidefinite relaxations and scalable global optimization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2, 5 [27] Heng Yang, Jingnan Shi, and Luca Carlone.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Teaser: Fast and certifiable point cloud registration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' IEEE Transactions on Robotics, 37(2):314–333, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 [28] Ji Zhao.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' An efficient solution to non-minimal case essential matrix estimation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 2 9 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Local stability of fractional method In this section we will prove two of the criteria required for local stability for the robust fractional method Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RTF) for noise-free and outlier-free measurements.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Local stabil- ity for the non-robust case was shown already in [4] but we will provide an alternate proof here in our notation, since it will lead into the extension to the robust case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' For this we will need the stronger version of Fact 2, which we will restate here loosely (see [5] Theorem 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='5 for more details).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Using the definition A(ξ) = �k i=1 ξiAi: Fact 3.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' If we,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' in addition to the conditions in Fact 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' have that: (i) (ACQ) ACQ holds (ii) (smoothness) The the constraint set is smooth with re- spect to pertubations to the constraints (iii) (non-branch point) The nullspace of the multiplier ma- trix and the tangent space of the constrant-set at the optimum don’t intersect nontrivially: ker( ˆS) ∩ Tˆz = {0} (iv) (restricted slater) There exists ξ′,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' λ′ such that A(ξ′)− λ′E is positive definite on the subspace of vectors z⊥ for which ˆSz⊥ = 0 and ˆzT z⊥ ̸= 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' In other words the part of the nullspace of ˆS which is orthogonal to the solution ˆz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' The tangent space in (iii) is given by Tˆz = ker(ˆzT A1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ˆzT Ak;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ˆzT E).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Non-robust version We will show (iii-iv) for a version of Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (TF) with some- what less constraints, noting that if we show (iii-iv) for the problem with less constraints we can then add in the remain- ing constraints back in and set the corresponding multipliers to zero to show that (iii-iv) holds for the original problem as well.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Note however that since we don’t show (i-ii) the full proof is incomplete and is left for future work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Assuming (i-ii) holds, the fractional relax- ation Eq.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (TF) is tight and locally stable for noise-free and outlier-free measurements ˜xi, i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We start by partitioning the Lagrange multipliers as ξ = (ϕ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' α).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Where ϕ = (ϕ1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ϕ2n), and each ϕi ∈ R4 contains the multipliers corresponding to ith reprojection constraint multiplied by the entries of ¯X (recall that there are two reprojection constraints per observation).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Note that in the original formulation we also multiply by all the en- tries of x ⊗ ¯X as well, but as we will see these are not necessary for the proof to hold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' And α corresponds to the kronecker product constraints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Since the observations ˜x are noise free we can denote the corresponding unique3 3D point in homogeneous coordi- nates as ˆX ∈ R4, normalized such that ∥ ˆX∥ = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' It will be convenient to introduce the reparametrization u = ˜x which is the same as the observation vector, except partitioned such that u = (u1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' u2n), ui ∈ R, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' u2i+k = ˜xik for i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , n, k = 1, 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' The primal optimum is then ob- tained at ˆz = ¯u ⊗ ˆX, which is verified by setting ˆξ = ˆλ = 0 to get ˆSˆz = (M˜x ⊗ I4)(¯u ⊗ ˆX) = (M˜x¯u) ⊗ ˆX = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We then note that, due to the properties of the kronecker product4 and that M˜x is positive semidefintie with corank 1, we have that M˜x ⊗ I4 is positive semidefinite with corank 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' So the conditions of Fact 1 are satisfied.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Since the nullspace ker( ˆS) is 4-dimensional and contains the four orthogonal vectors ˆz = ¯u ⊗ ˆX and ˆzl = ¯u ⊗ ˆXl where ˆXT ˆXl = 0, ˆXT l ˆXk = 0 for k ̸= l = 1, 2, 3 we can parametrize z⊥ from (iv) as z⊥ = ¯u⊗ ˆX⊥ where ˆXT ⊥ ˆX = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' For (iii) we need to show that the vectors that span ker( ˆS) are not in Tˆz, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' for any z ∈ ker( ˆS) either that ˆzT Aiz ̸= 0 for some constraint i, or that ˆzT Ez ̸= 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' This is the case since ˆzT Eˆz = 1 ̸= 0 and, letting Kijst be the kro- necker constraint matrix corresponding to index st of block ij, ˆzT Kijstzl = uiuj( ˆXs ˆXlt − ˆXt ˆXls) is nonzero for at least some index ijst unless u = 0 or ˆX and ˆXl are paral- lel, which is not the case by construction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' To show (iv), we set α′ = λ′ = 0 and ϕ′ i = uibi − ai, and verify that with z⊥ as above: zT ⊥A(ϕ′, 0)z⊥ = 2n � i=1 ˆXT ⊥ϕ′ i(uibi − ai) ˆX⊥ = 2n � i=1 ((uibi − ai)T ˆX⊥)2 > 0 (16) where the final strict inequality follows from the fact that each term is strictly positive as (uibi − ai)T ˆX = 0 by the original constraints and ˆX⊥ is orthogonal to ˆX.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We note that, while not all constraints used in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (TF) are required for (iii-iv) to hold, we have found some cases where adding the additional constraints results in a tighter relaxation in the presence of noise, so we used the full set of constraints in our experiments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Robust version We now move on to the robust fractional method Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Assuming (i-ii) holds, the fractional relax- ation Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (RTF) is tight and locally stable for noise-free and outlier-free measurements ˜xi, i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , n.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 3assuming the observations are not degenerate, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' not all on a line.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 4For matrices A ∈ Sn, B ∈ Sm with eigenvalues αi, βj the eigen- values of the kronecker product A ⊗ B are given by the products of the eigenvalues αiβj for i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , n, j = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , m.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 10 Proof.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Partition the Lagrange multipliers as ξ = (ϕ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' µ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' η;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' α), where as in Theorem 2 ϕ corresponds to the reprojection constraints and α corresponds to the kronecker constraints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We let µ ∈ R32n correspond to the constraints ¯Xs ¯Xt(yikθi − yik) = 0 for s, t = 1, 2, 3, 4, k = 1, 2 and i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' , n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' And finally we similarly have that η ∈ R16n = (η1;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' .' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' ηn), ηi ∈ R16 corresponds to the constraints ¯Xs ¯Xt(θ2 i − θi) = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' For each view i we collect the corresponding subset of η into a 4×4 matrix Hi defined such that ¯XT Hi ¯X = �4 s,t=1 ηist ¯Xs ¯Xt.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' To verify the global optimum we start by setting ˆz = ¯uθ ⊗ ˆX where uθ = (˜x;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 1n).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We then note that the con- straint matrices for for the ηi-constraints can be written as a kronecker product to get: S(0, 0, η, 0) = M c ˜x ⊗ I4 + n � i=1 Ti ⊗ Hi (17) where each Ti ∈ S3n+1 is defined such that ¯yT θ Ti¯yθ = θ2 i − θi for arbitrary yθ as in Sec.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' We then set ˆη such that ˆHi = ciI4 and ˆϕ = ˆµ = ˆα = ˆλ = 0 to get: ˆS = S(0, 0, ˆη, 0) = (M c ˜x + n � i=1 ciTi) ⊗ I4.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (18) Now, by the same argument as in Theorem 1 the matrix M c ˜x + �n i=1 ciTi is positive semidefinite with corank 1, so ˆS is positive semidefinite with corank 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Meaning that the conditions of Fact 1 are satisfied.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (iii) also follows using the same argument based on the kronecker constraints as in Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' Finally, for (iv) we note that ker( ˆS) is spanned by ˆz and ˆzl = ¯uθ ⊗ ˆXl, l = 1, 2, 3, so by setting µ′ = η′ = α′ = λ′ = 0 and ϕ′ i = uibi − ai restricted slater for ˆS follows in the same way as in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' (16).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} +page_content=' 11' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FNFJT4oBgHgl3EQfCyyx/content/2301.11431v1.pdf'} diff --git a/FdFLT4oBgHgl3EQfGC-x/content/tmp_files/2301.11990v1.pdf.txt b/FdFLT4oBgHgl3EQfGC-x/content/tmp_files/2301.11990v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..145386aa25e975026b05e2ac9627ef9374e1eca5 --- /dev/null +++ b/FdFLT4oBgHgl3EQfGC-x/content/tmp_files/2301.11990v1.pdf.txt @@ -0,0 +1,1336 @@ +Alignment with human representations supports robust few-shot learning +Ilia Sucholutsky 1 Thomas L. 
Griffiths 1 2 +Abstract +Should we care whether AI systems have repre- +sentations of the world that are similar to those +of humans? We provide an information-theoretic +analysis that suggests that there should be a U- +shaped relationship between the degree of rep- +resentational alignment with humans and perfor- +mance on few-shot learning tasks. We confirm +this prediction empirically, finding such a relation- +ship in an analysis of the performance of 491 com- +puter vision models. We also show that highly- +aligned models are more robust to both adversar- +ial attacks and domain shifts. Our results suggest +that human-alignment is often a sufficient, but not +necessary, condition for models to make effective +use of limited data, be robust, and generalize well. +1. Introduction +As AI systems are increasingly deployed in settings that +involve interactions with humans, exploring the extent to +which these systems are aligned with humans becomes more +significant. While this exploration has largely focused on the +alignment of the values of AI systems with humans (Gabriel, +2020; Kirchner et al., 2022), the alignment of their represen- +tations is also important. Representing the world in the same +way is a precursor to being able to express common values +and to comprehensible generalization. To the extent that +humans have accurate representations of the world, repre- +sentational alignment is also an effective source of inductive +bias that might make it possible to learn from limited data. +As a motivating example, imagine a meeting between a +16th century alchemist and a 21st century chemist. They +live in the same physical world and are intimately familiar +with the materials that comprise it, but they would have +significant difficulty expressing their values and generaliz- +ing the results of an experiment they observe together. 
The +alchemist would likely learn poorly from examples of a +reaction demonstrated by the chemist, not having the right +inductive biases for the way the world actually works. The +1Department of Computer Science, Princeton University, USA +2Department of Psychology, Princeton University, USA. Corre- +spondence to: Ilia Sucholutsky . +alchemist and the chemist lack representational alignment – +they represent the world in fundamentally different ways – +and this impedes generalization and learning. +In this paper, we provide a theoretical and empirical inves- +tigation of the consequences of representational alignment +with humans for AI systems, using popular computer vision +tasks as a way to study this phenomenon. We define rep- +resentational alignment as the degree to which the latent +representations of a model match the latent representations +of humans for the same set of stimuli, and refer to models +that are representationally aligned with humans as being +“human-aligned.” Several recent papers have proposed ways +to measure (Marjieh et al., 2022a;b), explain (Muttenthaler +et al., 2022; Kumar et al., 2022), and even improve (Pe- +terson et al., 2018; Fel et al., 2022) the representational +alignment of models. However, many models that score low +on these alignment metrics still have high performance on +downstream tasks like image classification (Kumar et al., +2022; Muttenthaler et al., 2022; Fel et al., 2022). +So, are there any real advantages (or disadvantages) to us- +ing human-aligned models? To answer this question, we +develop an information-theoretic framework for analyzing +representational alignment. This framework enables us to +make predictions about emergent behavior in human-aligned +models. In particular, our framework predicts that few-shot +transfer learning performance should have a U-shaped re- +lationship with alignment. 
To verify the predictions of our +framework and to probe for additional properties that arise +as a consequence of alignment, we conduct a series of ex- +periments in which we assess the downstream properties +of human-aligned models compared to their non-aligned +counterparts. From our experiments comparing 491 large +computer-vision models to over 425,000 human judgments +(across 1200 participants), we identify three properties: +• Models that have either high or low alignment with +humans are better at few-shot transfer learning than +models with medium alignment, even when correcting +for performance on the pre-training dataset. +• Human-aligned models are more robust to adversar- +ial examples, even when correcting for classification +performance on the pre-training dataset. +• Human-aligned models are more robust to domain shift, +arXiv:2301.11990v1 [cs.LG] 27 Jan 2023 + +Alignment with human representations supports robust few-shot learning +Figure 1. Schematic of representational alignment between two agents, Alice and Bob. A: Shared data (x) is shown to both agents (images +are from the animal subset of the similarity datasets from Peterson et al. (2018)). B: Both agents form representations (fA(x) and fB(x)) +of the objects they observe. C: Agents are asked to produce pairwise similarity matrices corresponding to their representations. The +similarity judgments can then be compared to measure alignment between the agents. +even when correcting for classification performance on +the pre-training dataset. +The U-shaped relationship between alignment and few-shot +learning helps to explain why previous results have not +consistently observed benefits of representational alignment. +Overall, our results suggest that representational alignment +can provide real and significant benefits for downstream +tasks, but that it may be a sufficient rather than necessary +condition for these benefits to emerge. +2. 
Related Work +With AI systems entering mainstream usage, aligning these +models with human values and understanding is increas- +ingly important (Gabriel, 2020; Kirchner et al., 2022). This +concept, referred to as AI alignment, is an important but still +largely open problem (Yudkowsky, 2016), in part due to the +difficulty of formalizing this broad concept (Soares & Fall- +enstein), or even reaching consensus on a definition (Kirch- +ner et al., 2022). In this paper, we focus on formalizing a +specific aspect of AI alignment: representational alignment. +Representational alignment is a measure of agreement be- +tween the representations of two learning agents (one of +whom is typically a human). There are numerous names, +definitions, measures, and uses of this form of alignment +across various fields, including cognitive science, neuro- +science, and machine learning. Some of the other names +include latent space alignment (Tucker et al., 2022), con- +cept(ual) alignment (Stolk et al., 2016; Muttenthaler et al., +2022), system alignment (Goldstone & Rogosky, 2002; +Roads & Love, 2020; Aho et al., 2022), representational +similarity analysis (RSA) (Kriegeskorte et al., 2008), and +model alignment (Marjieh et al., 2022b). +Shepard (1980) proposed that human representations can be +recovered by using behavioral data to measure the similarity +of a set of stimuli and then finding embeddings that sat- +isfy those similarity associations using methods like multi- +dimensional scaling (MDS). Similarly, in neuroscience, Rep- +resentational Similarity Analysis (RSA) is a popular tech- +nique for relating neural, behavioral, and computational +representations of the same set of stimuli via similarity anal- +ysis (Kriegeskorte et al., 2008). 
While similarity analysis +has clearly proven itself to be a powerful method, exhaus- +tively collecting pairwise similarity judgments is expensive +(O(N 2) judgments for N stimuli) and there have been nu- +merous proposals aiming to develop more efficient methods +of recovering human representations. +Jamieson & Nowak (2011) proposed an active learning +scheme for querying for human judgments using triplets +of the form “is a closer to b than to c?” and derived bounds +on the number of required queries for lossless completion +of the full similarity matrix using such queries. When an +approximate solution is acceptable, Peterson et al. (2018) +showed that pre-trained computer vision models can be used +to approximate human perceptual similarity judgments over +images. Marjieh et al. (2022a) showed that human percep- +tual similarity can be more accurately, but still efficiently, +approximated from human-produced natural language de- + +Allce +Chimp +Eagle +Wolf +Lemur +Eagle +Wolf +Lemur +Chimp +BobAlignment with human representations supports robust few-shot learning +Figure 2. Schematic of triplet-based supervised learning where one agent acts as the teacher and the other as the student. A: A new object +is shown only to the Teacher. B: The Teacher forms a representation of the object and sends the Student a triplet relating the new object to +two objects previously observed by both agents. C: The Student interprets the triplet in their own representation space and eliminates the +half-plane where the new object cannot be located (shaded in red) according to the triplet. +scriptions of the stimuli of interest (for example by using +large language models to estimate similarity over pairs of +these descriptions). Marjieh et al. (2022b) extended this +result to a variety of domains (vision, audio, and video) and +measured alignment for hundreds of pre-trained models. 
+Several recent studies have also attempted to identify what +design choices lead to improved representational alignment +in models (Kumar et al., 2022; Muttenthaler et al., 2022; Fel +et al., 2022), although Moschella et al. (2022) found that +even with variation in design choices, many models trained +on the same dataset end up learning similar ‘relative repre- +sentations’ (embeddings projected into a relational form like +a similarity matrix), or in other words, converge to the same +representational space. Tucker et al. (2022) showed that rep- +resentational alignment emerges not only in static settings +like image classification, but also dynamic reinforcement +learning tasks involving human-robot interaction. Several +studies have also focused on studying alignment specifically +in humans, both between different people and for a single +person but across multiple tasks and domains (Goldstone & +Rogosky, 2002; Roads & Love, 2020). +Although several recent papers have proposed ways to mea- +sure (Marjieh et al., 2022a;b), explain (Muttenthaler et al., +2022; Kumar et al., 2022), and even improve (Peterson et al., +2018; Fel et al., 2022) the representational alignment of mod- +els, few have focused on studying the downstream impact +of a model being representationally aligned with humans, +and many studies simply rely on the intuition that better +alignment leads to better performance to justify pursuing in- +creased alignment. While there is recent evidence to suggest +that alignment may help humans learn across domains and +perform zero-shot generalization (Aho et al., 2022), there +is also evidence to suggest that alignment may not always +be beneficial for models, with models scoring low on align- +ment metrics achieving higher performance on downstream +tasks like image classification (Kumar et al., 2022; Mut- +tenthaler et al., 2022; Fel et al., 2022). 
Our goal in this +paper is to conduct an in-depth study into the downstream +effects of representational alignment. Our theoretical and +empirical results both validate and explain the apparently +conflicting results seen in previous literature as special cases +of the (more complex than previously suspected) effects of +representational alignment. +3. Theory +If we want to measure representational alignment across +different architectures with potentially mismatched embed- +ding coordinate spaces and dimensionalities, then we need a +definition of representation spaces that enables comparisons +of such spaces. Inspired by the cognitive science litera- +ture on using non-metric similarity triplets to recover hid- +den psychological representations, and building on rigorous +computational theory that analyzes such triplets (Jamieson +& Nowak, 2011), we propose a triplet-based definition of +representation learning. We summarize representational +alignment under our framework in the schematic shown in +Figure 1. +Definition 3.1. As proposed by Jamieson & Nowak (2011), +for an ordered set of n objects in d dimensions represented +as a vector x ∈ Rnd, a similarity triplet corresponding to +the question ‘is xi closer to xj than to xk?’ is a membership +query of the form 1x∈rijk where rijk = {x ∈ Rnd : |xi − +xj| < |xi − xk|}. +Definition 3.2. For an ordered set of objects x ∈ Rnd and +model M with embeddings fM : Rd → RdM , the triplet- + +Got it! +Chimp +Eagle +● Wolf +Lemur +oTiger +Eagle +Wolf +Lemur +Chimp +The new object is closer +to Wolf than to Lemur!Alignment with human representations supports robust few-shot learning +based representation space of M is the set of all triplets +SM(x) = {(i, j, k) ∈ {1, ..., n}3 : 1x∈rM +ijk, i ̸= j, j ̸= +k, k ̸= i} where rM +ijk = {x ∈ Rnd : |fM(xi) − fM(xj)| < +|fM(xi) − fM(xk)|}. +Remark 3.3. 
Since switching the order of j and k in a triplet +deterministically flips the query between 0 and 1, it is easy +to see that, for a set of n objects, a representation space is +determined by a set of just n(n−1)(n−2)/2 unique triplets. +Thus, for a set of n objects, SM(x) can be represented as a +binary vector of length n(n − 1)(n − 2)/2 and this is the +interpretation we use in the remainder of this section. +Definition 3.4. Consider a training set x ∈ Rnd and a +corresponding target representation space ST (x). We define +representation learning with a parametrized model Mθ as +optimizing the objective minθ ℓ(ST (x), SMθ(x)) where ℓ +is a loss function penalizing divergence between the target +and learned representation spaces. +Sucholutsky et al. (2022) showed that many machine learn- +ing objectives and associated supervision signals, ranging +from similarity judgments for contrastive learning to labels +for classification, can be converted into similarity triplets +making them compatible with this definition of representa- +tion learning. Using this definition, we also immediately get +an intuitive definition of alignment between two agents (e.g. +a model and a person) as the rate of agreement on matched +triplets (also known as simple match coefficient). +Definition 3.5. Consider a dataset x ∈ Rnd and two agents, +A and B, with corresponding representation spaces SA(x) +and SB(x). We define representational misalignment be- +tween A and B as DR(A, B; x) = ||SA(x)−SB(x)||1 +n(n−1)(n−2)/2 . Rep- +resentational alignment is then simply 1 − DR(A, B; x). +Definition 3.6. Consider a single set of three points t = +(xi, xj, xk) ∈ X ⊆ R3d sampled uniformly at random, +and two agents, A and B. We define stochastic representa- +tional misalignment between A and B as DP (A, B; X) = +P(1x∈rA +ijk = 1x∈rB +ijk). Stochastic representational align- +ment is then simply 1 − DP (A, B; X). 
+Since each triplet can be shown to correspond to one bit +of information (Jamieson & Nowak, 2011), and we have a +probabilistic definition of alignment, we can now turn to +information theory to define what it means for one agent to +supervise the learning of another (potentially misaligned) +agent. We visualize supervised learning under our frame- +work in the schematic in Figure 1. +For perfectly aligned models, Jamieson & Nowak (2011) +derive the following result. +Lemma 3.7. Consider input space X ⊆ Rnd, shared data +x ∼ X, new object c ∈ Rd, and models A and B with +DP (A, B; X) = 0. Ω(d log(n)) triplet queries are required +for B to identify the location of c relative to all objects in x. +Proposition 3.8. Consider input space X ⊆ Rnd, shared +data x ∼ X, new object c ∈ Rd, and models A and B +with DP (A, B; X) = ϵ. Ω( +d log(n) +1+ϵ log(ϵ)+(1−ϵ) log(1−ϵ)) triplet +queries are required for B to learn c. +Proof. Consider a communication game involving agents +Alice and Bob, a dataset x that they both have their own +representations for (we call this ‘shared data’), and a new +object c that only Alice can see. Alice can only communi- +cate with Bob by giving binary responses to triplet queries +based on Alice’s own representations of the objects. The +goal of the game is for Bob to learn the location of c with +respect to the other objects in x. DP (A, B; X) = ϵ is the +probability for any triplet drawn from x ∪ c to be flipped +in Bob’s representation space relative to Alice’s represen- +tation space. This is equivalent to Alice and Bob commu- +nicating over a binary symmetric channel where the prob- +ability of a bit flip is ϵ. The capacity of this channel is +Cϵ = 1 − H(ϵ, 1 − ϵ) = 1 + ϵ log(ϵ) + (1 − ϵ) log(1 − ϵ). +For error-free communication over this channel, the highest +achievable rate is R < Cϵ, meaning at least +1 +Cϵ bits are +required to communicate each useful bit over this channel. 
+By Lemma 3.7, Ω(d log(n)) useful bits are required for Bob
+to learn the location of c over a channel with no error, and
+thus a total of Ω( d log(n)
+Cϵ
+) bits are required.
+Theorem 3.9 (Alignment and few-shot learning). Con-
+sider input space X ⊆ Rnd, shared data x ∼ X, new
+objects c ∈ Rmd and three models, A, B1, and B2 with
+DP (A, B1; X) = ϵB1 and DP (A, B2; X) = ϵB2.
+If
+|0.5 − ϵB1| < |0.5 − ϵB2|, then B2 requires fewer queries
+to learn c than B1 does.
+Proof. From Proposition 3.8, it immediately follows that B1
+requires Ω( md log(n)
+CϵB1
+) queries and B2 requires Ω( md log(n)
+CϵB2
+)
+queries. Cϵ : [0, 1] → [0, 1] is a symmetric convex function
+with a minimum of 0 at ϵ = 0.5 and maxima of 1 at ϵ = 0, 1.
+Thus, if |0.5 − ϵB1| < |0.5 − ϵB2|, then Ω( md log(n)
+CϵB1
+) >
+Ω( md log(n)
+CϵB2
+).
+Remark 3.10. Sucholutsky et al. (2022) showed that com-
+monly used supervision signals, like hard and soft classifi-
+cation labels, can be converted into sets of triplet queries.
+Reducing the number of required queries to learn the loca-
+tion of a new object is equivalent to reducing the number
+of required labels. It follows from Theorem 3.9 that very
+high or very low alignment leads to high few-shot learning
+performance on downstream tasks like classification (where
+learning a new class can be formulated as learning the lo-
+cation of the centroid or boundaries of that class using a
+small number of labels), while medium alignment leads to
+low few-shot learning performance. Intuitively, this comes
+as a result of mutual information between the teacher and
+
+Alignment with human representations supports robust few-shot learning
+student representations being minimized when alignment is
+at 0.5. In other words, if Bob knows that most bits coming
+from Alice are flipped (i.e. DP (A, B) = ϵ > 0.5), then
+going forward, Bob can flip all the bits coming from Alice
+to achieve a lower error rate (i.e. DP ( ¯A, B) = 1−ϵ < 0.5).
+Remark 3.11.
We note that generalizing under domain-shift +can be considered a form of zero-shot transfer learning and +selecting adversarial examples can be seen as selecting ob- +jects that maximize representational disagreement between +a model and a human. Under these interpretations, our +framework also predicts that highly-aligned models are ro- +bust to both domain shift and adversarial examples though +the framing in these cases is less intuitive than for the few- +shot learning case. We share some preliminary theoretical +analyses of robustness properties in the Appendix. +4. Experiments +Our theoretical analysis shows how representational align- +ment can serve as a source of inductive bias that reduces +the number of bits that need to be acquired from data. In +particular, Theorem 3.9 predicts an unexpected U-shaped +relationship between alignment and few-shot learning per- +formance. We now present a series of experiments designed +to test this prediction and examine whether it extends to +robustness to adversarial examples and domain shift. +4.1. Methods +Models +The pre-trained models evaluated in this paper are +taken from the PyTorch Image Models package (Wightman, +2019). The full list of 491 models used in our experiments +can be found in the Appendix. +Data +All of the models used in this paper were pre-trained +on ImageNet-1k (Russakovsky et al., 2015) and had their +performance evaluated on the ImageNet-1k validation set. +Adversarial robustness was measured on the ImageNet-A +dataset of adversarial examples that have been found to foil +most ImageNet models (Hendrycks et al., 2021b). Zero-shot +domain shift robustness was measured on the ImageNet-R +dataset of renditions (e.g., paintings, toys, statues, etc.) +of ImageNet classes (Hendrycks et al., 2021a) and the +ImageNet-Sketch dataset of black-and-white sketches of the +ImageNet classes (Wang et al., 2019). 
All ImageNet and Im- +ageNet variant results come from the PyTorch Image Mod- +els package (Wightman, 2019). Few-shot transfer learning +performance was evaluated on the CIFAR100 (Krizhevsky +et al., 2009) test set with n ∈ {1, 5, 10, 20, 40, 80} exam- +ples per class used for few-shot learning and the remaining +examples used for evaluation. We measure representational +alignment of models by computing the three metrics de- +scribed below on the six image datasets from Peterson et al. +(2018) and their respective sets of similarity judgments con- +sisting of over 425,000 judgments across 1200 participants, +as described by Marjieh et al. (2022a). We average each of +the three alignment metrics over the six datasets. +Alignment metrics +The representational alignment met- +rics we consider are correlation over pairwise similarity +judgments and agreement between similarity triplets (i.e., +the proportion of triplet queries that have the same response +for the matched sets of representations). Similarity triplets +provide non-metric information since in each query we dis- +card the actual pairwise distances and only retain which +distance is larger. Analogously, in the pairwise case, if +we use Spearman rank correlation then we are comparing +the relative magnitudes of pairwise distances (of which the +triplets form a subset) and discarding the magnitudes. On +the other hand, Pearson correlation over pairwise similari- +ties takes into account the actual magnitudes of the pairwise +similarity judgments which, if accurate, can help provide +more information about fine-grained differences that affect +representational alignment. We use all three metrics in our +analyses and refer to them as Spearman pairwise alignment, +Pearson pairwise alignment, and triplet alignment. +Few-shot transfer learning +We measure few-shot trans- +fer learning performance using three methods: linear prob- +ing and two classifier heads. 
For linear probing, we take +the embeddings coming from the penultimate layer of a +model and fit a logistic regression using ‘scikit-learn’ (Pe- +dregosa et al., 2011). For the classifier heads, we use a +one- and two-hidden layer neural network implemented in +PyTorch (Paszke et al., 2019), both with an input dropout +rate of 0.8 and the latter with a ReLU hidden layer. +Correcting correlation for ImageNet performance +For +each of our experiments, ImageNet-1k performance can be +a confounding variable when trying to understand the rela- +tionship between alignment and a downstream property. To +account for this, when measuring correlation between align- +ment and other properties, we compute partial correlation +with ImageNet-1k Top-1 validation accuracy as a covariate +using the ‘Pingouin’ Python package (Vallat, 2018). +4.2. Results +Few-shot transfer learning +We find weak, but mostly +statistically significant, positive correlations between our +three alignment metrics and n-shot learning performance for +small values of n (the correlations generally grow weaker +and statistical significance disappears as n increases) as +shown in Figure 4. For each alignment metric, we compare +the n-shot transfer learning performance of the five models +with the highest alignment, the five models with the lowest +alignment, and the five models with the nearest-to-the-mean +alignment as shown in Figure 3. We find that according to +all three alignment metrics, the most aligned models have +far better n-shot learning performance at all levels of n. + +Alignment with human representations supports robust few-shot learning +Figure 3. Average n-shot transfer learning performance using lin- +ear probing, a 1-layer classification head, and a 2-layer classifica- +tion head on CIFAR100 of the five models with highest, lowest, +and closest-to-the-mean levels for each of Pearson (ρP ), Spearman +(ρS), and triplet alignment. +Figure 4. 
Pearson correlation (corrected for ImageNet Top-1 ac- +curacy) with 95% confidence intervals between n-shot transfer +learning performance using linear probing, a 1-layer classification +head, and a 2-layer classification head on CIFAR100 and each of +Pearson (ρP ), Spearman (ρS), and triplet alignment. +Adversarial robustness +We also find weak, but statisti- +cally significant, positive correlations between our three +Figure 5. Pearson correlation (with 95% confidence intervals) be- +tween n-shot transfer learning performance using linear probing, +a 1-layer classification head, and a 2-layer classification head on +CIFAR100 and each of Pearson (ρP ), Spearman (ρS), and triplet +z2 alignment metrics. +Figure 6. Comparing 1-shot learning performance on CIFAR100 +to Pearson pairwise alignment for all 491 models. +alignment metrics and both Top-1 and Top-5 accuracy on +the ImageNet-A dataset, when correcting for ImageNet-1k +performance, as shown in Table 3. For each alignment +metric, we also compare the five models with the highest +alignment, the five models with the lowest alignment, and +the five models with the nearest-to-the-mean alignment as +seen in Table 1. We find that according to all three align- +ment metrics, the most aligned models perform far better on +both Top-1 and Top-5 predictions for ImageNet-A. 
+ +Pearson +Spearman +Triplets +80 +Linear probe +60 +40 +20 +Test accuracy (%) +80 +1-layer NN +60 +40 +20 +80 +2-layer NN +60 +High 5 +40 +- +Mid 5 +2 +20 +Low 5 +0 +50 +0 +50 +0 +50 +Training examples per class (n)Pearson +Spearman +Triplets +0.2 +Linear probe +0.1 +0.0 +-0.1 +accura +0.2 +test +1-layer NN +0.1 +0.0 +Correl +-0.1 +0.2 +2-layer NN +0.1 +0.0 +0.1 +0 +50 +0 +50 +0 +50 +Training examples per class (n)Pearson +Spearman +Triplets +0.4 - +Linear probe +0.2 +accuracy +0.0 +Correlation with test +0.4 +1-layer NN +0.2 +0.0 +0.4 +2-layer NN +0.2 +0.0 +0 +50 +50 +0 +50 +Training examples per class (n)60 +0.04p +3.17pp+96.7 +CIFAR100 1-shot test accuracy (%) +50 +40 +30 +20 +10 +25 +30 +35 +40 +45 +50 +55 +60 +65 +70 +Pearson pairwise alignment(pp)Alignment with human representations supports robust few-shot learning +Table 1. Average top-1 and top-5 performance on ImageNet-A of +the five models with highest, lowest, and closest-to-the-mean levels +of Pearson (ρP ), Spearman (ρS), and triplet alignment. +ρp +ρs +TRIPLETS +IMAGENET-A +(TOP 1 ACC) +HIGH 5 +59.93 +46.83 +44.56 +MID 5 +10.26 +12.30 +18.35 +LOW 5 +30.89 +21.80 +29.99 +IMAGENET-A +(TOP 5 ACC) +HIGH 5 +84.92 +73.86 +72.20 +MID 5 +36.54 +38.14 +46.63 +LOW 5 +59.10 +49.36 +57.70 +Table 2. Average top-1 and top-5 performance on ImageNet-R and +ImageNet-Sketch (-S) of the five models with highest, lowest, and +closest-to-the-mean levels of Pearson (ρP ), Spearman (ρS), and +triplet alignment. +ρp +ρs +TRIPLETS +IMAGENET-R +(TOP 1 ACC) +HIGH 5 +61.57 +53.78 +54.27 +MID 5 +39.16 +40.01 +41.80 +LOW 5 +51.33 +46.76 +50.16 +IMAGENET-R +(TOP 5 ACC) +HIGH 5 +75.92 +68.61 +68.82 +MID 5 +55.08 +56.81 +57.67 +LOW 5 +66.55 +61.84 +65.22 +IMAGENET-S +(TOP 1 ACC) +HIGH 5 +48.06 +40.37 +40.31 +MID 5 +26.90 +28.02 +29.85 +LOW 5 +38.11 +33.90 +37.04 +IMAGENET-S +(TOP 5 ACC) +HIGH 5 +71.10 +62.45 +62.24 +MID 5 +44.90 +46.67 +48.58 +LOW 5 +58.91 +53.43 +57.57 +Table 3. 
Pearson correlation (corrected for ImageNet Top-1 ac- +curacy) between Top-1 and Top-5 accuracy on ImageNet-A, +ImageNet-R, and ImageNet-Sketch (-S) and Pearson (ρP ), Spear- +man (ρS), and triplet alignment. +ρp +ρs +TRIPLETS +IMAGENET-A +(TOP 1 ACC) +0.277 +(P=0.000) +0.218 +(P=0.000) +0.203 +(P=0.000) +IMAGENET-A +(TOP 5 ACC) +0.363 +(P=0.000) +0.255 +(P=0.000) +0.235 +(P=0.000) +IMAGENET-R +(TOP 1 ACC) +0.089 +(P=0.05) +0.140 +(P=0.002) +0.158 +(P=0.000) +IMAGENET-R +(TOP 5 ACC) +0.094 +(P=0.038) +0.172 +(P=0.000) +0.190 +(P=0.000) +IMAGENET-S +(TOP 1 ACC) +0.073 +(P=0.105) +0.106 +(P=0.019) +0.122 +(P=0.007) +IMAGENET-S +(TOP 5 ACC) +0.082 +(P=0.069) +0.133 +(P=0.003) +0.150 +(P=0.001) +Table 4. Pearson correlation between Top-1 and Top-5 accuracy on +ImageNet-A, ImageNet-R, and ImageNet-Sketch (-S) and Pearson +(ρP ), Spearman (ρS), and triplet alignment. +ρp +ρs +TRIPLETS +IMAGENET-A +(TOP 1 ACC) +0.239 +(P=0.000) +0.151 +(P=0.001) +0.157 +(P=0.000) +IMAGENET-A +(TOP 5 ACC) +0.210 +(P=0.000) +0.112 +(P=0.013) +0.121 +(P=0.007) +IMAGENET-R +(TOP 1 ACC) +0.166 +(P=0.000) +0.164 +(P=0.000) +0.182 +(P=0.000) +IMAGENET-R +(TOP 5 ACC) +0.173 +(P=0.000) +0.170 +(P=0.000) +0.194 +(P=0.000) +IMAGENET-S +(TOP 1 ACC) +0.191 +(P=0.000) +0.164 +(P=0.000) +0.179 +(P=0.000) +IMAGENET-S +(TOP 5 ACC) +0.211 +(P=0.000) +0.197 +(P=0.000) +0.218 +(P=0.000) +Domain-shift robustness +Similarly, we find weak, but +mostly statistically significant, positive correlations between +our three alignment metrics and both Top-1 and Top-5 accu- +racy on both the ImageNet-R and ImageNet-Sketch datasets, +when correcting for ImageNet-1k performance, as shown +in Table 3. For each alignment metric, we also compare +the five models with the highest alignment, the five mod- +els with the lowest alignment, and the five models with +the nearest-to-the-mean alignment as seen in Table 2. 
We
+find that according to all three alignment metrics, the most
+aligned models perform far better on both Top-1 and Top-5
+predictions for both datasets.
+Non-linear relationships
+Based on the results in Tables 1
+and 2, as well as in Figure 3, while models with low align-
+ment underperform models with high alignment, they seem
+to consistently outperform models with medium alignment.
+Furthermore, the linear correlation between alignment and
+downstream performance is at best fairly weak. When look-
+ing at the entire set of 491 models in Figure 6, there ap-
+pears to be a non-linear, potentially quadratic relationship
+between alignment and few-shot transfer learning perfor-
+mance. This matches the U-shaped behavior predicted by
+Theorem 3.9. To test for this relationship, we measure the
+correlations between the downstream properties of interest
+and the z2 transformations of each alignment metric where
+z2(xi) = ((xi − µ)/σ)2, µ is the mean, and σ is the standard
+deviation. We find much stronger, statistically significant,
+positive correlations between the z2 alignment metrics and
+few-shot transfer learning performance as seen in Figure 5.
+Using z2 alignment metrics also results in slightly stronger
+correlations for domain-shift robustness but weaker correla-
+tions for adversarial robustness as seen in Table 4.
+
+Alignment with human representations supports robust few-shot learning
+5. Discussion
+Our experimental results confirm our theoretical predictions
+– not only do highly-aligned models exhibit better few-shot
+learning and robustness properties than models with lower
+alignment, but we also observe the U-shaped relationship
+with alignment predicted in Theorem 3.9. We now dissect
+the theoretical and empirical results via three key questions.
+5.1. Which alignment metric should we be using?
+We find that Spearman pairwise alignment is almost per-
+fectly correlated with triplet alignment (ρ = 0.992).
This +suggests that triplets, though they only form a small fraction +of the full set of quadruplets, already capture the majority +of the non-metric information contained in the entire set +of quadruplets. Pearson pairwise alignment has a strong, +though not quite as strong, correlation with triplet align- +ment (ρ = 0.824). While all three metrics have statistically +significant correlations with the downstream properties we +care about, Pearson pairwise alignment seems to overall +have the strongest correlations. This suggests that there is +recoverable metric information about representations en- +coded in the magnitudes of human similarity judgments, +even when these judgments are potentially noisy due to be- +ing elicited without anchor stimuli that would ground the +scale. The information-theoretic representation learning +framework would need to be extended in future work to +quantify this additional information. +5.2. Which positive downstream properties do +human-aligned models exhibit? +As predicted by our information-theoretic representation +learning framework, our experiments suggest that very +human-aligned models are better at few-shot transfer learn- +ing, more robust to adversarial examples, and more robust +to test-time domain shift than models with lower degrees +of alignment. The correlations between alignment and each +downstream property were positive and statistically signif- +icant and, in every experiment we conducted, the models +with the highest level of alignment outperformed the other +models. These results seem to confirm the intuition that +human alignment is useful in tasks where we want to use +human supervision to elicit human-like behavior. 
+While the models with the highest level of alignment clearly +exhibit the best downstream performance across all three +sets of tasks, our results suggest an additional unexpected +insight: there is a U-shaped relationship between alignment +and two of the properties we test: robustness to domain shift +and few-shot transfer learning performance. Thus, while +a high-degree of alignment may be sufficient for eliciting +desirable properties in models, it does not appear to be +necessary. In fact, in cases where achieving high alignment +is impractical (e.g., due to limitations on human-labeled +data), it is possible that better results may be achieved by +avoiding alignment altogether. +5.3. Are there downsides to human alignment? +It is already clear that increasing alignment can damage per- +formance across multiple criteria when that increase moves +the alignment level into the medium range. But are there +ever downsides to increasing alignment of models that are +already at or past that range? Throughout this study, one +of our key assumptions was that the task being solved is +designed and specified by humans, or at least easily solvable +by humans. However, there are numerous domains where +humans have poor performance or where our representa- +tions of the problem or stimuli are not helpful for solving +the task and a different set of inductive biases are required. +For example, many domains targeted by deep learning – +such as protein folding, drug design, and social network +analysis – require geometric inductive biases (Bronstein +et al., 2021). In these cases, the goal should be to achieve +alignment with the underlying laws governing the system of +interest (e.g., physical forces or mathematical laws), rather +than with humans. +6. 
Conclusion +Our findings confirm the intuition that representational align- +ment with humans elicits desirable human-like traits in mod- +els, including the ability to generalize from small data and +lower susceptibility to adversarial attacks and domain shift. +However, as both our theory and experiments suggest, in- +creasing alignment is not always desirable, first due to a U- +shaped relationship between alignment and desirable traits, +and second because there are many domains where human +representations simply are not useful. Notably, our dis- +covery of the U-shaped relationship serves to resolve the +tension between previously conflicting findings regarding +whether alignment improves performance. We hope that +our framework and results motivate further study into both +the positive and negative consequences of aligning models +with humans across diverse domains. We believe that rep- +resentational alignment is a quantifiable and tangible route +for making progress on the general AI alignment problem +by allowing us to measure agreement between models and +humans even over abstract domains such as moral values. +Acknowledgements +We would like to thank Lukas Muttenthaler for excellent +discussions that helped shape some of the ideas explored +in this paper. This work was supported by an ONR grant +(N00014-18-1-2873) to TLG and an NSERC fellowship +(567554-2022) to IS. + +Alignment with human representations supports robust few-shot learning +References +Aho, K., Roads, B. D., and Love, B. C. System alignment +supports cross-domain learning and zero-shot generali- +sation. Cognition, 227:105200, 2022. ISSN 0010-0277. +doi: https://doi.org/10.1016/j.cognition.2022.105200. +Bronstein, M. M., Bruna, J., Cohen, T., and Veliˇckovi´c, +P. +Geometric deep learning: Grids, groups, graphs, +geodesics, and gauges. arXiv preprint arXiv:2104.13478, +2021. +Fel, T., Felipe, I., Linsley, D., and Serre, T. 
Harmonizing +the object recognition strategies of deep neural networks +with humans. arXiv preprint arXiv:2211.04533, 2022. +Gabriel, I. Artificial intelligence, values, and alignment. +Minds and machines, 30(3):411–437, 2020. +Goldstone, R. L. and Rogosky, B. J. Using relations within +conceptual systems to translate across conceptual systems. +Cognition, 84(3):295–320, 2002. ISSN 0010-0277. doi: +https://doi.org/10.1016/S0010-0277(02)00053-7. +Hendrycks, D., Basart, S., Mu, N., Kadavath, S., Wang, F., +Dorundo, E., Desai, R., Zhu, T., Parajuli, S., Guo, M., +et al. The many faces of robustness: A critical analysis of +out-of-distribution generalization. In Proceedings of the +IEEE/CVF International Conference on Computer Vision, +pp. 8340–8349, 2021a. +Hendrycks, D., Zhao, K., Basart, S., Steinhardt, J., and +Song, D. Natural adversarial examples. In Proceedings +of the IEEE/CVF Conference on Computer Vision and +Pattern Recognition, pp. 15262–15271, 2021b. +Jamieson, K. G. and Nowak, R. D. Low-dimensional em- +bedding using adaptively selected ordinal data. 2011 49th +Annual Allerton Conference on Communication, Control, +and Computing (Allerton), pp. 1077–1084, 2011. +Kirchner, J. H., Smith, L., Thibodeau, J., McDonell, K., +and Reynolds, L. Researching alignment research: Un- +supervised analysis. arXiv preprint arXiv:2206.02841, +2022. +Kriegeskorte, N., Mur, M., and Bandettini, P. Representa- +tional similarity analysis - connecting the branches of sys- +tems neuroscience. Frontiers in Systems Neuroscience, 2, +2008. ISSN 1662-5137. doi: 10.3389/neuro.06.004.2008. +Krizhevsky, A., Hinton, G., et al. Learning multiple layers +of features from tiny images. 2009. +Kumar, M., Houlsby, N., Kalchbrenner, N., and Cubuk, +E. D. Do better ImageNet classifiers assess perceptual +similarity better? +Transactions of Machine Learning +Research, 2022. +Marjieh, R., Sucholutsky, I., Sumers, T. R., Jacoby, N., +and Griffiths, T. L. 
Predicting human similarity judg- +ments using large language models. +arXiv preprint +arXiv:2202.04728, 2022a. +Marjieh, R., van Rijn, P., Sucholutsky, I., Sumers, T. R., +Lee, H., Griffiths, T. L., and Jacoby, N. Words are all you +need? Capturing human sensory similarity with textual +descriptors. arXiv preprint arXiv:2206.04105, 2022b. +Moschella, L., Maiorca, V., Fumero, M., Norelli, A., Lo- +catello, F., and Rodol`a, E. Relative representations enable +zero-shot latent space communication. arXiv preprint +arXiv:2209.15430, 2022. +Muttenthaler, L., Dippel, J., Linhardt, L., Vandermeulen, +R. A., and Kornblith, S. Human alignment of neural net- +work representations. arXiv preprint arXiv:2211.01201, +2022. +Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., +Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, +L., Desmaison, A., Kopf, A., Yang, E., DeVito, Z., Raison, +M., Tejani, A., Chilamkurthy, S., Steiner, B., Fang, L., +Bai, J., and Chintala, S. PyTorch: An imperative style, +high-performance deep learning library. In Advances +in Neural Information Processing Systems 32, pp. 8024– +8035. Curran Associates, Inc., 2019. +Pedregosa, F., Varoquaux, G., Gramfort, A., Michel, V., +Thirion, B., Grisel, O., Blondel, M., Prettenhofer, P., +Weiss, R., Dubourg, V., Vanderplas, J., Passos, A., Cour- +napeau, D., Brucher, M., Perrot, M., and Duchesnay, E. +Scikit-learn: Machine learning in Python. Journal of +Machine Learning Research, 12:2825–2830, 2011. +Peterson, J. C., Abbott, J. T., and Griffiths, T. L. Evaluating +(and improving) the correspondence between deep neural +networks and human representations. Cognitive science, +42(8):2648–2669, 2018. +Roads, B. D. and Love, B. C. Learning as the unsuper- +vised alignment of conceptual systems. Nature Machine +Intelligence, 2(1):76–82, 2020. +Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., +Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, +M., et al. 
ImageNet large scale visual recognition chal- +lenge. International journal of computer vision, 115(3): +211–252, 2015. +Shepard, R. N. Multidimensional scaling, tree-fitting, and +clustering. Science, 210(4468):390–398, 1980. +Soares, N. and Fallenstein, B. Aligning superintelligence +with human interests: A technical research agenda. + +Alignment with human representations supports robust few-shot learning +Stolk, A., Verhagen, L., and Toni, I. Conceptual alignment: +How brains achieve mutual understanding. Trends in +Cognitive Sciences, 20(3):180–191, 2016. ISSN 1364- +6613. doi: https://doi.org/10.1016/j.tics.2015.11.007. +Sucholutsky, I., Marjieh, R., Jacoby, N., and Griffiths, T. L. +On the informativeness of supervision signals. arXiv +preprint arXiv:2211.01407, 2022. +Tucker, M., Zhou, Y., and Shah, J. A. Latent space alignment +using adversarially guided self-play. International Jour- +nal of Human–Computer Interaction, 38(18-20):1753– +1771, 2022. +Vallat, R. Pingouin: Statistics in Python. Journal of Open +Source Software, 3(31):1026, 2018. doi: 10.21105/joss. +01026. +Wang, H., Ge, S., Lipton, Z., and Xing, E. P. Learning +robust global representations by penalizing local predic- +tive power. Advances in Neural Information Processing +Systems, 32, 2019. +Wightman, +R. +PyTorch +Image +Models. +https://github.com/rwightman/ +pytorch-image-models, 2019. +Yudkowsky, E. The AI alignment problem: why it is hard, +and where to start. +Symbolic Systems Distinguished +Speaker, 2016. + +Alignment with human representations supports robust few-shot learning +A. Additional theoretical results +Definition A.1. Consider model A with input space X ⊆ Rnd, previously observed data x ∼ X, and k class centroids +c ∈ Rkd learned by A. We define domain shift as an update to the class centroids c → c∗ ∈ Rkd. Domain shift sensitivity +is then the proportion of triplets flipped as a result of this update. 
+σA(c, c∗) := E[||SA(x; c) − SA(x; c∗)||1 / |SA(x; c)|]
+From this definition and Theorem 3.9, it immediately follows that sensitivity to domain shift should have the same U-shaped
+relationship with alignment that few-shot learning does in cases where the teacher model is robust to domain shift.
+Corollary A.2. (Alignment and domain-shift robustness). Consider input space X ⊆ Rnd, shared data x ∼ X, and three
+models, A, B1, and B2 with DP (A, B1; X) = ϵB1 and DP (A, B2; X) = ϵB2. Let c ∈ Rkd be k class centroids learned by
+A, B1 and B2. If σA(c, c∗) = 0 and |0.5 − ϵB1| < |0.5 − ϵB2|, then σB2(c, c∗) < σB1(c, c∗).
+We can also use this framework to define robustness to adversarial examples. We assume that an adversarial example is an
+object that maximizes perceptual (i.e. representational) disagreement between the teacher and the student.
+Definition A.3. Consider input space X ⊆ Rnd, shared data x ∼ X, and two models, A and B, with DP (A, B; X) = ϵB.
+An adversarial example is an object e ∈ Rd that maximizes disagreement between A and B on S(x; e), the subset of
+n(n − 1)/2 triplets relating the objects in x to e.
+e = arg max
+e∈X ||SA(x; e) − SB(x; e)||1
+(1)
+Using Definition 3.6 we immediately get the following result.
+Lemma A.4. Consider an input space X ⊆ Rnd, and two agents, A and B. DP (A, B; X) = E[ ||SA(X) − SB(X)||1 / (n(n−1)(n−2)/2) ].
+We can now show that a model that is more aligned with the teacher will, on average, also be more robust to adversarial
+examples.
+Theorem A.5. (Alignment and adversarial robustness). Consider input space X ⊆ Rnd, shared data x ∼ X, and three
+models, A, B1, and B2 with DP (A, B1; X) = ϵB1 and DP (A, B2; X) = ϵB2. If ϵB1 < ϵB2, then E[maxe∈X ||SA(x; e) −
+SB1(x; e)||1] < E[maxe∈X ||SA(x; e) − SB2(x; e)||1].
+Proof. Note that for a set of k binomial random variables Xi ∼ Bin(n, p), the expectation of the k-th order statistic
+is E[X(k)] = Σn
+x=0(1 − F(x; n, p)k) where F(x; n, p) = P(Xi ≤ x).
In the case of adversarial examples, let Xi be
+a random variable corresponding to the set of objects sampled uniformly from the input space X ⊆ Rnd then U =
+||SA(X; e) − SB1(X; e)||1, U ∼ Bin(n(n − 1)/2, ϵB1) and similarly V = ||SA(X; e) − SB2(X; e)||1, V ∼ Bin(n(n −
+1)/2, ϵB2). In that case, the expected disagreement of A and B1 on an adversarial example is E[U(n)] = Σn(n−1)/2
+x=0
+(1 −
+F(x; n(n − 1)/2, ϵB1)n) and for A and B2 it is E[V(n)] = Σn(n−1)/2
+x=0
+(1 − F(x; n(n − 1)/2, ϵB2)n). If ϵB1 < ϵB2, then
+F(x; n(n − 1)/2, ϵB1) > F(x; n(n − 1)/2, ϵB2) and thus E[U(n)] < E[V(n)].
+Remark A.6. While this theorem shows that increased alignment generally leads to increased adversarial robustness, this
+relies on a representational metric of adversarial examples. However, in practice, adversarial robustness is often measured
+using hard classification error as a simple proxy. This proxy does not capture the fine-grained degree of misalignment
+between humans and a model on each example. As a result, when measuring adversarial robustness using this proxy, the
+effect of alignment may be dampened by the U-shaped effect seen in other classification settings as mentioned above.
+B.
List of 491 models used in experiments +adv inception v3, +bat resnext26ts, +beit base patch16 224, +beit base patch16 384, +beit large patch16 224, +beit large patch16 384, botnet26t 256, cait s24 224, cait s24 384, cait s36 384, cait xs24 384, cait xxs24 224, +cait xxs24 384, cait xxs36 224, cait xxs36 384, coat lite mini, coat lite small, coat lite tiny, coat mini, coat tiny, +convit base, convit small, convit tiny, convmixer 1024 20 ks9 p14, convmixer 1536 20, convmixer 768 32, con- +vnext base, +convnext base 384 in22ft1k, +convnext base in22ft1k, +convnext large, +convnext large 384 in22ft1k, + +Alignment with human representations supports robust few-shot learning +convnext large in22ft1k, convnext small, convnext tiny, cspdarknet53, cspresnet50, cspresnext50, deit base patch16 224, +deit base patch16 384, +deit small patch16 224, +deit tiny patch16 224, +densenet121, +densenet161, +densenet169, +densenet201, densenetblur121d, dla102, dla102x, dla102x2, dla169, dla34, dla46 c, dla46x c, dla60, dla60 res2net, +dla60 res2next, dla60x, dla60x c, dm nfnet f0, dm nfnet f1, dm nfnet f2, dpn107, dpn131, dpn68, dpn68b, dpn92, +dpn98, eca botnext26ts 256, eca halonext26ts, eca nfnet l0, eca nfnet l1, eca nfnet l2, eca resnet33ts, eca resnext26ts, +ecaresnet101d, ecaresnet101d pruned, ecaresnet269d, ecaresnet26t, ecaresnet50d, ecaresnet50d pruned, ecaresnet50t, +ecaresnetlight, efficientnet b0, efficientnet b1, efficientnet b1 pruned, efficientnet b2, efficientnet b2 pruned, efficientnet b3, +efficientnet b3 pruned, efficientnet b4, efficientnet el, efficientnet el pruned, efficientnet em, efficientnet es, efficient- +net es pruned, efficientnet lite0, efficientnetv2 rw m, efficientnetv2 rw s, efficientnetv2 rw t, ens adv inception resnet v2, +ese vovnet19b dw, +ese vovnet39b, +fbnetc 100, +fbnetv3 b, +fbnetv3 d, +fbnetv3 g, +gc efficientnetv2 rw t, +gcres- +net33ts, gcresnet50t, gcresnext26ts, gcresnext50ts, gernet l, gernet m, gernet s, ghostnet 100, gluon inception 
v3, +gluon resnet101 v1b, +gluon resnet101 v1c, +gluon resnet101 v1d, +gluon resnet101 v1s, +gluon resnet152 v1b, +gluon resnet152 v1c, +gluon resnet152 v1d, +gluon resnet152 v1s, +gluon resnet18 v1b, +gluon resnet34 v1b, +gluon resnet50 v1b, +gluon resnet50 v1c, +gluon resnet50 v1d, +gluon resnet50 v1s, +gluon resnext101 32x4d, +gluon resnext101 64x4d, gluon resnext50 32x4d, gluon senet154, gluon seresnext101 32x4d, gluon seresnext101 64x4d, +gluon seresnext50 32x4d, +gluon xception65, +gmixer 24 224, +gmlp s16 224, +halo2botnet50ts 256, +halonet26t, +halonet50ts, +haloregnetz b, +hardcorenas a, +hardcorenas b, +hardcorenas c, +hardcorenas d, +hardcorenas e, +hard- +corenas f, +hrnet w18, +hrnet w18 small, +hrnet w18 small v2, +hrnet w30, +hrnet w32, +hrnet w40, +hrnet w44, +hrnet w48, +hrnet w64, +ig resnext101 32x16d, +ig resnext101 32x8d, +inception resnet v2, +inception v3, +incep- +tion v4, jx nest base, jx nest small, jx nest tiny, lambda resnet26rpt 256, lambda resnet26t, lambda resnet50ts, +lamhalobotnet50ts 256, lcnet 050, lcnet 075, lcnet 100, legacy senet154, legacy seresnet101, legacy seresnet152, +legacy seresnet18, +legacy seresnet34, +legacy seresnet50, +legacy seresnext101 32x4d, +legacy seresnext26 32x4d, +legacy seresnext50 32x4d, mixer b16 224, mixer b16 224 miil, mixnet l, mixnet m, mixnet s, mixnet xl, mnasnet 100, +mnasnet small, mobilenetv2 050, mobilenetv2 100, mobilenetv2 110d, mobilenetv2 120d, mobilenetv2 140, mo- +bilenetv3 large 100, mobilenetv3 large 100 miil, mobilenetv3 rw, nasnetalarge, nf regnet b1, nf resnet50, nfnet l0, +pit b 224, pit s 224, pit ti 224, pit xs 224, pnasnet5large, regnetx 002, regnetx 004, regnetx 006, regnetx 008, +regnetx 016, regnetx 032, regnetx 040, regnetx 064, regnetx 080, regnetx 120, regnetx 160, regnetx 320, reg- +nety 002, regnety 004, regnety 006, regnety 008, regnety 016, regnety 032, regnety 040, regnety 064, regnety 080, +regnety 120, regnety 160, regnety 320, regnetz b16, regnetz c16, regnetz 
d32, regnetz d8, regnetz e8, repvgg a2, +repvgg b0, +repvgg b1, +repvgg b1g4, +repvgg b2, +repvgg b2g4, +repvgg b3, +repvgg b3g4, +res2net101 26w 4s, +res2net50 14w 8s, +res2net50 26w 4s, +res2net50 26w 6s, +res2net50 26w 8s, +res2net50 48w 2s, +res2next50, +resmlp 12 224, resmlp 12 distilled 224, resmlp 24 224, resmlp 24 distilled 224, resmlp 36 224, resmlp 36 distilled 224, +resmlp big 24 224, resmlp big 24 224 in22ft1k, resmlp big 24 distilled 224, resnest101e, resnest14d, resnest200e, +resnest269e, resnest26d, resnest50d, resnest50d 1s4x24d, resnest50d 4s2x40d, resnet101, resnet101d, resnet152, resnet152d, +resnet18, resnet18d, resnet200d, resnet26, resnet26d, resnet26t, resnet32ts, resnet33ts, resnet34, resnet34d, resnet50, +resnet50 gn, resnet50d, resnet51q, resnet61q, resnetblur50, resnetrs101, resnetrs152, resnetrs200, resnetrs270, resnetrs350, +resnetrs420, resnetrs50, resnetv2 101, resnetv2 101x1 bitm, resnetv2 50, resnetv2 50x1 bit distilled, resnetv2 50x1 bitm, +resnext101 32x8d, resnext26ts, resnext50 32x4d, resnext50d 32x4d, rexnet 100, rexnet 130, rexnet 150, rexnet 200, +sebotnet33ts 256, sehalonet33ts, selecsls42b, selecsls60, selecsls60b, semnasnet 075, semnasnet 100, seresnet152d, +seresnet33ts, seresnet50, seresnext26d 32x4d, seresnext26t 32x4d, seresnext26ts, seresnext50 32x4d, skresnet18, +skresnet34, skresnext50 32x4d, spnasnet 100, ssl resnet18, ssl resnet50, ssl resnext101 32x16d, ssl resnext101 32x4d, +ssl resnext101 32x8d, +ssl resnext50 32x4d, +swin base patch4 window12 384, +swin base patch4 window7 224, +swin large patch4 window12 384, +swin large patch4 window7 224, +swin small patch4 window7 224, +swin tiny patch4 window7 224, +swsl resnet18, +swsl resnet50, +swsl resnext101 32x16d, +swsl resnext101 32x4d, +swsl resnext101 32x8d, +swsl resnext50 32x4d, +tf efficientnet b0, +tf efficientnet b0 ap, +tf efficientnet b0 ns, +tf efficientnet b1, tf efficientnet b1 ap, tf efficientnet b1 ns, tf efficientnet b2, tf efficientnet b2 ap, tf 
efficientnet b2 ns, +tf efficientnet b3, tf efficientnet b3 ap, tf efficientnet b3 ns, tf efficientnet b4, tf efficientnet b4 ap, tf efficientnet b4 ns, +tf efficientnet b5, tf efficientnet b5 ap, tf efficientnet b5 ns, tf efficientnet b6, tf efficientnet b6 ap, tf efficientnet b6 ns, +tf efficientnet b7, +tf efficientnet b7 ap, +tf efficientnet b7 ns, +tf efficientnet cc b0 4e, +tf efficientnet cc b0 8e, +tf efficientnet cc b1 8e, tf efficientnet el, tf efficientnet em, tf efficientnet es, tf efficientnet lite0, tf efficientnet lite1, +tf efficientnet lite2, tf efficientnet lite3, tf efficientnet lite4, tf efficientnetv2 b0, tf efficientnetv2 b1, tf efficientnetv2 b2, +tf efficientnetv2 b3, tf efficientnetv2 l, tf efficientnetv2 l in21ft1k, tf efficientnetv2 m, tf efficientnetv2 m in21ft1k, +tf efficientnetv2 s, +tf efficientnetv2 s in21ft1k, +tf inception v3, +tf mixnet l, +tf mixnet m, +tf mixnet s, +tf mobilenetv3 large 075, +tf mobilenetv3 large 100, +tf mobilenetv3 large minimal 100, +tf mobilenetv3 small 075, + +Alignment with human representations supports robust few-shot learning +tf mobilenetv3 small 100, tf mobilenetv3 small minimal 100, tinynet a, tinynet b, tinynet c, tinynet d, tinynet e, +tnt s patch16 224, +tv densenet121, +tv resnet101, +tv resnet152, +tv resnet34, +tv resnet50, +tv resnext50 32x4d, +twins pcpvt base, twins pcpvt large, twins pcpvt small, twins svt base, twins svt large, twins svt small, vgg11, vgg11 bn, +vgg13, vgg13 bn, vgg16, vgg16 bn, vgg19, vgg19 bn, visformer small, vit base patch16 224, vit base patch16 224 miil, +vit base patch16 384, +vit base patch32 224, +vit base patch32 384, +vit base patch8 224, +vit base r50 s16 384, +vit small patch16 224, vit small patch16 384, vit small patch32 224, vit small patch32 384, vit small r26 s32 224, +vit small r26 s32 384, +vit tiny patch16 224, +vit tiny patch16 384, +vit tiny r s16 p8 224, +vit tiny r s16 p8 384, +wide resnet101 2, +wide resnet50 2, +xception, +xception41, 
+xception65, +xception71, +xcit large 24 p16 224, +xcit large 24 p16 224 dist, +xcit large 24 p16 384 dist, +xcit large 24 p8 224, +xcit large 24 p8 224 dist, +xcit large 24 p8 384 dist, +xcit medium 24 p16 224, +xcit medium 24 p16 224 dist, +xcit medium 24 p16 384 dist, +xcit medium 24 p8 224, +xcit medium 24 p8 224 dist, +xcit nano 12 p16 224, +xcit nano 12 p16 224 dist, +xcit nano 12 p16 384 dist, +xcit nano 12 p8 224, +xcit nano 12 p8 224 dist, +xcit nano 12 p8 384 dist, +xcit small 12 p16 224, +xcit small 12 p16 224 dist, +xcit small 12 p16 384 dist, +xcit small 12 p8 224, +xcit small 12 p8 224 dist, +xcit small 12 p8 384 dist, +xcit small 24 p16 224, +xcit small 24 p16 224 dist, +xcit small 24 p16 384 dist, +xcit small 24 p8 224, +xcit small 24 p8 224 dist, +xcit small 24 p8 384 dist, +xcit tiny 12 p16 224, xcit tiny 12 p16 224 dist, xcit tiny 12 p16 384 dist, xcit tiny 12 p8 224, xcit tiny 12 p8 224 dist, +xcit tiny 12 p8 384 dist, xcit tiny 24 p16 224, xcit tiny 24 p16 224 dist, xcit tiny 24 p16 384 dist, xcit tiny 24 p8 224, +xcit tiny 24 p8 224 dist, xcit tiny 24 p8 384 dist + diff --git a/FdFLT4oBgHgl3EQfGC-x/content/tmp_files/load_file.txt b/FdFLT4oBgHgl3EQfGC-x/content/tmp_files/load_file.txt new file mode 100644 index 0000000000000000000000000000000000000000..3d0d3b6b52d17cca49fdde0b6bdb01f95f9019a4 --- /dev/null +++ b/FdFLT4oBgHgl3EQfGC-x/content/tmp_files/load_file.txt @@ -0,0 +1,1330 @@ +filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf,len=1329 +page_content='Alignment with human representations supports robust few-shot learning Ilia Sucholutsky 1 Thomas L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' Griffiths 1 2 Abstract Should we care whether AI systems have repre- sentations of the world that are similar to those of humans?' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' We provide an information-theoretic analysis that suggests that there should be a U- shaped relationship between the degree of rep- resentational alignment with humans and perfor- mance on few-shot learning tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' We confirm this prediction empirically, finding such a relation- ship in an analysis of the performance of 491 com- puter vision models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' We also show that highly- aligned models are more robust to both adversar- ial attacks and domain shifts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' Our results suggest that human-alignment is often a sufficient, but not necessary, condition for models to make effective use of limited data, be robust, and generalize well.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' Introduction As AI systems are increasingly deployed in settings that involve interactions with humans, exploring the extent to which these systems are aligned with humans becomes more significant.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' While this exploration has largely focused on the alignment of the values of AI systems with humans (Gabriel, 2020;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' Kirchner et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=', 2022), the alignment of their represen- tations is also important.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' Representing the world in the same way is a precursor to being able to express common values and to comprehensible generalization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' To the extent that humans have accurate representations of the world, repre- sentational alignment is also an effective source of inductive bias that might make it possible to learn from limited data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' As a motivating example, imagine a meeting between a 16th century alchemist and a 21st century chemist.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' They live in the same physical world and are intimately familiar with the materials that comprise it, but they would have significant difficulty expressing their values and generaliz- ing the results of an experiment they observe together.' 
metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' The alchemist would likely learn poorly from examples of a reaction demonstrated by the chemist, not having the right inductive biases for the way the world actually works.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' The 1Department of Computer Science, Princeton University, USA 2Department of Psychology, Princeton University, USA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/FdFLT4oBgHgl3EQfGC-x/content/2301.11990v1.pdf'} +page_content=' Corre- spondence to: Ilia Sucholutsky