jackkuo commited on
Commit
9023e3a
·
verified ·
1 Parent(s): 436bb3b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. -tFLT4oBgHgl3EQfvS-t/content/tmp_files/2301.12159v1.pdf.txt +1306 -0
  2. -tFLT4oBgHgl3EQfvS-t/content/tmp_files/load_file.txt +0 -0
  3. .gitattributes +64 -0
  4. 09E1T4oBgHgl3EQflAR-/content/tmp_files/2301.03280v1.pdf.txt +1397 -0
  5. 09E1T4oBgHgl3EQflAR-/content/tmp_files/load_file.txt +0 -0
  6. 0NE1T4oBgHgl3EQfRQO7/content/tmp_files/2301.03051v1.pdf.txt +1086 -0
  7. 0NE1T4oBgHgl3EQfRQO7/content/tmp_files/load_file.txt +434 -0
  8. 1dAyT4oBgHgl3EQfPfaV/content/tmp_files/2301.00026v1.pdf.txt +2071 -0
  9. 1dAyT4oBgHgl3EQfPfaV/content/tmp_files/load_file.txt +0 -0
  10. 3NA0T4oBgHgl3EQfNP9P/content/2301.02143v1.pdf +3 -0
  11. 3NA0T4oBgHgl3EQfNP9P/vector_store/index.faiss +3 -0
  12. 3NFAT4oBgHgl3EQfEBxO/content/2301.08419v1.pdf +3 -0
  13. 3NFAT4oBgHgl3EQfEBxO/vector_store/index.faiss +3 -0
  14. 3tFRT4oBgHgl3EQfojeI/vector_store/index.pkl +3 -0
  15. 4NE1T4oBgHgl3EQfAgLJ/content/2301.02841v1.pdf +3 -0
  16. 4NE1T4oBgHgl3EQfAgLJ/vector_store/index.pkl +3 -0
  17. 4dFIT4oBgHgl3EQf6yuB/content/2301.11395v1.pdf +3 -0
  18. 59E1T4oBgHgl3EQfmwQa/content/2301.03300v1.pdf +3 -0
  19. 59E1T4oBgHgl3EQfmwQa/vector_store/index.faiss +3 -0
  20. 59E1T4oBgHgl3EQfmwQa/vector_store/index.pkl +3 -0
  21. 69AyT4oBgHgl3EQf2vl7/content/2301.00756v1.pdf +3 -0
  22. 6dE4T4oBgHgl3EQfcQxJ/vector_store/index.faiss +3 -0
  23. 7NFAT4oBgHgl3EQfoB2V/content/tmp_files/2301.08632v1.pdf.txt +1161 -0
  24. 7NFAT4oBgHgl3EQfoB2V/content/tmp_files/load_file.txt +0 -0
  25. 7tE3T4oBgHgl3EQfRgnj/content/tmp_files/2301.04423v1.pdf.txt +417 -0
  26. 7tE3T4oBgHgl3EQfRgnj/content/tmp_files/load_file.txt +377 -0
  27. 7tE4T4oBgHgl3EQfcwyw/content/2301.05086v1.pdf +3 -0
  28. 7tE4T4oBgHgl3EQfcwyw/vector_store/index.pkl +3 -0
  29. 89E1T4oBgHgl3EQfCALf/content/2301.02860v1.pdf +3 -0
  30. 89E1T4oBgHgl3EQfCALf/vector_store/index.faiss +3 -0
  31. 89E1T4oBgHgl3EQfCALf/vector_store/index.pkl +3 -0
  32. 8NE2T4oBgHgl3EQf8Ag-/content/tmp_files/2301.04214v1.pdf.txt +572 -0
  33. 8NE2T4oBgHgl3EQf8Ag-/content/tmp_files/load_file.txt +364 -0
  34. 8dAyT4oBgHgl3EQf2_nF/content/2301.00762v1.pdf +3 -0
  35. 8dAyT4oBgHgl3EQf2_nF/vector_store/index.faiss +3 -0
  36. 8dAyT4oBgHgl3EQf2_nF/vector_store/index.pkl +3 -0
  37. 99E3T4oBgHgl3EQfSQn_/vector_store/index.faiss +3 -0
  38. 99FLT4oBgHgl3EQfCS7y/vector_store/index.pkl +3 -0
  39. 9NE1T4oBgHgl3EQfnwSt/content/2301.03313v1.pdf +3 -0
  40. 9NE1T4oBgHgl3EQfnwSt/vector_store/index.faiss +3 -0
  41. 9NE1T4oBgHgl3EQfnwSt/vector_store/index.pkl +3 -0
  42. B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf +0 -0
  43. B9AzT4oBgHgl3EQfGPvM/content/tmp_files/2301.01026v1.pdf.txt +389 -0
  44. B9AzT4oBgHgl3EQfGPvM/content/tmp_files/load_file.txt +420 -0
  45. BNE2T4oBgHgl3EQfRgdU/content/2301.03781v1.pdf +3 -0
  46. BNE2T4oBgHgl3EQfRgdU/vector_store/index.faiss +3 -0
  47. BNE2T4oBgHgl3EQfRgdU/vector_store/index.pkl +3 -0
  48. C9E5T4oBgHgl3EQfUA9Q/content/2301.05540v1.pdf +3 -0
  49. C9E5T4oBgHgl3EQfUA9Q/vector_store/index.pkl +3 -0
  50. GdE4T4oBgHgl3EQfHgxG/vector_store/index.faiss +3 -0
-tFLT4oBgHgl3EQfvS-t/content/tmp_files/2301.12159v1.pdf.txt ADDED
@@ -0,0 +1,1306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.12159v1 [cs.CV] 28 Jan 2023
2
+ ClusterFuG: Clustering Fully connected Graphs by Multicut
3
+ Ahmed Abbas 1 Paul Swoboda 1 2
4
+ Abstract
5
+ We propose a graph clustering formulation based
6
+ on multicut (a.k.a. weighted correlation cluster-
7
+ ing) on the complete graph.
8
+ Our formulation
9
+ does not need specification of the graph topology
10
+ as in the original sparse formulation of multicut,
11
+ making our approach simpler and potentially bet-
12
+ ter performing. In contrast to unweighted corre-
13
+ lation clustering we allow for a more expressive
14
+ weighted cost structure. In dense multicut, the
15
+ clustering objective is given in a factorized form
16
+ as inner products of node feature vectors. This al-
17
+ lows for an efficient formulation and inference in
18
+ contrast to multicut/weighted correlation cluster-
19
+ ing, which has at least quadratic representation
20
+ and computation complexity when working on
21
+ the complete graph. We show how to rewrite clas-
22
+ sical greedy algorithms for multicut in our dense
23
+ setting and how to modify them for greater ef-
24
+ ficiency and solution quality. In particular, our
25
+ algorithms scale to graphs with tens of thousands
26
+ of nodes. Empirical evidence on instance seg-
27
+ mentation on Cityscapes and clustering of Ima-
28
+ geNet datasets shows the merits of our approach.
29
+ 1. Introduction
30
+ Graph-based clustering approaches, primarily among them
31
+ multicut (Chopra & Rao, 1993), are theoretically appeal-
32
+ ing: They do not need specification of the number of clus-
33
+ ters, but infer them as part of the optimization process.
34
+ They allow for a flexible clustering objective with attrac-
35
+ tive and repulsive costs between pairs of nodes.
36
+ They
37
+ are also theoretically well-understood as optimization prob-
38
+ lems with intensively studied polyhedral descriptions. Effi-
39
+ cient solvers that scale well and give high quality solutions
40
+ have also been developed.
41
+ As a drawback, graph-based clustering approaches need
42
+ specification of the underlying graph topology. In prac-
43
+ 1MPI for Informatics, Saarland Informatics Campus, Germany
44
+ 2University of Mannheim, Germany. Correspondence to: Ahmed
45
+ Abbas <ahmed.abbas@mpi-inf.mpg.de>.
46
+ Preprint.
47
+ tice, this means an additional engineering effort as well as
48
+ the possibility to not get it right, which would decrease the
49
+ downstream task performance. Naively circumventing this
50
+ challenge by using the complete graph is not scalable – the
51
+ number of edges grows quadratically. One approach to re-
52
+ solve this conundrum is graph structure learning, which in-
53
+ fers the graph topology as part of the inference process, but
54
+ adds considerable additional complexity.
55
+ We propose a method to solve graph clustering efficiently
56
+ on complete graphs. Our formulation will use the well-
57
+ known edge-based multicut formulation and only restrict
58
+ the way edge costs can be computed: they need to be based
59
+ on inner products of node features. This has two advan-
60
+ tages: First, it reduces storage requirements. Instead of
61
+ storing a full adjacency matrix of edge costs as in multicut,
62
+ which grows quadratically with the number of nodes, we
63
+ only need to store a linear number of node features and can
64
+ compute edge costs on demand. Second, operations needed
65
+ in multicut algorithms can be made scalable. Instead of
66
+ operating on the complete graph we can sparsify it adap-
67
+ tively during the solving process. This allows to simulate
68
+ the workings of multicut algorithms on complete graphs by
69
+ working on a small subset of it. The key technical ingre-
70
+ dient to obtain these sparse subgraphs will be fast nearest
71
+ neighbor search, for which efficient and scalable implemen-
72
+ tations exist (Johnson et al., 2019). In effect, this allows us
73
+ to solve large dense multicut instances in moderate time,
74
+ which is not possible with existing solvers. In detail, our
75
+ contribution is as follows:
76
+ Formulation: We propose multicut on complete graphs
77
+ with factorized edge costs as an efficiently repre-
78
+ sentable graph clustering formalism.
79
+ Algorithm: We propose scalable algorithms for solving
80
+ the dense multicut problems, one mimicking exactly
81
+ the original greedy additive edge contraction (GAEC)
82
+ algorithm (Keuper et al., 2015), the other a more effi-
83
+ cient variant in the spirit of the balanced edge contrac-
84
+ tion heuristic (Kardoost & Keuper, 2018)1.
85
+ Empirical: We show efficacy in terms of memory and run-
86
+ time of our solvers and show the merit of using them
87
+ 1Our
88
+ code
89
+ is
90
+ available
91
+ at
92
+ https://github.com/aabbas90/cluster-fug
93
+
94
+ Clustering Fully connected Graphs by Multicut
95
+ for image segmentation on Cityscapes and clustering
96
+ of ImageNet classification dataset.
97
+ 2. Related work
98
+ Multicut and correlation clustering:
99
+ The original mul-
100
+ ticut problem is formulated as an extension of the min-
101
+ cut problem to multiple terminals with non-negative edge
102
+ costs (Hu, 1963).
103
+ In machine learning the multicut
104
+ problem is defined differently and is equivalent (up to
105
+ variable involution) to the correlation clustering prob-
106
+ lem (Demaine et al., 2006), i.e. arbitrary edges costs and
107
+ no terminals. For the purpose of this work we will use the
108
+ latter definition of multicut. The polyhedral geometry of
109
+ the multicut problem has been studied in (Deza et al., 1992;
110
+ Chopra & Rao, 1993; Oosten et al., 2001).
111
+ Although the multicut problem is NP-Hard (Bansal et al.,
112
+ 2004; Demaine et al., 2006), greedy algorithms perform
113
+ well in practice for computer vision and machine learn-
114
+ ing tasks (Keuper et al., 2015; Levinkov et al., 2017;
115
+ Bailoni et al., 2022).
116
+ More involved algorithms in-
117
+ clude message passing in the dual domain for multi-
118
+ cut, studied in (Swoboda & Andres, 2017; Lange et al.,
119
+ 2018; Abbas & Swoboda, 2022).
120
+ These algorithms give
121
+ lower bounds and improved primal solutions.
122
+ Another
123
+ line of efficient primal heuristics is based on move-
124
+ making (Beier et al., 2014; 2015). All these graphs, while
125
+ efficient, scale with the number of edges, making them
126
+ unsuitable for very large dense graphs.
127
+ Algorithms for
128
+ correlation clustering on complete graphs were proposed
129
+ in (Pan et al., 2015; Veldt, 2022). However, they only al-
130
+ low unweighted edges. In this paper we consider efficient
131
+ algorithms on full graphs and with weighted edges.
132
+ K-Means:
133
+ The K-means problem (Lloyd, 1982) is sim-
134
+ ilar to our approach in that it works directly on feature
135
+ representations and its objective is based on L2-distances
136
+ between features. Similarly to our algorithm, large num-
137
+ ber of points are handled by efficiently computing kNN-
138
+ graphs (Qaddoura et al., 2020), thereby reducing run time.
139
+ In contrast to multicut, the number of clusters must be
140
+ given a-priori, while in multicut it is derived as part of the
141
+ optimization process.
142
+ Other clustering approaches:
143
+ There are a number of
144
+ other paradigms for clustering. A prominent approach is
145
+ spectral clustering, in which a weighted graph is given and
146
+ a clustering is computed with the help of the eigenvec-
147
+ tors of the graph Laplacian (Von Luxburg, 2007; Jia et al.,
148
+ 2014). The work (Dhillon et al., 2007) shows connections
149
+ between weighted k-means and multiple spectral clustering
150
+ approaches. As for K-means and unlike multicut, spectral
151
+ clustering requires the number of clusters to be specified.
152
+ i
153
+ j
154
+ fi fj
155
+ (0, 0)
156
+ Figure 1: Example illustration of dense multicut prob-
157
+ lem (3) on 5 nodes. Each node i is associated with a vec-
158
+ tor fi ∈ R2 and all possible edges between distinct nodes
159
+ are considered (i.e. the complete graph). The edge cost be-
160
+ tween a pair of nodes i, j is measured by ⟨fi, fj⟩ and attrac-
161
+ tive/repulsive edges are colored green/red. Edge thickness
162
+ represents absolute edge cost. Also shown is the optimal
163
+ partitioning to 2 clusters with cut edges denoted by dashed
164
+ lines.
165
+ 3. Method
166
+ A decomposition (or clustering) of a weighted graph G =
167
+ (V, E, c) with vertices V , edges E and edge costs c ∈ RE
168
+ can be obtained by solving the following multicut problem
169
+ $\min_{y \in \mathcal{M}_G} \sum_{ij \in E} c_{ij} y_{ij}.$
+ (1)
175
+ We say that an edge ij with cij > 0 is attractive. Its end-
176
+ points prefer to be in the same cluster. In the opposite case
177
+ cij < 0 we call the edge repulsive. The set MG enumer-
178
+ ates all possible partitions of G defined as
179
+ $\mathcal{M}_G = \left\{ \delta(V_1, \ldots, V_n) \;:\; n \in \mathbb{N},\; V_i \cap V_j = \emptyset \;\; \forall i \neq j,\; V_1 \,\dot\cup\, \ldots \,\dot\cup\, V_n = V \right\}.$
+ (2)
191
+ where δ(·, . . . , ·) ⊆ E is the set of edges straddling distinct
192
+ components.
193
+ The goal of our work is to consider the scenario when the
194
+ graph G is complete i.e. E = {ij : i ∈ V, j ∈ V \ {i}}.
195
+ For large graphs storage and processing of edge costs c be-
196
+ comes prohibitive. To address this issue we instead require
197
+ as input a feature vector fi ∈ Rd for each node i in V . The
198
+ edge costs between a pair of nodes i and j can then be mea-
199
+ sured on-demand through some function s(fi, fj) → R. In
200
+ this case the multicut problem becomes
201
+ $\min_{y \in \mathcal{M}_G} \sum_{i \in V} \sum_{j \in V \setminus \{i\}} s(f_i, f_j)\, y_{ij},$
+ (3)
209
+
210
+ Clustering Fully connected Graphs by Multicut
211
+ which we term as dense multicut problem. An illustration
212
+ of our formulation is given in Figure 1. In the following
213
+ we first revisit an algorithm to approximately solve (1) and
214
+ show its extensions for dense multicut problem (3).
215
+ 3.1. Greedy Additive Edge Contraction
216
+ The
217
+ greedy
218
+ additive
219
+ edge
220
+ contraction
221
+ (GAEC)
222
+ scheme
223
+ (Keuper et al.,
224
+ 2015)
225
+ computes
226
+ approximate
227
+ solution of the multicut problem (1) as given in Algo-
228
+ rithm 1. It initializes each node as a separate cluster and
229
+ iteratively contracts a pair of nodes i, j with the largest
230
+ non-negative cost cij (if it exists). Let m be the node i and
231
+ j are contracted to. The edge costs of edges incident to m
232
+ are
233
+ cml = cil + cjl, l ∈ Ni ∪ Nj \ {i, j},
234
+ (4)
235
+ where costs of non-existing edges are assumed to be 0
236
+ and Ni corresponds to neighbours of i in graph G. For
237
+ complete graphs directly applying this algorithm by oper-
238
+ ating on edge costs is computationally expensive. More-
239
+ over, since each node is connected to all other nodes (Ni =
240
+ V \ {i}), cost updates (4) during edge contraction take
241
+ O(|V |) instructions.
242
+ Algorithm 1: GAEC (Keuper et al., 2015)
243
+ Data: Weighted graph G = (V, E, c)
244
+ Result: Clusters V
245
+ 1 while maxuv∈E cuv ≥ 0 do
246
+ 2
247
+ m := ij = arg maxuv∈E cuv
248
+ // Aggregate edge costs
249
+ 3
250
+ cml = cil + cjl, l ∈ Ni ∪ Nj \ {i, j}
251
+ // Update edges
252
+ 4
253
+ E = (E∪{ml|l ∈ Ni∪Nj})\{il}l∈Ni ∪{jl}l∈Nj
254
+ // Update nodes
255
+ 5
256
+ V = (V ∪ m) \ {i, j}
257
+ Contraction on complete graphs:
258
+ We show how to per-
259
+ form a more efficient (and equivalent) contraction by oper-
260
+ ating on the node features f by our formulation (3) for the
261
+ particular case of s(·, ·) defined as
262
+ s(fi, fj) = ⟨fi, fj⟩.
263
+ (5)
264
+ From now on, unless stated otherwise, our edge costs will
265
+ be given by (5).
266
+ Lemma 3.1 (Contraction with node features). Assume
267
+ edge costs are measured by (5) and nodes i and j are con-
268
+ tracted to m. Then features of node m given by
269
+ fm = fi + fj
270
+ (6)
271
+ produce contracted edge costs according to (4).
272
+ Proof. By applying (5) for l ∈ V and comparing with (4)
273
+ we get
274
+ s(fm, fl) = ⟨fm, fl⟩ = ⟨fi, fl⟩ + ⟨fj, fl⟩
275
+ = s(fi, fl) + s(fj, fl) .
276
+ Next we will build on the previous result to devise heuris-
277
+ tics for solving dense multicut problem (3) efficiently.
278
+ GAEC for complete graphs:
279
+ We devise an algorithm
280
+ which exactly imitates GAEC (Keuper et al., 2015) but
281
+ is applicable to our formulation on complete graphs (3).
282
+ Specifically to make GAEC efficient with node features
283
+ and a complete graph, we sparsify the original graph G by
284
+ working on its directed k-nearest neighbours (NN) graph
285
+ (V, A). The NN graph stores candidate edges for contrac-
286
+ tion.
287
+ The arc set A is populated by nearest neighbour
288
+ search w.r.t. feature similarity (5) and is updated on each
289
+ edge contraction. We denote by N +
290
+ i
291
+ the set of outgoing
292
+ neighbours of i in the NN graph i.e. {l|(l, i) ∈ A} and simi-
293
+ larly by N −
294
+ i the incoming neighbours. Moreover we define
295
+ N +
296
+ ij as N +
297
+ i ∪ N +
298
+ j . The complete strategy to obtain a fea-
299
+ sible solution of dense multicut problem is described in Al-
300
+ gorithm 2. It imitates Algorithm 1 by iteratively searching
301
+ and contracting the most attractive edge, but it restricts its
302
+ search only to the NN graph thereby reducing computation.
303
+ After contraction, the NN graph is updated (lines 5-8) by
304
+ only recomputing nearest neighbors of nodes which were
305
+ affected by the contraction in the NN graph.
306
+ Algorithm 2: Dense GAEC
307
+ Data: Node features fi, ∀i ∈ V ; Number of nearest
308
+ neighbours k
309
+ Result: Clusters V
310
+ // Find nearest neighbours of each node
311
+ 1 A = {(i, j)|i ∈ V, j ∈ arg top-ki′̸=i⟨fi, fi′⟩}
312
+ 2 while max(u,v)∈A⟨fu, fv⟩ ≥ 0 do
313
+ 3
314
+ m := (i, j) = arg top-k(u,v)∈A⟨fu, fv⟩
315
+ // Aggregate node features
316
+ 4
317
+ fm = fi + fj
318
+ // Update nodes
319
+ 5
320
+ V = (V ∪ m) \ {i, j}
321
+ // Nodes with i, j as NN
322
+ 6
323
+ H = {(q, arg maxl∈V \q⟨fm, fl⟩)|q ∈ N −
324
+ ij }
325
+ // NN of merged node
326
+ 7
327
+ H = H ∪ {(m, r)|r = arg top-kl∈V \m⟨fm, fl⟩}
328
+ // Update arcs
329
+ 8
330
+ A = (A ∪ H) \ {(q, i)}q∈N −
331
+ i ∪ {(q, j)}q∈N −
332
+ j
333
+ Proposition 3.2 (Dense Greedy Contraction). Algorithm 2
334
+ always merges a pair of nodes i and j with the largest edge
335
+
336
+ Clustering Fully connected Graphs by Multicut
337
+ cost i.e.
338
+ (i, j) ∈ arg max
339
+ (u,v)∈A
340
+ ⟨fu, fv⟩ =⇒ ⟨fi, fj⟩ ≥ max
341
+ u,v̸=u⟨fu, fv⟩.
342
+ (7)
343
+ Proof. The statement is trivially satisfied before any merge
344
+ operation is performed since A is constructed by nearest
345
+ neighbour search over all nodes in line 1 of the algorithm.
346
+ We now show that after each merge operation (i.e. after
347
+ line 8 of the algorithm) the statement (7) still holds. We
348
+ define Q = m ∪ {q|q ∈ N −
349
+ ij } to be the set of nodes using i
350
+ or j as their nearest neighbours. Two cases can arise:
351
+ Case 1: {i, j}∩Q ̸= ∅:
352
+ Due to nearest neighbour search
353
+ for all nodes in Q at lines 6 and 7, the statement holds.
354
+ Case 2: {i, j} ∩ Q = ∅:
355
+ In this case if i is the con-
356
+ tracted node m from the last edge contraction operation
357
+ then (i, j) ∈ A due to line 6. If i ̸= m then it remains
358
+ connected to its nearest neighbours either due to the initial
359
+ NN search at line 1 or the NN update at lines 6 and 7.
360
+ The above result guarantees that the most attractive edge
361
+ will always be present in the nearest neighbour graph
362
+ thus foregoing the need to search in the complete graph.
363
+ This proves that the Algorithm 2 performs locally optimal
364
+ merges as proposed in (Keuper et al., 2015) and is also scal-
365
+ able to large complete graphs. As a downside the algorithm
366
+ requires costly nearest neighbour search after every edge
367
+ contraction. Since computing nearest neighbours and con-
368
+ tracting edges is not commutative, in the worst case one
369
+ has to recompute the nearest neighbours on the contracted
370
+ graph from scratch.
371
+ Incremental nearest neighbours:
372
+ For faster nearest
373
+ neighbour updates after edge contraction we show how
374
+ to reuse more of the previously computed nearest neigh-
375
+ bors through the following two approaches. First, for all
376
+ nodes whose nearest neighbours are merging nodes (i.e.
377
+ line 6 of Alg. 2), we check if merged node m is already
378
+ a nearest neighbour without requiring exhaustive search.
379
+ Specifically assume a contracting node i was a k-nearest
380
+ neighbour of some other node q ∈ V \ i.
381
+ Then the
382
+ merged node m is a k-nearest neighbour of q if ⟨fq, fm⟩ ≥
383
+ minl∈N +
384
+ q ⟨fq, fl⟩. This check can be cheaply performed for
385
+ all such nodes thereby reducing computation. Second, we
386
+ devise a criterion which can allow to efficiently populate
387
+ nearest neighbours of the contracted node m.
388
+ Proposition 3.3 (Incremental nearest neighbours). Let the
389
+ k-nearest neighbours N +
390
+ i , N +
391
+ j
392
+ of nodes i and j be given.
393
+ Assume that nodes i, j are merged to form a new node m.
394
+ Then edge costs between nodes v ∈ V \ N +
395
+ ij and m are
396
+ i
397
+ j
398
+ N +
399
+ i
400
+ N +
401
+ j
402
+ N −
403
+ ij
404
+ Figure 2: Illustration of nearest neighbour graph and an
405
+ edge ij being contracted. The set N +
406
+ ij = N +
407
+ i
408
+ ∪ N +
409
+ j
410
+ is
411
+ searched first to find nearest neighbours of the merged node
412
+ efficiently (Prop. 3.3). The nodes in set N −
413
+ ij need to update
414
+ their nearest neighbours since their current nearest neigh-
415
+ bour nodes i and j are getting contracted. Only the arcs
416
+ to/from i and j are shown.
417
+ bounded from above by
418
+ $b_{ij} := \min_{p \in N^+_i} \langle f_i, f_p \rangle + \min_{q \in N^+_j} \langle f_j, f_q \rangle$
425
+ Proof. Since neighbours of i are computed by nearest
426
+ neighbours search we have for all nodes p′ /∈ N +
427
+ i
428
+ ⟨fi, fp′⟩ ≤ min
429
+ p∈N +
430
+ i
431
+ ⟨fi, fp⟩,
432
+ and similarly for node j.
433
+ Then by definition of v and
434
+ Lemma 3.1 we obtain
435
+ ⟨fm, fv⟩ = ⟨fi, fv⟩ + ⟨fj, fv⟩
436
+ ≤ min
437
+ p∈N +
438
+ i
439
+ ⟨fi, fp⟩ + min
440
+ q∈N +
441
+ j
442
+ ⟨fj, fq⟩ .
443
+ The above proposition gives an upper bound of feature sim-
444
+ ilarity (i.e. edge cost) of merged node m with all nodes not
445
+ in N +
446
+ ij . Thus if a node in N +
447
+ ij exceeds this upper bound it
448
+ is more similar to m than all nodes not in N +
449
+ ij . This allows
450
+ to possibly skip recomputing the nearest neighbors of m in
451
+ Alg. 2 (line 7).
452
+ Lemma 3.4. If
453
+ |{p ∈ N +
454
+ ij : ⟨fm, fp⟩ ≥ bij}| ≥ k
455
+ (8)
456
+ then
457
+ k-nearest
458
+ neighbour
459
+ of
460
+ node
461
+ m
462
+ given
463
+ by
464
+ arg top-kv∈V \{i,j,m}⟨fm, fv⟩
465
+ can
466
+ be
467
+ chosen
468
+ as
469
+ arg top-kp∈N +
470
+ ij ⟨fm, fp⟩.
471
+ Proof. Since the elements of N +
472
+ ij already satisfy the bound
473
+ bij from Prop. 3.3 and there are at least k many such el-
474
+ ements, the k-nearest neighbours of node m can be taken
475
+ from N +
476
+ ij .
477
+
478
+ Clustering Fully connected Graphs by Multicut
479
+ Both of these approaches for efficiently updating the NN
480
+ graph after contraction are used in Alg. 3. This algorithm
481
+ can be used instead of lines 6 and 7 in Alg. 2 for improved
482
+ performance. See Figure 2 for an illustration on nearest
483
+ neighbour graph and edge contraction update.
484
+ Algorithm 3: Incremental NN update
485
+ Data: Contracting nodes i, j; Contracted node m; NN
486
+ graph (V, A); Node features fi, ∀i ∈ V ; Num.
487
+ of neighbours k;
488
+ Result: Nearest neighbour arcs H to add in A
489
+ // NNs of m by Prop. 3.3
490
+ 1 H = {(m, l)|l ∈ N +
491
+ ij , ⟨fm, fl⟩ ≥ bij}
492
+ // Keep at most k NN
493
+ 2 H = arg top-k(m,l)∈H⟨fm, fl⟩
494
+ 3 if H = ∅ then
495
+ 4
496
+ H = {(m, r)|r = arg top-kl∈V \m⟨fm, fl⟩}
497
+ 5 for q ∈ N −
498
+ ij \ {i, j} do
499
+ // Check if m a NN of q
500
+ 6
501
+ if ⟨fq, fm⟩ ≥ minl∈N +
502
+ q ⟨fq, fl⟩ then
503
+ 7
504
+ H = H ∪ (q, m)
505
+ 8
506
+ else
507
+ 9
508
+ H = H ∪ {(q, arg maxl∈V \q⟨fq, fl⟩)}
509
+ 3.2. Lazy Edge Contraction
510
+ We further forego the need for nearest neighbours recom-
511
+ putation after edge contraction by lifting the restriction of
512
+ performing only greedy moves. This allows to maximally
513
+ utilize the NN graph: the algorithm performs contractions,
514
+ including non-greedy ones, until no contraction candidates
515
+ are present in the NN graph. Specifically we do not per-
516
+ form the exhaustive search in lines 4 and 9 and only return
517
+ the nearest neighbours which are easily computable. The
518
+ NN graph is repopulated as lazily as possible i.e. when no
519
+ contraction candidates are left. In addition to being more
520
+ efficient this strategy is reminiscent of the balanced edge
521
+ contraction approach of (Kardoost & Keuper, 2018). The
522
+ authors normalized the edge costs with cluster size of two
523
+ end-points. These normalized edge costs were used to find
524
+ the edge to contract. This strategy encouraged consecutive
525
+ contractions to occur at different regions of the graph. As
526
+ our lazy approach does not always make the nearest neigh-
527
+ bours of the contracted node available thus contractions can
528
+ only be done to nodes other than the contracted node. This
529
+ also produces contractions in different regions.
530
+ Lastly we also utilize efficient methods for approximate
531
+ nearest neighbour search (Malkov & Yashunin, 2018) for
532
+ populating the possibly large initial NN graph. For later
533
+ nearest neighbour searches we still use exact methods as
534
+ the search space is reduced due to previous contractions.
535
+ 3.3. Varying Affinity Strength
536
+ Our basic edge costs computed by ⟨fi, fj⟩ for two features
537
+ fi and fj have one fundamental limitation: Clusters will by
538
+ default occupy whole quadrants. In other words, whenever
539
+ two features have angle lower than 90◦ they are attractive
540
+ and will prefer to be in the same cluster, see Figure 3. In
541
+ order to let our formulation favor larger or smaller clusters,
542
+ we modify our original similarity function s(·, ·) by adding
543
+ an additional term indicated by α-variables:
544
+ f i = [fi; αi],
545
+ (9)
546
+ s(f i, fj) = ⟨fi, fj⟩ ± αi · αj ,
547
+ (10)
548
+ where we choose positive sign for favoring larger clusters
549
+ and negative for smaller clusters. In our experiments we
550
+ will set αi = α with α > 0 a constant.
551
+ We note that the contraction mechanism carries over di-
552
+ rectly to our extended setting.
553
+ Lemma 3.5. Aggregating features of the contracted node
554
+ m by fm = f i + f j is equivalent to setting edge costs as
555
+ per (4) on complete graph.
556
+ Proof. Similar to the proof of Lemma 3.1 as follows
557
+ s(f m, fl) = ⟨fm, fl⟩ ± αm · αl
558
+ = ⟨fi + fj, fl⟩ ± (αi + αj) · αl
559
+ = ⟨fi, fl⟩ ± αi · αl + ⟨fj, fl⟩ ± αj · αl
560
+ = s(fi, f l) + s(f j, fl) .
561
+ Large clusters:
562
+ If we want to allow for larger clusters
563
+ (corresponding to choosing + in (10)), we work directly on
564
+ the extended feature set fi = [fi; αi] and use it in the NN
565
+ graph.
566
+ Small clusters:
567
+ If we want to allow for smaller clusters
568
+ (corresponding to choosing − in (10)), we must modify our
569
+ algorithms slightly. In order to construct NN graphs we
570
+ will use two sets of features: First, the query nodes will
571
+ have their features defined by ˆfi = [fi, −αi] and second,
572
+ the pre-existing nodes j ∈ V in the graph will keep the
573
+ same features fj from (10). In order to search for nearest
574
+ neighbors of node i in the graph V the modified similarity
575
+ function (10) can be implemented by an inner product as
576
+ s(f i, f j) = ⟨ ˆfi, f j⟩ .
577
+ (11)
578
+ 4. Experiments
579
+ We study the benefits of the multicut on complete
580
+ graphs (3) and compare possible algorithms on the
581
+
582
+ Clustering Fully connected Graphs by Multicut
583
+ fd
584
+ fe
585
+ ff
586
+ fg
587
+ fh
588
+ fa
589
+ fb
590
+ fc
591
+ ⟨fc, fb⟩ > 0
592
+ ⟨fc, fd⟩ > 0
593
+ Figure 3: Illustration of edge costs between 8 nodes where
594
+ the feature vector of each node i is in two-dimensional space
595
+ i.e. fi ∈ R2. If we want each node to be a separate cluster
596
+ then the edge costs measured by (5) are not suitable. This is
597
+ because there will always be at least two vectors with pos-
598
+ itive costs preferring to be in the same cluster. Using a
599
+ large enough positive value of α through (10) this issue can
600
+ be resolved.
601
+ tasks of ImageNet (Deng et al., 2009) clustering and
602
+ Cityscapes (Cordts et al., 2016) panoptic segmentation.
603
+ The algorithms are
604
+ GAEC: The greedy additive edge contraction algorithm
605
+ from (Keuper et al., 2015)(Alg. 1) is run on the com-
606
+ plete graph where all edge costs are precomputed and
607
+ then passed to the algorithm.
608
+ RAMA: We also compare with the recent GPU-based mul-
609
+ ticut solver of (Abbas & Swoboda, 2022). Similar to
610
+ GAEC we run it on the complete graph. The solver
611
+ uses dual optimization for better solution quality and
612
+ also gives lower bounds to the multicut objective (1).
613
+ As a drawback this solver cannot handle large in-
614
+ stances due to high memory requirement of complete
615
+ graphs. For running the solver we use an NVIDIA
616
+ A40 GPU with 48GB of memory.
617
+ DGAEC: Our Algorithm 2 which operates on node features
618
+ and performs contractions according to Lemma 3.1.
619
+ The nearest neighbour graph is updated by exhaustive
620
+ search after edge contraction. The number of nearest
621
+ neighbours k is set to 1.
622
+ DGAECInc: Our Algorithm 2 which additionally makes
623
+ use of Algorithm 3 for incrementally populating near-
624
+ est neighbours after edge contraction. The value of k
625
+ is set to 5.
626
+ DLAEC: A variant of our DGAEC where non-greedy moves
627
+ are also allowed as described in Sec. 3.2. The value of
628
+ k is set to 5.
629
+ DAppLAEC: Another variant of our DLAEC where initial
630
+ nearest neighbours are computed by approximate near-
631
+ est neighbour search method
632
+ (Malkov & Yashunin,
633
+ 2018) through the implementation (Johnson et al.,
634
+ 2019).
635
+ For all multicut algorithms on all datasets we set the value
636
+ of affinity strength αi in (11) to 0.4, preferring small clus-
637
+ ters. All CPU algorithms are run on an AMD 7502P CPU
638
+ with a maximum of 8 threads to allow for faster NN search.
639
+ 4.1. ImageNet clustering
640
+ We evaluate clustering of the ImageNet (Deng et al., 2009)
641
+ validation set containing 50k images.
642
+ Each image in
643
+ the dataset acts as a node for our dense multicut for-
644
+ mulation.
645
+ The features of each image are computed
646
+ by a ResNet50 (He et al., 2016) backbone trained by
647
+ MoCov3 (Chen et al., 2021) in unsupervised fashion by a
648
+ contrastive loss on the training split of ImageNet. The fea-
649
+ tures have a dimension of 2048 and are normalized to have
650
+ unit L2 norm. We create two problem instances containing
651
+ 5k and 50k images by considering 100 and all 1000 classes
652
+ respectively.
653
+ Clustering quality:
654
+ Before comparing our algorithmic
655
+ contributions we first test the efficacy of our dense
656
+ multicut formulation by comparing its clustering re-
657
+ sult with k-means (Lloyd, 1982) using the implemen-
658
+ tation from (Pedregosa et al., 2011) and initialization
659
+ of (Arthur & Vassilvitskii, 2007). Since k-means requires
660
+ the number of clusters to be known beforehand we set it to
661
+ the number of classes in the problem instance. For an ad-
662
+ ditional comparison we also run k-means on the number of
663
+ clusters given by our dense multicut algorithm. The quality
664
+ of clustering results is evaluated using normalized mutual
665
+ information (NMI) and adjusted mutual information (AMI)
666
+ metrics (Vinh et al., 2010). The results are given in Table 1.
667
+ We observe that although our formulation does not require
668
+ the number of clusters to be specified, the results are on par
669
+ with k-means. Additionally the value of affinity strength
670
+ α does not need to be changed for different problem in-
671
+ stances. As compared to k-means our algorithms are much
672
+ faster especially on the larger instance. The RAMA solver
673
+ of (Abbas & Swoboda, 2022) performs better than all other
674
+ approaches on the smaller instance but runs out of mem-
675
+ ory for the larger one. Lastly, our formulation creates more
676
+ clusters than the number of classes. This is mainly due to
677
+ presence of outliers in the feature space as the feature ex-
678
+ tractor is trained without any groundtruth information.
679
+ Algorithms comparison:
680
+ We compare different algo-
681
+ rithms for solving the dense multicut problem (3) for ImageNet
682
+ clustering in Table 2. Firstly, we see that on the smaller
683
+
684
+ Clustering Fully connected Graphs by Multicut
685
+ Table 1: Comparison of clustering obtained by different
686
+ methods on ImageNet validation set. t [s]: compute time
687
+ in seconds, NMI: normalized mutual information, AMI: ad-
688
+ justed mutual information, # clusters: number of clusters, †:
689
+ out of GPU memory. For k-means the number of clusters
690
+ was specified as input.
691
+ Method
692
+ t [s] ↓
693
+ NMI ↑
694
+ AMI ↑
695
+ # clusters
696
+ ImageNet-100 (|V | = 5k)
697
+ k-means
698
+ 16
699
+ 0.42
700
+ 0.27
701
+ 100
702
+ k-means
703
+ 32
704
+ 0.53
705
+ 0.26
706
+ 333
707
+ RAMA
708
+ 0.9
709
+ 0.57
710
+ 0.29
711
+ 639
712
+ DGAECInc
713
+ 42
714
+ 0.43
715
+ 0.22
716
+ 343
717
+ DAppLAEC
718
+ 3.2
719
+ 0.47
720
+ 0.26
721
+ 333
722
+ ImageNet-1000 (|V | = 50k)
723
+ k-means
724
+ 701
725
+ 0.54
726
+ 0.2
727
+ 1000
728
+ k-means
729
+ 1801
730
+ 0.61
731
+ 0.19
732
+ 2440
733
+ RAMA
734
+
735
+
736
+
737
+
738
+ DGAECInc
739
+ 2964
740
+ 0.49
741
+ 0.19
742
+ 2488
743
+ DAppLAEC
744
+ 65
745
+ 0.56
746
+ 0.26
747
+ 2440
748
+ instance the GPU based solver RAMA (Abbas & Swoboda,
749
+ 2022) gives the best performance. Secondly using incre-
750
+ mental nearest neighbour search through Alg. 3 gives bet-
751
+ ter run time than exhaustive search. Lastly our non-greedy
752
+ algorithms give the best run time among all CPU-based al-
753
+ gorithms although with slightly worse objectives.
754
+ On the smaller instance, RAMA outperforms other algo-
755
+ rithms in terms of the objective value (3) and also gives bet-
756
+ ter clustering quality as compared to k-means. As a draw-
757
+ back RAMA cannot handle large dense multicut instances.
758
+ This shows multicut on complete graphs can be a suitable
759
+ alternative to k-means. We speculate that algorithmic im-
760
+ provements on top of our proposed algorithms will further
761
+ improve clustering quality for large graphs.
762
+ 4.2. Panoptic segmentation
763
+ We
764
+ evaluate our method
765
+ on
766
+ the
767
+ task
768
+ of
769
+ panoptic
770
+ segmentation (Kirillov et al., 2019) on the Cityscapes
771
+ dataset (Cordts et al., 2016). The panoptic segmentation
772
+ task consists of assigning a class label to each pixel and
773
+ partitioning different instances of classes with object cat-
774
+ egories (e.g.
775
+ car, person etc.).
776
+ We focus on the task
777
+ of partitioning for which the multicut formulation (1)
778
+ has been used by (Kirillov et al., 2017; Abbas & Swoboda,
779
+ 2021).
780
+ The latter work used a carefully crafted graph
781
+ structure.
782
+ Our dense multicut (3) formulation foregoes
783
+ the need for finding a suitable graph structure. We use
784
+ the pretrained Axial-ResNet50 (Wang et al., 2021) network
785
+ Table 2: Comparison of algorithms for solving dense mul-
786
+ ticut problem on two splits of Imagenet validation set. t [s]:
787
+ compute time in seconds, Obj: objective value of cluster-
788
+ ing (3), †: out of GPU memory, ⋆: no result within a 3 hour
789
+ time limit.
790
+ ImageNet-100
791
+ ImageNet-1000
792
+ Method
793
+ t [s] ↓
794
+ Obj ↓
795
+ t [s] ↓
796
+ Obj ↓
797
+ GAEC
798
+ 24
799
+ -6.84e5
800
+ 2605
801
+ -9.353e7
802
+ RAMA
803
+ 0.8
804
+ -6.95e5
805
+
806
+
807
+ DGAEC
808
+ 132
809
+ -6.84e5
810
+
811
+
812
+ DGAECInc
813
+ 42
814
+ -6.84e5
815
+ 2934
816
+ -9.353e7
817
+ DLAEC
818
+ 5
819
+ -6.83e5
820
+ 341
821
+ -9.332e7
822
+ DAppLAEC
823
+ 3.2
824
+ -6.83e5
825
+ 65
826
+ -9.332e7
827
+ from (Yu et al., 2022), made available by (Weber et al.,
828
+ 2021) to compute the node features. Specifically, the net-
829
+ work computes L2-normalized instance discriminative fea-
830
+ tures in its intermediate stages which we use for our study
831
+ without any training.
832
+ For our evaluation we first compute semantic class predic-
833
+ tions and then create a dense multicut instance for each se-
834
+ mantic category with objects (i.e., car, person etc.). Such
835
+ classes are also known as thing classes. The goal of the
836
+ multicut problem is then to partition all nodes belonging
837
+ to same semantic class to different objects. This strategy
838
+ creates a total of 1631 dense multicut problem instances
839
+ of varying sizes from 500 images of the Cityscapes valida-
840
+ tion set. The largest problem instance contains around 43k
841
+ nodes.
842
+ Clustering quality:
843
+ As a first point of comparison we
844
+ check whether formulating a multicut problem on the com-
845
+ plete graph by (3) is beneficial as compared to a hand-
846
+ crafted sparse graph structure. We take the sparse graph
847
+ structure from (Abbas & Swoboda, 2021) as a baseline.
848
+ Their graph also includes long-range edges for dealing with
849
+ occlusions leading to about 10 ·|V | edges in total. We com-
850
+ pute the edge costs in this sparse graph in the same way as
851
+ for our dense multicut formulation. For solving this multi-
852
+ cut problem (1) we use Alg. 1.
853
+ In Table 3 we compare the quality of clustering through
854
+ the panoptic quality metric (Kirillov et al., 2019). We ob-
855
+ serve that our dense multicut formulation performs better
856
+ than multicut on the sparse handcrafted graph. This im-
857
+ provement is significant for classes which can have many
858
+ instances of the same class within an image (i.e. person,
859
+ car) thus making the partitioning problem difficult. For
860
+ classes with large objects (e.g. truck) having more edges
861
+ does not help since the sparse graph can already capture
862
+
863
+ Clustering Fully connected Graphs by Multicut
864
+ Table
865
+ 3:
866
+ Comparison
867
+ of
868
+ panoptic
869
+ segmentation
870
+ on Cityscapes dataset.
871
+ Multicut on sparse graph
872
+ of (Abbas & Swoboda, 2021) is computed by Alg. 1.
873
+ For dense multicut we use the DAppLAEC algorithm.
874
+ PQth: Average panoptic quality of all thing classes.
875
+ Panoptic quality (%) ↑
876
+ Category
877
+ Sparse multicut
878
+ Dense multicut
879
+ Person
880
+ 40.0
881
+ 46.9
882
+ Rider
883
+ 53.0
884
+ 54.4
885
+ Car
886
+ 50.7
887
+ 60.5
888
+ Truck
889
+ 52.7
890
+ 52.3
891
+ Bus
892
+ 72.1
893
+ 71.1
894
+ Train
895
+ 65.6
896
+ 62.9
897
+ Motorcycle
898
+ 47.0
899
+ 46.8
900
+ Bicycle
901
+ 45.7
902
+ 46.9
903
+ PQth
904
+ 53.3
905
+ 55.2
906
+ most inter-pixel relations. On average our dense multicut
907
+ formulation gives better results than sparse multicut while
908
+ alleviating the need for designing a graph structure.
909
+ Algorithms comparison:
910
+ We compare dense multicut al-
911
+ gorithms for the panoptic segmentation task in terms of ob-
912
+ jective value and run time. We were not able to run RAMA
913
+ since the GPU could not store large graphs. The compar-
914
+ ison of performance to the remaining algorithms averaged
915
+ over all problem instances is given in Table 4. Moreover,
916
+ in Figure 4 we compare performance of the algorithms on
917
+ all large problem instances.
918
+ In terms of run time, we see that our most naive algorithm
919
+ DGAEC is slower than GAEC which directly operates on
920
+ edge costs. Our other algorithms surpass GAEC reaching up
921
+ to an order of magnitude run time improvement with lazy
922
+ edge contractions and approximate initial nearest neigh-
923
+ bours search. In terms of objective value we see slight im-
924
+ provement by our lazy contraction algorithms as compared
925
+ to the greedy ones.
926
+ Sensitivity of affinity strength:
927
+ In Table 5 we study the
928
+ effect of changing the value of α from (10). We observe
929
+ even better panoptic quality using a value of 0.3 as com-
930
+ pared to our default of 0.4. As the edge costs lie in [−1, 1]
931
+ due to L2-normalized node features, values of α close to
932
+ 0 or 1 give more performance degradation. Lastly, we see
933
+ further improvement if the value of α is set differently for
934
+ each class. We refer to the Appendix for further results.
935
+ −1.4 −1.2
936
+ −1
937
+ −0.8 −0.6 −0.4 −0.2
938
+ 0
939
+ ← Objective value (×108)
940
+ 10−1
941
+ 100
942
+ 101
943
+ 102
944
+ 103
945
+ 104
946
+ ← time [s]
947
+ GAEC
948
+ DGAEC
949
+ DGAECInc
950
+ DLAEC
951
+ DAppLAEC
952
+ Figure 4: Comparison of algorithms on large dense multi-
953
+ cut instances (|V | ≥ 5000) from Cityscapes validation set.
954
+ Overlaid bars mark the 0.25, 0.5 and 0.75-quantile.
955
+ Table 4: Comparison of algorithms for solving dense mul-
956
+ ticut problem on Cityscapes validation set. (t [s]): average
957
+ compute times in seconds, (Obj): average objective value
958
+ of clustering (3). The average is calculated over all problem
959
+ instances.
960
+ Method
961
+ t [s] ↓
962
+ Obj (×106) ↓
963
+ GAEC
964
+ 10.0
965
+ -6.338
966
+ DGAEC
967
+ 84.1
968
+ -6.338
969
+ DGAECInc
970
+ 3.2
971
+ -6.338
972
+ DLAEC
973
+ 2.1
974
+ -6.340
975
+ DAppLAEC
976
+ 1.5
977
+ -6.341
978
+ Table 5: Results of panoptic segmentation via dense multi-
979
+ cut with different values of attraction/repulsion strength α
980
+ in (10). PQth: Avg. panoptic quality over all thing classes.
981
+ α
982
+ 0.2
983
+ 0.3
984
+ 0.4
985
+ 0.5
986
+ 0.6
987
+ 0.7
988
+ 0.8
989
+ PQth
990
+ 54.5
991
+ 55.8
992
+ 55.2
993
+ 55.0
994
+ 54.1
995
+ 52.0
996
+ 49.3
997
+
998
+ Clustering Fully connected Graphs by Multicut
999
+ 5. Conclusion
1000
+ We have demonstrated that optimizing multicut on large
1001
+ complete graphs is possible when using factorized edge
1002
+ costs through inner products of features. We speculate that
1003
+ further algorithmic improvements are possible e.g. by per-
1004
+ forming dual optimization directly on the node features.
1005
+ As a potential theoretical advantage our approach sidesteps
1006
+ the need for learning graph structure. This offers a possibil-
1007
+ ity to embed it as a differentiable layer in neural networks,
1008
+ using e.g. the work (Vlastelica et al., 2019).
1009
+ References
1010
+ Abbas, A. and Swoboda, P. Combinatorial optimization for
1011
+ panoptic segmentation: A fully differentiable approach.
1012
+ Advances in Neural Information Processing Systems, 34:
1013
+ 15635–15649, 2021.
1014
+ Abbas, A. and Swoboda, P.
1015
+ RAMA: A Rapid Multicut
1016
+ Algorithm on GPU. In Proceedings of the IEEE/CVF
1017
+ Conference on Computer Vision and Pattern Recognition
1018
+ (CVPR), pp. 8193–8202, June 2022.
1019
+ Arthur, D. and Vassilvitskii, S. K-means++: The advan-
1020
+ tages of careful seeding.
1021
+ In Proceedings of the Eigh-
1022
+ teenth Annual ACM-SIAM Symposium on Discrete Al-
1023
+ gorithms, SODA ’07, pp. 1027–1035, USA, 2007. So-
1024
+ ciety for Industrial and Applied Mathematics.
1025
+ ISBN
1026
+ 9780898716245.
1027
+ Bailoni, A., Pape, C., H¨utsch, N., Wolf, S., Beier, T.,
1028
+ Kreshuk, A., and Hamprecht, F. A. GASP, a General-
1029
+ ized Framework for Agglomerative Clustering of Signed
1030
+ Graphs and Its Application to Instance Segmentation. In
1031
+ Proceedings of the IEEE/CVF Conference on Computer
1032
+ Vision and Pattern Recognition, pp. 11645–11655, 2022.
1033
+ Bansal, N., Blum, A., and Chawla, S. Correlation cluster-
1034
+ ing. Machine learning, 56(1-3):89–113, 2004.
1035
+ Beier, T., Kroeger, T., Kappes, J. H., Kothe, U., and Ham-
1036
+ precht, F. A. Cut, glue & cut: A fast, approximate solver
1037
+ for multicut partitioning. In Proceedings of the IEEE
1038
+ Conference on Computer Vision and Pattern Recogni-
1039
+ tion, pp. 73–80, 2014.
1040
+ Beier, T., Hamprecht, F. A., and Kappes, J. H.
1041
+ Fusion
1042
+ moves for correlation clustering.
1043
+ In Proceedings of
1044
+ the IEEE Conference on Computer Vision and Pattern
1045
+ Recognition, pp. 3507–3516, 2015.
1046
+ Chen, X., Xie, S., and He, K. An empirical study of train-
1047
+ ing self-supervised vision transformers. In Proceedings
1048
+ of the IEEE/CVF International Conference on Computer
1049
+ Vision, pp. 9640–9649, 2021.
1050
+ Chopra, S. and Rao, M. R. The partition problem. Mathe-
1051
+ matical Programming, 59(1-3):87–115, 1993.
1052
+ Cordts, M., Omran, M., Ramos, S., Rehfeld, T., Enzweiler,
1053
+ M., Benenson, R., Franke, U., Roth, S., and Schiele, B.
1054
+ The cityscapes dataset for semantic urban scene under-
1055
+ standing. In Proceedings of the IEEE conference on com-
1056
+ puter vision and pattern recognition, 2016.
1057
+ Demaine, E. D., Emanuel, D., Fiat, A., and Immorlica, N.
1058
+ Correlation clustering in general weighted graphs. Theo-
1059
+ retical Computer Science, 361(2-3):172–187, 2006.
1060
+ Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., and
1061
+ Fei-Fei, L. Imagenet: A large-scale hierarchical image
1062
+ database. In 2009 IEEE conference on computer vision
1063
+ and pattern recognition, pp. 248–255. Ieee, 2009.
1064
+ Deza, M., Gr¨otschel, M., and Laurent, M.
1065
+ Clique-web
1066
+ facets for multicut polytopes.
1067
+ Mathematics of Opera-
1068
+ tions Research, 17(4):981–1000, 1992.
1069
+ Dhillon, I. S., Guan, Y., and Kulis, B. Weighted graph cuts
1070
+ without eigenvectors a multilevel approach. IEEE Trans-
1071
+ actions on Pattern Analysis and Machine Intelligence, 29
1072
+ (11):1944–1957, 2007. doi: 10.1109/TPAMI.2007.1115.
1073
+ He, K., Zhang, X., Ren, S., and Sun, J. Deep residual learn-
1074
+ ing for image recognition. In Proceedings of the IEEE
1075
+ conference on computer vision and pattern recognition,
1076
+ pp. 770–778, 2016.
1077
+ Hu, T. C. Multi-commodity network flows. Operations
1078
+ research, 11(3):344–360, 1963.
1079
+ Jia, H., Ding, S., Xu, X., and Nie, R. The latest research
1080
+ progress on spectral clustering. Neural Comput. Appl.,
1081
+ 24(7–8):1477–1486, jun 2014. ISSN 0941-0643. doi:
1082
+ 10.1007/s00521-013-1439-2.
1083
+ Johnson, J., Douze, M., and J´egou, H. Billion-scale similar-
1084
+ ity search with GPUs. IEEE Transactions on Big Data,
1085
+ 7(3):535–547, 2019.
1086
+ Kardoost, A. and Keuper, M. Solving minimum cost lifted
1087
+ multicut problems by node agglomeration. In Asian Con-
1088
+ ference on Computer Vision, pp. 74–89. Springer, 2018.
1089
+ Keuper, M., Levinkov, E., Bonneel, N., Lavou´e, G., Brox,
1090
+ T., and Andres, B.
1091
+ Efficient decomposition of image
1092
+ and mesh graphs by lifted multicuts. In Proceedings of
1093
+ the IEEE International Conference on Computer Vision,
1094
+ 2015.
1095
+ Kirillov, A., Levinkov, E., Andres, B., Savchynskyy, B.,
1096
+ and Rother, C.
1097
+ Instancecut: from edges to instances
1098
+ with multicut. In Proceedings of the IEEE Conference
1099
+ on Computer Vision and Pattern Recognition, 2017.
1100
+
1101
+ Clustering Fully connected Graphs by Multicut
1102
+ Kirillov, A., He, K., Girshick, R., Rother, C., and Doll´ar, P.
1103
+ Panoptic segmentation. In Proceedings of the IEEE/CVF
1104
+ Conference on Computer Vision and Pattern Recogni-
1105
+ tion, pp. 9404–9413, 2019.
1106
+ Lange, J.-H., Karrenbauer, A., and Andres, B.
1107
+ Partial
1108
+ optimality and fast lower bounds for weighted correla-
1109
+ tion clustering. In International Conference on Machine
1110
+ Learning, 2018.
1111
+ Levinkov, E., Kirillov, A., and Andres, B. A comparative
1112
+ study of local search algorithms for correlation cluster-
1113
+ ing. In GCPR, 2017.
1114
+ Lloyd, S. Least squares quantization in pcm. IEEE Transac-
1115
+ tions on Information Theory, 28(2):129–137, 1982. doi:
1116
+ 10.1109/TIT.1982.1056489.
1117
+ Malkov, Y. A. and Yashunin, D. A. Efficient and robust
1118
+ approximate nearest neighbor search using hierarchical
1119
+ navigable small world graphs. IEEE transactions on pat-
1120
+ tern analysis and machine intelligence, 42(4):824–836,
1121
+ 2018.
1122
+ Oosten, M., Rutten, J. H., and Spieksma, F. C. The clique
1123
+ partitioning problem: facets and patching facets. Net-
1124
+ works: An International Journal, 38(4):209–226, 2001.
1125
+ Pan, X., Papailiopoulos, D., Oymak, S., Recht, B., Ram-
1126
+ chandran, K., and Jordan, M. I. Parallel correlation clus-
1127
+ tering on big graphs. In Cortes, C., Lawrence, N., Lee,
1128
+ D., Sugiyama, M., and Garnett, R. (eds.), Advances in
1129
+ Neural Information Processing Systems, volume 28. Cur-
1130
+ ran Associates, Inc., 2015.
1131
+ Pedregosa, F., Varoquaux, G., Gramfort, A., Michel, V.,
1132
+ Thirion, B., Grisel, O., Blondel, M., Prettenhofer, P.,
1133
+ Weiss, R., Dubourg, V., Vanderplas, J., Passos, A., Cour-
1134
+ napeau, D., Brucher, M., Perrot, M., and Duchesnay, E.
1135
+ Scikit-learn: Machine learning in Python.
1136
+ Journal of
1137
+ Machine Learning Research, 12:2825–2830, 2011.
1138
+ Qaddoura, R., Faris, H., and Aljarah, I. An efficient cluster-
1139
+ ing algorithm based on the k-nearest neighbors with an
1140
+ indexing ratio. International Journal of Machine Learn-
1141
+ ing and Cybernetics, 11(3):675–714, 2020.
1142
+ Swoboda, P. and Andres, B. A message passing algorithm
1143
+ for the minimum cost multicut problem. In Proceedings
1144
+ of the IEEE Conference on Computer Vision and Pattern
1145
+ Recognition, 2017.
1146
+ Veldt, N. Correlation clustering via strong triadic closure
1147
+ labeling: Fast approximation algorithms and practical
1148
+ lower bounds. In International Conference on Machine
1149
+ Learning, pp. 22060–22083. PMLR, 2022.
1150
+ Vinh, N. X., Epps, J., and Bailey, J.
1151
+ Information
1152
+ theoretic
1153
+ measures
1154
+ for
1155
+ clusterings
1156
+ comparison:
1157
+ Variants,
1158
+ properties,
1159
+ normalization
1160
+ and
1161
+ correc-
1162
+ tion
1163
+ for
1164
+ chance.
1165
+ Journal
1166
+ of
1167
+ Machine
1168
+ Learn-
1169
+ ing
1170
+ Research,
1171
+ 11(95):2837–2854,
1172
+ 2010.
1173
+ URL
1174
+ http://jmlr.org/papers/v11/vinh10a.html.
1175
+ Vlastelica, M., Paulus, A., Musil, V., Martius, G., and
1176
+ Rol´ınek, M. Differentiation of blackbox combinatorial
1177
+ solvers. arXiv preprint arXiv:1912.02175, 2019.
1178
+ Von Luxburg, U. A tutorial on spectral clustering. Statistics
1179
+ and computing, 17(4):395–416, 2007.
1180
+ Wang, H., Zhu, Y., Adam, H., Yuille, A., and Chen, L.-C.
1181
+ Max-deeplab: End-to-end panoptic segmentation with
1182
+ mask transformers.
1183
+ In Proceedings of the IEEE/CVF
1184
+ conference on computer vision and pattern recognition,
1185
+ pp. 5463–5474, 2021.
1186
+ Weber, M., Wang, H., Qiao, S., Xie, J., Collins, M. D., Zhu,
1187
+ Y., Yuan, L., Kim, D., Yu, Q., Cremers, D., Leal-Taixe,
1188
+ L., Yuille, A. L., Schroff, F., Adam, H., and Chen, L.-
1189
+ C. DeepLab2: A TensorFlow Library for Deep Labeling.
1190
+ arXiv: 2106.09748, 2021.
1191
+ Yu, Q., Wang, H., Qiao, S., Collins, M., Zhu, Y., Adam, H.,
1192
+ Yuille, A., and Chen, L.-C. k-means mask transformer.
1193
+ In European Conference on Computer Vision, pp. 288–
1194
+ 307. Springer, 2022.
1195
+
1196
+ Clustering Fully connected Graphs by Multicut
1197
+ Appendix
1198
+ A. Influence of affinity strength
1199
+ On the Cityscapes dataset we compare panoptic quality on different object classes by varying the value of affinity strength
1200
+ α in (11). The results are given in Table 6. We observe that for classes containing many small objects a large value of α is
1201
+ suitable whereas for classes with large objects a small value of α is preferable. Although our default value of 0.4 already
1202
+ makes dense multicut outperform the baseline, further improvement is still possible e.g. by tuning α.
1203
+ Table 6: Comparison of panoptic segmentation on Cityscapes dataset for different values of affinity strength α (11). All
1204
+ results are computed using the DAppLAEC algorithm. Largest values in each row are highlighted with bold.
1205
+ Panoptic quality on varying values of α
1206
+ Category
1207
+ 0.1
1208
+ 0.2
1209
+ 0.3
1210
+ 0.4
1211
+ 0.5
1212
+ 0.6
1213
+ 0.7
1214
+ 0.8
1215
+ 0.9
1216
+ Person
1217
+ 31.5
1218
+ 38.1
1219
+ 43.2
1220
+ 46.9
1221
+ 49.8
1222
+ 52.6
1223
+ 54.3
1224
+ 55.0
1225
+ 52.4
1226
+ Rider
1227
+ 51.1
1228
+ 53.0
1229
+ 53.9
1230
+ 54.5
1231
+ 55.5
1232
+ 55.4
1233
+ 53.9
1234
+ 51.0
1235
+ 45.5
1236
+ Car
1237
+ 45.6
1238
+ 52.9
1239
+ 57.8
1240
+ 60.5
1241
+ 63.3
1242
+ 64.8
1243
+ 64.1
1244
+ 62.2
1245
+ 57.8
1246
+ Truck
1247
+ 54.1
1248
+ 53.7
1249
+ 52.7
1250
+ 52.3
1251
+ 49.0
1252
+ 47.8
1253
+ 45.4
1254
+ 41.5
1255
+ 34.7
1256
+ Bus
1257
+ 75.1
1258
+ 74.2
1259
+ 73.5
1260
+ 71.2
1261
+ 69.3
1262
+ 63.6
1263
+ 58.5
1264
+ 54.5
1265
+ 47.3
1266
+ Train
1267
+ 75.0
1268
+ 74.9
1269
+ 71.5
1270
+ 62.9
1271
+ 56.3
1272
+ 51.7
1273
+ 45.1
1274
+ 40.4
1275
+ 32.3
1276
+ Motorcycle
1277
+ 45.5
1278
+ 46.1
1279
+ 48.0
1280
+ 46.8
1281
+ 48.7
1282
+ 49.1
1283
+ 47.8
1284
+ 45.2
1285
+ 39.8
1286
+ Bicycle
1287
+ 38.1
1288
+ 43.2
1289
+ 45.6
1290
+ 46.9
1291
+ 47.8
1292
+ 48.0
1293
+ 46.9
1294
+ 44.6
1295
+ 40.4
1296
+ Average (PQth)
1297
+ 52.0
1298
+ 54.5
1299
+ 55.8
1300
+ 55.2
1301
+ 55.0
1302
+ 54.1
1303
+ 52.0
1304
+ 49.3
1305
+ 43.8
1306
+
-tFLT4oBgHgl3EQfvS-t/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
.gitattributes CHANGED
@@ -8731,3 +8731,67 @@ _NAzT4oBgHgl3EQf_f58/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -tex
8731
  xdFLT4oBgHgl3EQfmC8i/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8732
  AtFKT4oBgHgl3EQfWC5j/content/2301.11790v1.pdf filter=lfs diff=lfs merge=lfs -text
8733
  LdE2T4oBgHgl3EQfqAhW/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8731
  xdFLT4oBgHgl3EQfmC8i/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8732
  AtFKT4oBgHgl3EQfWC5j/content/2301.11790v1.pdf filter=lfs diff=lfs merge=lfs -text
8733
  LdE2T4oBgHgl3EQfqAhW/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8734
+ KtE1T4oBgHgl3EQfYwQa/content/2301.03141v1.pdf filter=lfs diff=lfs merge=lfs -text
8735
+ IdFLT4oBgHgl3EQfJC9S/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8736
+ mtAyT4oBgHgl3EQfyvmi/content/2301.00690v1.pdf filter=lfs diff=lfs merge=lfs -text
8737
+ 3NFAT4oBgHgl3EQfEBxO/content/2301.08419v1.pdf filter=lfs diff=lfs merge=lfs -text
8738
+ GdE4T4oBgHgl3EQfHgxG/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8739
+ z9FQT4oBgHgl3EQfzjal/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8740
+ ntE0T4oBgHgl3EQf8gLK/content/2301.02790v1.pdf filter=lfs diff=lfs merge=lfs -text
8741
+ 3NA0T4oBgHgl3EQfNP9P/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8742
+ l9E0T4oBgHgl3EQf8AKF/content/2301.02783v1.pdf filter=lfs diff=lfs merge=lfs -text
8743
+ udE3T4oBgHgl3EQfNgn9/content/2301.04385v1.pdf filter=lfs diff=lfs merge=lfs -text
8744
+ mtAyT4oBgHgl3EQfyvmi/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8745
+ ntE0T4oBgHgl3EQf8gLK/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8746
+ etFAT4oBgHgl3EQf7R7K/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8747
+ etFAT4oBgHgl3EQf7R7K/content/2301.08744v1.pdf filter=lfs diff=lfs merge=lfs -text
8748
+ x9FAT4oBgHgl3EQfAhwk/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8749
+ KtE1T4oBgHgl3EQfYwQa/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8750
+ l9E0T4oBgHgl3EQf8AKF/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8751
+ lNFIT4oBgHgl3EQfryt6/content/2301.11333v1.pdf filter=lfs diff=lfs merge=lfs -text
8752
+ xNA0T4oBgHgl3EQfMf_C/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8753
+ 8dAyT4oBgHgl3EQf2_nF/content/2301.00762v1.pdf filter=lfs diff=lfs merge=lfs -text
8754
+ GtE2T4oBgHgl3EQfTgdU/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8755
+ 89E1T4oBgHgl3EQfCALf/content/2301.02860v1.pdf filter=lfs diff=lfs merge=lfs -text
8756
+ xNA0T4oBgHgl3EQfMf_C/content/2301.02134v1.pdf filter=lfs diff=lfs merge=lfs -text
8757
+ ptE3T4oBgHgl3EQfLwlT/content/2301.04366v1.pdf filter=lfs diff=lfs merge=lfs -text
8758
+ HdE4T4oBgHgl3EQfHwzB/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8759
+ 9NE1T4oBgHgl3EQfnwSt/content/2301.03313v1.pdf filter=lfs diff=lfs merge=lfs -text
8760
+ 3NA0T4oBgHgl3EQfNP9P/content/2301.02143v1.pdf filter=lfs diff=lfs merge=lfs -text
8761
+ 6dE4T4oBgHgl3EQfcQxJ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8762
+ 89E1T4oBgHgl3EQfCALf/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8763
+ e9E_T4oBgHgl3EQf1xyk/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8764
+ ONAyT4oBgHgl3EQftPmP/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8765
+ PdE4T4oBgHgl3EQfkQ3K/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8766
+ 59E1T4oBgHgl3EQfmwQa/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8767
+ 4dFIT4oBgHgl3EQf6yuB/content/2301.11395v1.pdf filter=lfs diff=lfs merge=lfs -text
8768
+ 59E1T4oBgHgl3EQfmwQa/content/2301.03300v1.pdf filter=lfs diff=lfs merge=lfs -text
8769
+ l9AzT4oBgHgl3EQfNvvD/content/2301.01155v1.pdf filter=lfs diff=lfs merge=lfs -text
8770
+ YdAzT4oBgHgl3EQfYvzm/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8771
+ w9FKT4oBgHgl3EQf5i7o/content/2301.11938v1.pdf filter=lfs diff=lfs merge=lfs -text
8772
+ NdFOT4oBgHgl3EQf2jRR/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8773
+ 69AyT4oBgHgl3EQf2vl7/content/2301.00756v1.pdf filter=lfs diff=lfs merge=lfs -text
8774
+ PdE4T4oBgHgl3EQfkQ3K/content/2301.05150v1.pdf filter=lfs diff=lfs merge=lfs -text
8775
+ 8dAyT4oBgHgl3EQf2_nF/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8776
+ YdAzT4oBgHgl3EQfYvzm/content/2301.01342v1.pdf filter=lfs diff=lfs merge=lfs -text
8777
+ v9AzT4oBgHgl3EQfCPoQ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8778
+ C9E5T4oBgHgl3EQfUA9Q/content/2301.05540v1.pdf filter=lfs diff=lfs merge=lfs -text
8779
+ 7tE4T4oBgHgl3EQfcwyw/content/2301.05086v1.pdf filter=lfs diff=lfs merge=lfs -text
8780
+ YtE3T4oBgHgl3EQfcQqU/content/2301.04524v1.pdf filter=lfs diff=lfs merge=lfs -text
8781
+ ytE3T4oBgHgl3EQfmArI/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8782
+ c9FJT4oBgHgl3EQf-i35/content/2301.11692v1.pdf filter=lfs diff=lfs merge=lfs -text
8783
+ c9AyT4oBgHgl3EQfjPg-/content/2301.00410v1.pdf filter=lfs diff=lfs merge=lfs -text
8784
+ otE4T4oBgHgl3EQfvA18/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8785
+ BNE2T4oBgHgl3EQfRgdU/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8786
+ rtE3T4oBgHgl3EQf8wvj/content/2301.04811v1.pdf filter=lfs diff=lfs merge=lfs -text
8787
+ ytE3T4oBgHgl3EQfmArI/content/2301.04613v1.pdf filter=lfs diff=lfs merge=lfs -text
8788
+ 4NE1T4oBgHgl3EQfAgLJ/content/2301.02841v1.pdf filter=lfs diff=lfs merge=lfs -text
8789
+ 99E3T4oBgHgl3EQfSQn_/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8790
+ otE4T4oBgHgl3EQfvA18/content/2301.05237v1.pdf filter=lfs diff=lfs merge=lfs -text
8791
+ 9NE1T4oBgHgl3EQfnwSt/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8792
+ wNAzT4oBgHgl3EQf7f4e/content/2301.01889v1.pdf filter=lfs diff=lfs merge=lfs -text
8793
+ BNE2T4oBgHgl3EQfRgdU/content/2301.03781v1.pdf filter=lfs diff=lfs merge=lfs -text
8794
+ vNE2T4oBgHgl3EQfgQds/content/2301.03935v1.pdf filter=lfs diff=lfs merge=lfs -text
8795
+ 3NFAT4oBgHgl3EQfEBxO/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8796
+ VNE5T4oBgHgl3EQfbw9b/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
8797
+ l9AzT4oBgHgl3EQfNvvD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
09E1T4oBgHgl3EQflAR-/content/tmp_files/2301.03280v1.pdf.txt ADDED
@@ -0,0 +1,1397 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Determination of the top-quark mass from top-quark pair events with the matrix element method
2
+ at next-to-leading order: Potential and prospects.
3
+ Till Martini∗
4
+ Fraunhofer Zentrum SIRIOS, Fraunhofer Institute for High-Speed Dynamics EMI, Berlin, Germany
5
+ Turan Nuraliyev† and Peter Uwer‡
6
+ Humboldt-Universität zu Berlin, Institut für Physik, Newtonstraße 15, 12489 Berlin, Germany
7
+ More than 25 years ago the matrix element method has been used in a pioneering work by D�0 to determine the
8
+ top-quark mass from a handful of events. Since then the method has been matured into a powerful analysis tool.
9
+ While the first applications were restricted to leading-order accuracy, in the meantime also the extension to next-
10
+ to-leading order (NLO) accuracy has been studied. In this article we explore the potential of the matrix element
11
+ method at NLO to determine the top-quark mass using events with pair-produced top quarks. We simulate a
12
+ toy experiment by generating unweighted events with a fixed input mass and apply the matrix element method
13
+ to construct an estimator for the top-quark mass. Two different setups are investigated: unweighted events
14
+ obtained from the fixed-order cross section at NLO accuracy as well as events obtained using POWHEG matched
15
+ to a parton shower. The latter lead to a more realistic simulation and allow to study the impact of higher-order
16
+ corrections as well as the robustness of the approach. We find that the matrix element method in NLO accuracy
17
+ leads to a significant reduction of the theoretical uncertainties compared to leading order. In view of the high
18
+ luminosity phase of the LHC, this observation is especially relevant in analyses which are no longer dominated
19
+ by statistical uncertainties.
20
+ I.
21
+ INTRODUCTION
22
+ Regarding experimental as well as theoretical progress,
23
+ hadronic top-quark pair production has evolved into one of
24
+ the flagship processes at the LHC. This development is pro-
25
+ pelled by the expectation of the top quark to play a prominent
26
+ role in extensions of the Standard Model due to it being by far
27
+ the heaviest of the elementary particles with a life time sig-
28
+ nificantly shorter than the time scale of hadronization. The
29
+ high production rate of top-quark pairs at the LHC as well
30
+ as onward advances in experimental data taking enable for
31
+ ever-decreasing statistical and systematic uncertainties in the
32
+ recorded data. In order to make optimal use of this fact in ex-
33
+ perimental analyses, the employed theoretical predictions are
34
+ required to keep up in terms of uncertainties.
35
+ The next-to-leading order QCD corrections for top-quark
36
+ pair production have been calculated for the spin independent
37
+ case more then 30 years ago [1–4]. Later, also the spin depen-
38
+ dent cross sections were evaluated at NLO accuracy in QCD
39
+ [5, 6]. In a series of ground breaking articles also the next-
40
+ to-next-to-leading order QCD corrections were calculated [7–
41
+ 9]. Furthermore, beyond fixed order also the resummation of
42
+ soft-gluon corrections has been studied in great detail ([10–
43
+ 17]). In addition to QCD corrections also weak and QED
44
+ corrections have been calculated [18–22]. In summary, many
45
+ detailed theoretical predictions for top-quark pair production
46
+ are available. However, these might not be readily applicable
47
+ in the experimental analysis. It is thus important to put more
48
+ ∗ Work on this article was conducted while employed at Humboldt-
49
+ Universität
50
+ zu
51
+ Berlin,
52
+ Institut
53
+ für
54
+ Physik,
55
+ Berlin,
56
+ Germany;
57
+ Till.Martini@physik.hu-berlin.de
58
+ † Turan.Nuraliyev@physik.hu-berlin.de
59
+ ‡ Peter.Uwer@physik.hu-berlin.de
60
+ effort in improving the interface between experiment and the-
61
+ ory to make optimal use of the increasing precision reached in
62
+ both fields.
63
+ Multivariate analysis methods like the matrix element
64
+ method (MEM), turn out to be particularly useful in making
65
+ optimal use of the theoretical predictions. The MEM requires
66
+ the calculation of event weights in terms of differential cross
67
+ sections and is thus often formulated at lower-order accuracy
68
+ only. At leading order (LO), the MEM has been established
69
+ as a powerful analysis tool for both signal searches as well as
70
+ parameter inference by virtue of its optimal utilization of the
71
+ information content of the available data. Typically, the im-
72
+ pact of higher-order QCD corrections on theoretical predic-
73
+ tions can be significant while often simultaneously decreasing
74
+ the theoretical uncertainties. In the quest for accuracy and pre-
75
+ cision to match experimental achievements, the MEM at next-
76
+ to-leading order (NLO) represents a promising remedy. But
77
+ when taking higher-order corrections into account, the calcu-
78
+ lation of event weights constitutes a non-trivial task due to the
79
+ intricate combination of virtual and real contributions to ob-
80
+ tain meaningful finite results. The problem of extending the
81
+ MEM beyond the Born approximation has been solved in the
82
+ past by introducing modified jet algorithms on the one hand
83
+ or sensible event definitions on the other hand ([23–25]). At
84
+ the same time, the application of the MEM at NLO has been
85
+ demonstrated for top-quark mass extraction from simulated
86
+ single top-quark events ([23–25]) as well as anomalous cou-
87
+ pling parameter determination from simulated Higgs boson
88
+ events in association with a single top quark ([26]). Addi-
89
+ tionally, the effects of a parton shower applied to simulated
90
+ single top-quark data has been investigated with the MEM at
91
+ NLO ([25]). In this work, we present the application of the
92
+ MEM at NLO to top-quark pair production at the LHC. In
93
+ contrast to the electroweak production mechanism of single
94
+ top quarks studied before, top-quark pair production is QCD-
95
+ induced at LO already with the two production channels of
96
+ arXiv:2301.03280v1 [hep-ph] 9 Jan 2023
97
+
98
+ 2
99
+ quark-antiquark annihilation and gluon-gluon fusion consti-
100
+ tuting the dominant source of top quarks at the LHC. Given
101
+ the aforementioned prominent role of top-quark pair produc-
102
+ tion in both experimental as well as theoretical advances at
103
+ the LHC, it represents an ideal example to study higher-order
104
+ effects within the MEM. Furthermore, in view of the ongo-
105
+ ing progress in top-quark mass measurements, the MEM at
106
+ NLO accuracy could be an interesting alternative to existing
107
+ approaches.
108
+ The paper is structured as follows. In section II the NLO
109
+ QCD calculation of the differential cross section for top-quark
110
+ pair production with the phase space slicing method and the
111
+ subsequent generation of unweighted events are briefly re-
112
+ viewed. Section III focuses on the application of the MEM to
113
+ the generated events. To study parton shower effects, events
114
+ generated with POWHEG+Pythia [27–31] are also analysed.
115
+ The conclusions are presented in section IV.
116
+ II.
117
+ TOP-QUARK PAIR PRODUCTION AT THE LHC
118
+ A.
119
+ Implementing the NLO prediction with the phase space
120
+ slicing method
121
+ The MEM at NLO as presented in [23–25] requires the
122
+ cross-section calculation at NLO to be carried out using the
123
+ phase space slicing method [32]. The respective calculation
124
+ is available in the literature [5]. Thus, in this section we only
125
+ give a brief review of the important aspects of the calcula-
126
+ tion and present the validation for the choice of the slicing pa-
127
+ rameter. In the phase space slicing method, the cross-section
128
+ prediction at NLO accuracy dσNLO is formed of two contri-
129
+ butions: First, the so-called hard part dσHard is just the ma-
130
+ trix element for the real corrections evaluated for phase space
131
+ points where all partons are resolved, that is the additional
132
+ parton is neither collinear to the incoming partons nor soft.
133
+ Second, a Born-like part is comprised of the Born contribution
134
+ dσLO, the virtual corrections dσvirtual (taken from Ref. [33])
135
+ as well as the so-called soft and collinear parts dσsoft/coll.
136
+ stemming from approximated real corrections integrated over
137
+ phase space regions in which the additional parton is unre-
138
+ solved. The separation of the phase space for the real cor-
139
+ rections into resolved and unresolved regions is mediated by
140
+ the so-called slicing parameter xmin which acts as a scale to
141
+ separate the two. In the unresolved regions, well-known fac-
142
+ torization properties of QCD real corrections can be employed
143
+ allowing to analytically integrate over the additional radiation
144
+ in the singular limits in an approximate way thereby reducing
145
+ the respective phase space to Born-like kinematics. The diver-
146
+ gences of these integrations can be regularized within dimen-
147
+ sional regularization leading to poles in the dimensional shift
148
+ away from four space-time dimensions. The outcome can be
149
+ combined with the virtual contributions to cancel the respec-
150
+ tive poles from the loop integration and yield finite results ac-
151
+ cording to the Kinoshita-Lee-Nauenberg theorem ([34, 35]).
152
+ Since the real corrections are approximated in the unresolved
153
+ (singular) regions, the result is only accurate up to deviations
154
+ 10−6
155
+ 10−5
156
+ 10−4
157
+ 10−3
158
+ xmin
159
+ 703
160
+ 704
161
+ 705
162
+ 706
163
+ 707
164
+ 708
165
+ 709
166
+ σNLO[pb]
167
+ reference
168
+ σNLO
169
+ FIG. 1. Phase space slicing parameter (in-)dependence of the total
170
+ cross section predicted at NLO accuracy. The red line shows the
171
+ reference value taken from HATHOR [36].
172
+ 0
173
+ 10
174
+ 20
175
+ 30
176
+ 40
177
+ dσNLO
178
+ dk⊥
179
+ 1
180
+ [pb GeV−1]
181
+ xmin = 0.0002
182
+ xmin = 0.0001
183
+ xmin = 0.00005
184
+ 0
185
+ 100
186
+ 200
187
+ 300
188
+ 400
189
+ 500
190
+ k⊥
191
+ 1 [GeV]
192
+ −2σ
193
+ −σ
194
+ σ
195
+
196
+ pull
197
+ xmin = 0.0002 vs xmin = 0.0001
198
+ xmin = 0.0001 vs xmin = 0.00005
199
+ FIG. 2. Phase space slicing parameter (in-)dependence of the top-
200
+ quark transverse momentum predicted at NLO accuracy.
201
+ proportional to the slicing parameter xmin:
202
+ dσNLO = dσHard + dσLO + dσvirtual + dσsoft/coll. + O(xmin) .
203
+ (1)
204
+ Additionally, the separation of the real phase space in terms of
205
+ the slicing parameter introduces logarithmic dependencies of
206
+ the hard and soft/collinear contributions on xmin which cancel
207
+ in the sum. However, when numerically integrating over the
208
+ finite hard contribution, these logarithms can lead to numeri-
209
+ cal instabilities if xmin is chosen too small. Hence, the value
210
+ of xmin has to be chosen as a compromise between numeri-
211
+ cal stability and the demand that the deviation in Eq. (1) is
212
+ negligible compared to the statistical uncertainties of the total
213
+ cross section as well as distributions calculated at NLO accu-
214
+ racy. Fig. 1 shows NLO predictions for the total cross section
215
+ of top-quark pair production for different values of the slicing
216
+ parameter xmin. The total cross section as the sum of Born,
217
+ virtual and real contributions in Fig. 1 is indeed finite. How-
218
+ ever, it shows a systematic deviation from the reference value
219
+ taken from HATHOR [36] for values xmin ⪆ 2 × 10−3 while for
220
+ values xmin ⪅ 5 × 10−6 numerical instabilities dominate. Ac-
221
+ cordingly, a value of xmin = 10−4 is chosen. As an example
222
+ of a differential distribution the top-quark transverse momen-
223
+
224
+ 3
225
+ 0
226
+ 10
227
+ 20
228
+ 30
229
+ 40
230
+
231
+ dk⊥
232
+ 1 [pb GeV−1]
233
+ dσNLO
234
+ dσLO
235
+ 0
236
+ 100
237
+ 200
238
+ 300
239
+ 400
240
+ 500
241
+ k⊥
242
+ 1 [GeV]
243
+ 1.0
244
+ 1.5
245
+ 2.0
246
+ dσNLO
247
+ dσLO
248
+ 0
249
+ 10
250
+ 20
251
+ 30
252
+ 40
253
+ 50
254
+
255
+ dη1 [pb]
256
+ dσNLO
257
+ dσLO
258
+ −4
259
+ −2
260
+ 0
261
+ 2
262
+ 4
263
+ η1
264
+ 1.0
265
+ 1.5
266
+ 2.0
267
+ dσNLO
268
+ dσLO
269
+ FIG. 3.
270
+ Differential distributions together with the respective k-
271
+ factors.
272
+ tum calculated at NLO accuracy is shown for three different
273
+ choices of xmin in Fig. 2. In the lower plot we show for dif-
274
+ ferent choices of xmin the differences in units of the statistical
275
+ uncertainties. We conclude that all three choices lead to coher-
276
+ ent predictions justifying the choice xmin = 10−4. In addition
277
+ to the top-quark transverse momentum this has been checked
278
+ also for the top-quark energy distribution and the rapidity dis-
279
+ tribution. Furthermore, the distributions calculated here have
280
+ been cross checked with results from madgraph5 aMC@NLO
281
+ [37]. The comparison is shown in appendix A, Fig. 9 and
282
+ Fig. 10. The impact of the NLO corrections on kinematic dis-
283
+ tributions is displayed in Fig. 3 where NLO and LO predic-
284
+ tions for kinematic distributions are compared and their ratios
285
+ (the k-factor) are shown at the bottom of the plots. Results
286
+ for further distributions are shown in Fig. 11 in appendix A.
287
+ As can be seen from the rather constant k-factors, the NLO
288
+ corrections only mildly affect the shapes of the kinematic dis-
289
+ tributions. However, the NLO corrections lead to a significant
290
+ increase of the cross sections by a factor of roughly 1.5. In
291
+ Fig. 4 the impact of variations of the factorization scale µF
292
+ and renormalization scale µR by a factor of 2 as a means to es-
293
+ timate the effect of un-calculated higher orders are illustrated
294
+ for the shapes of two representative kinematic distributions of
295
+ the top quark. For moderate energy scales, one observes a
296
+ significant reduction of the impact of the scale variation.
297
+ 0
298
+ 200
299
+ 400
300
+ 600
301
+ 800
302
+ 1000
303
+ k⊥
304
+ 1 [GeV]
305
+ 10−6
306
+ 10−5
307
+ 10−4
308
+ 10−3
309
+ 10−2
310
+ 1
311
+ σ
312
+
313
+ dk⊥
314
+ 1
315
+ [GeV−1]
316
+ LO
317
+ NLO
318
+ −4
319
+ −3
320
+ −2
321
+ −1
322
+ 0
323
+ 1
324
+ 2
325
+ 3
326
+ 4
327
+ y
328
+ 0.00
329
+ 0.05
330
+ 0.10
331
+ 0.15
332
+ 0.20
333
+ 0.25
334
+ 0.30
335
+ 1
336
+ σ
337
+
338
+ dy
339
+ LO
340
+ NLO
341
+ FIG. 4. Effect of scale variations on the shapes of kinematic distri-
342
+ butions of the top quark.
343
+ B.
344
+ Unweighted event generation
345
+ From the calculation of the cross section at NLO accu-
346
+ racy outlined in the previous section, event weights can be
347
+ calculated which can be used to generate unweighted events
348
+ which are distributed according to the NLO cross section. As
349
+ described in Ref. [25], a sensible event definition is manda-
350
+ tory for obtaining meaningful event weights at NLO accuracy.
351
+ In particular, the event definition must not fix the invariant
352
+ masses or the overall transverse momentum of the final-state
353
+ objects. For top-quark pair production, we define events ⃗x
354
+ in terms of the transverse momentum k⊥
355
+ 1 , azimuthal angle φ1
356
+ and pseudo rapidity η1 of the top quark as well as the pseudo
357
+ rapidity of the antitop quark η2:
358
+ ⃗x = (k⊥
359
+ 1 ,φ1,η1,η2) .
360
+ (2)
361
+ The two-particle Born phase space as well as the three-particle
362
+ phase space for the real radiation can be parameterized in
363
+ terms of these variables
364
+ dR2 =
365
+ k⊥
366
+ 1
367
+ 3 coshη1 coshη2
368
+ 8π2 E1 E2 shad
369
+ dk⊥
370
+ 1 dφ1 dη1 dη2 ,
371
+ (3)
372
+ dR3 =
373
+ k⊥
374
+ 1
375
+ 2 k⊥
376
+ 2 k⊥
377
+ 3
378
+ 2 coshη1 coshη2 coshη3
379
+ 128π5 E1 E2 E3 shad
380
+ × dk⊥
381
+ 1 dφ1 dη1 dη2 dk⊥
382
+ 3 dφ3 dη3 ,
383
+ (4)
384
+
385
+ 4
386
+ 0.00
387
+ 0.01
388
+ 0.02
389
+ 0.03
390
+ 0.04
391
+ 0.05
392
+ 0.06
393
+ 1
394
+ σNLO
395
+ dσNLO
396
+ dk⊥
397
+ 1
398
+ [pb GeV−1]
399
+ results
400
+ reference
401
+ 0
402
+ 100
403
+ 200
404
+ 300
405
+ 400
406
+ 500
407
+ k⊥
408
+ 1 [GeV]
409
+ −2σ
410
+ −σ
411
+ σ
412
+
413
+ pull
414
+ 0.00
415
+ 0.01
416
+ 0.02
417
+ 0.03
418
+ 0.04
419
+ 0.05
420
+ 0.06
421
+ 0.07
422
+ 1
423
+ σNLO
424
+ dσNLO
425
+ dη1
426
+ [pb]
427
+ results
428
+ reference
429
+ −10.0
430
+ −7.5
431
+ −5.0
432
+ −2.5
433
+ 0.0
434
+ 2.5
435
+ 5.0
436
+ 7.5
437
+ 10.0
438
+ η1
439
+ −2σ
440
+ −σ
441
+ σ
442
+
443
+ pull
444
+ FIG. 5. Validation of the event generation: Comparison of differen-
445
+ tial distributions of the top quark obtained from unweighted events
446
+ with results from madgraph5 aMC@NLO.
447
+ where Ei denotes the energy of particle i and shad is the
448
+ hadronic center-of-mass energy squared. The additional ra-
449
+ diation occurring in the real corrections is parametrized by
450
+ the transverse momentum k⊥
451
+ 3 , the azimuthal angle φ3 and the
452
+ pseudo rapidity η3 of the radiated parton. These parametriza-
453
+ tions allow together with Eq. (1) to calculate the event weight
454
+ at NLO accuracy for each event ⃗x using
455
+ d4σNLO
456
+ dk⊥
457
+ 1 dφ1 dη1 dη2
458
+ =
459
+ d4σLO
460
+ dk⊥
461
+ 1 dφ1 dη1 dη2
462
+ +
463
+
464
+ d7σHard
465
+ dk⊥
466
+ 1 dφ1 dη1 dη2 dk⊥
467
+ 3 dφ3 dη3
468
+ dk⊥
469
+ 3 dφ3 dη3
470
+ +
471
+ d4σvirtual
472
+ dk⊥
473
+ 1 dφ1 dη1 dη2
474
+ +
475
+ d4σsoft/collinear
476
+ dk⊥
477
+ 1 dφ1 dη1 dη2
478
+ .
479
+ (5)
480
+ The weights calculated in this way can also be used to gener-
481
+ ate unweighted events with, e.g., the von-Neumann acception-
482
+ rejection method ([38]). Fig. 5 shows the distribution of the
483
+ unweighted events compared to kinematic distributions ob-
484
+ tained with the madgraph5 aMC@NLO code [37]. The events
485
+ obtained from the event weights defined in Eq. (5) are within
486
+ the uncertainties in perfect agreement with the predictions ob-
487
+ tained using madgraph5 aMC@NLO. In appendix A, Fig. 12
488
+ we show in addition the calculation of the Mt¯t- and the φ1-
489
+ distribution with the same perfect agreement. The compari-
490
+ son of the generated unweighted events with the results from
491
+ madgraph5 aMC@NLO also serves as a further validation for
492
+ the choice of the slicing parameter.
493
+ III.
494
+ APPLICATION: DETERMINATION OF THE
495
+ TOP-QUARK MASS USING THE MEM AT NLO
496
+ The event weights defined in Eq. (5) can be used in the
497
+ MEM to calculate the likelihood at NLO accuracy for a given
498
+ sample of N events {⃗xi}, i = 1,...,N:
499
+ L�{⃗xi} | mt
500
+ � =
501
+ 1
502
+ (σNLO(mt))N
503
+ N
504
+
505
+ i=1
506
+ d4σNLO(mt)
507
+ dk⊥
508
+ 1 dφ1 dη1 dη2
509
+ ������⃗x=⃗xi
510
+ (6)
511
+ where the dependence of the total and differential cross
512
+ sections on the value of the top-quark mass is high-
513
+ lighted—exemplarily for generic model parameters. Here, the
514
+ so-called transfer functions, parametrizing the probability of
515
+ measuring a certain signal in the detector given a particular
516
+ partonic configuration, are set to delta functions. The trans-
517
+ fer functions account for particle decays, additional radiation
518
+ as well as detector effects. Thus, this choice for the transfer
519
+ functions corresponds to the assumption of a perfect detector
520
+ which allows a perfect unfolding from the detector signals to
521
+ partonic variables. While for variables related to angles, set-
522
+ ting the transfer function to delta function may give a reason-
523
+ able approximation, this is not necessarily true in case of vari-
524
+ ables sensitive to energies. In future applications non-trivial
525
+ transfer functions should thus be incorporated. This may be
526
+ done using invertible neural networks trained to a full simula-
527
+ tion as discussed in great detail in Ref. [39]. This is however
528
+ beyond the scope of this work which focuses on exploring
529
+ the potential of the method for top-quark mass measurements.
530
+ Maximizing the likelihood with respect to the parameter mt
531
+ yields an estimator for the top-quark mass ˆmt:
532
+ L�{⃗xi} | ˆmt
533
+ � = max
534
+ mt
535
+ �L�{⃗xi} | mt
536
+ �� .
537
+ (7)
538
+ Because the event weights in Eq. (6) are normalized to yield
539
+ probabilities, the MEM is only sensitive to the shapes of kine-
540
+ matic distributions but not to the total number of events in the
541
+ sample. To also benefit from the information of the total event
542
+ number the so-called extended likelihood can be used. The
543
+ extended likelihood is obtained from the likelihood in Eq. (7)
544
+ by multiplying with the Poisson probability for observing N
545
+ events when the expected number of events is given by the
546
+ total cross section times the integrated luminosity Lint of the
547
+ collider:
548
+ Lext
549
+ �{⃗xi} | mt
550
+ � = (σNLO(mt) Lint)N
551
+ N!
552
+ e−σNLO(mt) Lint L�{⃗xi} | mt
553
+ �.
554
+ (8)
555
+ In Fig. 6 we show the likelihood obtained analysing 9900
556
+ unweighted top-quark pair events distributed according to the
557
+ NLO prediction. Likelihood (upper plot) as well as the ex-
558
+ tended likelihood (lower plot) have been studied. The green
559
+ curves correspond to likelihoods calculated at NLO accuracy
560
+
561
+ 5
562
+ 160
563
+ 165
564
+ 170
565
+ 175
566
+ 180
567
+ 185
568
+ 190
569
+ m [GeV]
570
+ −50
571
+ −25
572
+ 0
573
+ 25
574
+ 50
575
+ 75
576
+ 100
577
+ 125
578
+ 150
579
+ − log
580
+
581
+ L(m)
582
+ Lmax
583
+
584
+ mtrue = 173.2 GeV, 9900 analysed events
585
+ LO prediction:
586
+ ˆm2µ0
587
+ 0.5µ0 = 169.77 ± 1.18+2.21
588
+ −2.66GeV
589
+ NLO prediction:
590
+ ˆm2µ0
591
+ 0.5µ0 = 173.65 ± 1.20+0.30
592
+ +0.17GeV
593
+ 140
594
+ 150
595
+ 160
596
+ 170
597
+ 180
598
+ 190
599
+ m [GeV]
600
+ −100
601
+ 0
602
+ 100
603
+ 200
604
+ 300
605
+ 400
606
+ 500
607
+ 600
608
+ 700
609
+ 800
610
+ − log
611
+
612
+ Lext(m)
613
+ Lext,max
614
+
615
+ mtrue = 173.2 GeV, 9900 analysed events
616
+ LO prediction:
617
+ ˆm2µ0
618
+ 0.5µ0 = 160.22 ± 0.34−6.97
619
+ +7.86GeV
620
+ NLO prediction:
621
+ ˆm2µ0
622
+ 0.5µ0 = 173.68 ± 0.36−4.11
623
+ +3.69GeV
624
+ FIG. 6.
625
+ Analysis of unweighted events following the fixed-order
626
+ NLO prediction with (extended) likelihoods calculated at LO and
627
+ NLO accuracy.
628
+ ˆmt ±∆stat
629
+
630
+ 2µ0
631
+ sys
632
+
633
+ µ0/2
634
+ sys
635
+ [GeV]
636
+ likelihood
637
+ LO prediction
638
+ NLO prediction
639
+ L 169.77±1.18+2.21
640
+ −2.66 173.65±1.20+0.30
641
+ +0.17
642
+ Lext 160.22±0.34−6.97
643
+ +7.86 173.68±0.36−4.11
644
+ +3.69
645
+ TABLE I. Extracted values for the estimator of the top-quark mass
646
+ from 9900 unweighted events following the fixed-order NLO predic-
647
+ tion.
648
+ using different choices for the factorization and renormaliza-
649
+ tion scale. The orange curves are obtained using only LO
650
+ predictions again for different scale settings in the likelihood
651
+ calculation. The analysed events are generated for an input
652
+ value of the top-quark mass of mtrue = 173.2 GeV and the scale
653
+ choice µF = µR = µ0 = mt. The extracted values for the estima-
654
+ tor of the top-quark mass together with statistical and system-
655
+ atic uncertainties are summarized in Tab. I. The estimators ˆmt
656
+ are determined from the minima of the parabolas fitted to the
657
+ negative logarithms of the likelihood functions while the sta-
658
+ tistical uncertainties ∆stat are estimated from their widths. The
659
+ systematic uncertainties ∆2µ0
660
+ sys , ∆µ0/2
661
+ sys
662
+ are estimated by varying
663
+ the scale by a factor 2 around µ0. As can be seen from Fig. 6
664
+ and Tab. I, both the NLO and the LO analyses have similar
665
+ statistical uncertainties of about 1.2 GeV and 0.35 GeV de-
666
+ pending on whether the likelihood or the extended likelihood
667
+ is employed. As expected, the statistical uncertainties are to
668
+ good approximation independent from the perturbative order
669
+ of the theoretical predictions of the cross sections. Taking the
670
+ statistical uncertainties into account, the extracted estimators
671
+ from the NLO analyses are in perfect agreement with the input
672
+ value. For the likelihood as well as for the extended likelihood
673
+ the NLO differential cross section matches the probability dis-
674
+ tribution underlying the event sample thus leading to an unbi-
675
+ ased estimator. Obviously, taking into account the information
676
+ on the total number of events via the extended likelihood leads
677
+ to a reduction of the statistical uncertainties as additional in-
678
+ formation contained in the event sample is used. Since the
679
+ cross section shows a much stronger residual scale depen-
680
+ dence than the normalized distributions, the extended likeli-
681
+ hood leads however to a significantly larger systematic un-
682
+ certainty due to uncalculated higher order corrections. In ad-
683
+ dition, the uncertainty of the luminosity measurement which
684
+ is not taken into account in the extended likelihood analysis
685
+ leads to an additional uncertainty outweighing the gain in the
686
+ reduced statistical uncertainty.
687
+ The estimators from the LO analyses on the other hand
688
+ show a bias of 2.9×∆stat and 38×∆stat depending on whether
689
+ the likelihood or the extended likelihood is used. It should
690
+ be emphasized that the occurrence of a bias per se does not
691
+ rule out the application of the MEM. It is well known, that
692
+ the MEM typically leads to a bias if the probability distribu-
693
+ tion used in the evaluation of the likelihood does not match
694
+ the distribution underlying the event sample. However, via
695
+ a calibration procedure it is possible to compensate the bias
696
+ and obtain an unbiased determination. Since the calibration
697
+ can introduce additional uncertainties the preferred situation
698
+ is that the probability distribution used in the likelihood de-
699
+ termination matches the probability distribution of the event
700
+ sample as best as possible thus reducing the need of addi-
701
+ tional calibration. As shown in section II, the NLO correc-
702
+ tions dominantly alter the normalization of the kinematic dis-
703
+ tributions rather than their shape. Accordingly, the analysis
704
+ employing extended likelihoods which is sensitive to the total
705
+ cross section shows thus a much stronger separation between
706
+ the results obtained from the NLO and LO predictions.
707
+ Significant improvement from taking NLO corrections into
708
+ account can be seen in their impact on the theoretical uncer-
709
+ tainties: In the NLO analyses the theoretical uncertainties due
710
+ to uncalculated higher order corrections are roughly halved
711
+ with respect to the LO analyses.
712
+ In order to further study the robustness of the approach
713
+ and having a more realistic simulation, unweighted events ob-
714
+ tained from a parton shower simulation matched to the NLO
715
+ calculation can be used. The parton shower resums certain
716
+ logarithmic corrections to all orders on top of the fixed-order
717
+ NLO parton level calculation. Since these additional correc-
718
+ tions present in the event sample are not accounted for in the
719
+ fixed-order-only likelihood calculation based on Eq. (5), there
720
+ is a mismatch between the underlying probability distribution
721
+ of the generated events and the basis of the likelihood calcula-
722
+ tion (Eq. (5)). As seen before in case of the LO analysis, this
723
+
724
+ 6
725
+ 0.00
726
+ 0.01
727
+ 0.02
728
+ 0.03
729
+ 0.04
730
+ 0.05
731
+ 0.06
732
+ 1
733
+ σNLO
734
+ dσNLO
735
+ dk⊥
736
+ 1
737
+ [GeV−1]
738
+ POWHEG events
739
+ fixed-order NLO
740
+ 0
741
+ 100
742
+ 200
743
+ 300
744
+ 400
745
+ 500
746
+ k⊥
747
+ 1 [GeV]
748
+ 0
749
+ 1
750
+ 2
751
+ POWHEG
752
+ fixed-order NLO
753
+ 0.00
754
+ 0.02
755
+ 0.04
756
+ 0.06
757
+ 0.08
758
+ 1
759
+ σNLO
760
+ dσNLO
761
+ dη1
762
+ POWHEG events
763
+ fixed-order NLO
764
+ −10.0
765
+ −7.5
766
+ −5.0
767
+ −2.5
768
+ 0.0
769
+ 2.5
770
+ 5.0
771
+ 7.5
772
+ 10.0
773
+ η1
774
+ 0
775
+ 1
776
+ 2
777
+ POWHEG
778
+ fixed-order NLO
779
+ 0.00
780
+ 0.01
781
+ 0.02
782
+ 0.03
783
+ 0.04
784
+ 0.05
785
+ 0.06
786
+ 1
787
+ σNLO
788
+ dσNLO
789
+ dy
790
+ POWHEG events
791
+ fixed-order NLO
792
+ −4
793
+ −2
794
+ 0
795
+ 2
796
+ 4
797
+ y
798
+ 0
799
+ 1
800
+ 2
801
+ POWHEG
802
+ fixed-order NLO
803
+ FIG. 7. Impact of the parton shower on the kinematic distributions
804
+ of the top quark.
805
+ mismatch can cause a systematic bias in the extracted estima-
806
+ tor.
807
+ Fig.
808
+ 7
809
+ shows
810
+ the
811
+ distributions
812
+ obtained
813
+ using
814
+ POWHEG+Pythia
815
+ [27–31]
816
+ to
817
+ generate
818
+ about
819
+ the
820
+ same
821
+ number of events as in the case of the fixed-order analysis.
822
+ The parton shower only mildly affects the kinematic distribu-
823
+ tions relevant for the event definition. Further distributions
824
+ supporting this observation are shown in Fig. 13 in the ap-
825
+ pendix A. Apart from minor differences in the k⊥
826
+ 1 distribution
827
+ at low k⊥
828
+ 1 , a small difference is visible for k⊥
829
+ 1 > 300 GeV,
830
+ where the POWHEG+Pythia events lead to a slightly harder
831
+ distribution than the events generated from the fixed order
832
+ NLO cross section.
833
+ 160
834
+ 165
835
+ 170
836
+ 175
837
+ 180
838
+ 185
839
+ 190
840
+ m [GeV]
841
+ 0
842
+ 100
843
+ 200
844
+ 300
845
+ 400
846
+ − log
847
+
848
+ L(m)
849
+ Lmax
850
+
851
+ mtrue = 173.2 GeV, 9232 analysed events
852
+ LO prediction:
853
+ ˆm2µ0
854
+ 0.5µ0 = 173.88 ± 1.22+2.13
855
+ −2.57GeV
856
+ NLO prediction:
857
+ ˆm2µ0
858
+ 0.5µ0 = 177.93 ± 1.24+0.22
859
+ +0.38GeV
860
+ FIG. 8. Analysis of 9232 POWHEG+Pythia events with fixed-order
861
+ likelihoods calculated at LO and NLO accuracy.
862
+ ˆmt ±∆stat
863
+
864
+ 2µ0
865
+ sys
866
+
867
+ µ0/2
868
+ sys
869
+ [GeV]
870
+ likelihood
871
+ LO prediction
872
+ NLO prediction
873
+ L 173.88±1.22+2.13
874
+ −2.57 177.93±1.24+0.22
875
+ +0.38
876
+ TABLE II. Extracted values for the estimator of the top-quark mass
877
+ from unweighted POWHEG+Pythia events following the NLO predic-
878
+ tion matched to a parton shower.
879
+ The result of the likelihood analysis using LO and NLO
880
+ cross section predictions is shown in Fig. 8 and summarized
881
+ in Tab. II. We do not study the extended likelihood, since the
882
+ extended likelihood leads to much larger systematic uncer-
883
+ tainties. Again the statistical uncertainties are very similar for
884
+ the LO and NLO analysis, while the systematic uncertainty is
885
+ significantly reduced when using NLO predictions. In both
886
+ cases we observe a shift of about 4 GeV compared to the re-
887
+ sults based on the event sample generated from the fixed-order
888
+ NLO predictions. The large shift shows the high sensitivity of
889
+ the MEM with respect to tiny changes in the distributions. In
890
+ a mass determination from events registered at the LHC this
891
+ shift must be taken into account via a calibration procedure.
892
+ It is remarkable that the shift is, taking the uncertainties into
893
+ account, independent from the perturbative order of the em-
894
+ ployed likelihood calculation. This is similar to what has been
895
+ observed in Refs. [24, 25]. The LO likelihood analysis repro-
896
+ duces the true mass value used in the POWHEG+Pythia analy-
897
+ sis. However, this is most likely accidental and due to the fact
898
+ that the LO fixed-order results undershoot the true mass value
899
+ by about 4 GeV which is compensated by the aforementioned
900
+ shift.
901
+ IV.
902
+ CONCLUSION
903
+ In this work the MEM at NLO is applied to top-quark
904
+ pair production at the LHC. To investigate the potential of
905
+ the matrix element method to measure the top-quark mass,
906
+ the MEM at NLO is applied to pseudo-data: unweighted
907
+
908
+ 7
909
+ events generated from the fixed-order NLO cross section
910
+ as well as events obtained using POWHEG+Pythia incorpo-
911
+ rating the parton shower effects. Using pseudo-data based
912
+ on POWHEG+Pythia allows one to study the effect of the parton
913
+ shower and gives a more realistic simulation. Including the
914
+ NLO corrections in the likelihood calculation leads to a signif-
915
+ icant reduction of the theoretical uncertainties of the extracted
916
+ top-quark mass, while the statistical uncertainties remain al-
917
+ most unchanged compared to the LO analysis. We stress that
918
+ the uncertainties due to scale variation cannot be reduced by a
919
+ calibration. The reduction of the uncertainties associated with
920
+ the scale variation when going from LO to NLO thus presents
921
+ an important improvement and a strong argument in favour of
922
+ the MEM at NLO accuracy.
923
+ Another important observation is the fact that the extended
924
+ likelihood yields a significant improvement in terms of the sta-
925
+ tistical uncertainties. However, in practical applications this
926
+ gain in precision is completely outweighed by the theoretical
927
+ uncertainties of the number of expected events. This can be
928
+ understood from the fact that, much as the NLO corrections
929
+ (see Fig. 3), the scale variations do not dramatically change
930
+ the shape of the kinematic distributions but mostly their nor-
931
+ malization (see Fig. 4) thereby making the extended likeli-
932
+ hood analyses more sensitive to their effect. Additionally, em-
933
+ ploying the extended likelihood requires precise knowledge of
934
+ the integrated luminosity of the LHC. The dependence on this
935
+ parameter introduces an additional source of systematic un-
936
+ certainty. This has to be taken into account for future experi-
937
+ mental applications of the MEM with realistic event numbers
938
+ for abundantly produced top-quark pairs at the LHC which
939
+ will most likely be dominated by systematic uncertainties. As
940
+ has already been stated before ([23–26]), for parameter infer-
941
+ ence with the MEM it is mandatory to perform the likelihood
942
+ calculation at least at NLO accuracy in order to properly fix
943
+ the renormalization scheme of the extracted parameter.
944
+ The application of the MEM at NLO to top-quark pair
945
+ events at the LHC can offer an alternative approach to deter-
946
+ mine the top-quark mass with high accuracy. As has been
947
+ demonstrated in this work, already for a few ten thousand
948
+ events the precision of the analysis becomes dominated by
949
+ systematic uncertainties. As the LHC produces millions of
950
+ top-quark pairs, the analysis could be performed with a rather
951
+ small fraction of cherry-picked events allowing to minimize
952
+ the overall systematic uncertainty. The results obtained in this
953
+ article suggest that top-quark mass determination with an un-
954
+ certainty below 1 GeV could be feasible. Of course, for a re-
955
+ alistic application of the MEM to experimental data, transfer
956
+ functions accounting for decays, additional radiation and de-
957
+ tector effects have to be considered. In addition, as the analy-
958
+ sis based on the events including parton shower effects shows,
959
+ a further calibration is required.
960
+ ACKNOWLEDGMENTS
961
+ This work was supported in part by the Bundesministerium
962
+ für Bildung und Forschung under contract 05H18KHCA1.
963
+ 0
964
+ 10
965
+ 20
966
+ 30
967
+ 40
968
+ 50
969
+ 60
970
+ dσNLO
971
+ dE1
972
+ [pb GeV−1]
973
+ result
974
+ reference
975
+ 0
976
+ 200
977
+ 400
978
+ 600
979
+ 800
980
+ 1000
981
+ E1 [GeV]
982
+ −2σ
983
+ −σ
984
+ σ
985
+
986
+ pull
987
+ 0
988
+ 10
989
+ 20
990
+ 30
991
+ 40
992
+ 50
993
+ dσNLO
994
+ dη1
995
+ [pb]
996
+ result
997
+ reference
998
+ −10.0
999
+ −7.5
1000
+ −5.0
1001
+ −2.5
1002
+ 0.0
1003
+ 2.5
1004
+ 5.0
1005
+ 7.5
1006
+ 10.0
1007
+ η1
1008
+ −2σ
1009
+ −σ
1010
+ σ
1011
+
1012
+ pull
1013
+ FIG. 9. Validation of the implementation: Comparison of differential
1014
+ distributions of the top quark obtained in this work with results from
1015
+ madgraph5 aMC@NLO.
1016
+ Appendix A: Additional results on distributions used for the
1017
+ validation
1018
+ In this appendix we show further cross checks used for the
1019
+ validation of the implementation. Fig. 9 shows comparisons
1020
+ of NLO predictions for differential distributions calculated
1021
+ in this work with distributions obtained from madgraph5
1022
+ aMC@NLO [37] which is based on the dipole subtraction
1023
+ method [40, 41]. The pull distributions in the bottom plots
1024
+ of Fig. 9 and Fig. 10 illustrate the agreement between both
1025
+ implementations within statistical uncertainties. These compar-
1026
+ isons serve as a further validation for the choice of the slicing
1027
+ parameter. Fig. 11 shows the NLO corrections (upper part)
1028
+ together with the k-factors (lower part) for the Mt¯t and the φ1-
1029
+ distribution. Similar to what is shown in Fig. 3 again a flat k-
1030
+ factor is observed. As a check of the event generation and the
1031
+ unweighting procedure Fig. 12 shows distributions calculated
1032
+ from the generated unweighted events compared with a calcu-
1033
+ lation using madgraph5 aMC@NLO [37]. Similar to Fig. 7 we
1034
+ show in Fig. 13 for further distributions the comparison of dis-
1035
+ tributions obtained at fixed-order NLO accuracy with results
1036
+ using POWHEG+Pythia.
1037
+
1038
+ 8
1039
+ 0
1040
+ 5
1041
+ 10
1042
+ 15
1043
+ dσNLO
1044
+ dφ1
1045
+ [pb]
1046
+ result
1047
+ reference
1048
+ −4
1049
+ −3
1050
+ −2
1051
+ −1
1052
+ 0
1053
+ 1
1054
+ 2
1055
+ 3
1056
+ 4
1057
+ φ1
1058
+ −2σ
1059
+ −σ
1060
+ σ
1061
+
1062
+ pull
1063
+ 0
1064
+ 10
1065
+ 20
1066
+ 30
1067
+ 40
1068
+ dσNLO
1069
+ dk⊥
1070
+ 1
1071
+ [pb GeV−1]
1072
+ result
1073
+ reference
1074
+ 0
1075
+ 100
1076
+ 200
1077
+ 300
1078
+ 400
1079
+ 500
1080
+ k⊥
1081
+ 1 [GeV]
1082
+ −2σ
1083
+ −σ
1084
+ σ
1085
+
1086
+ pull
1087
+ FIG. 10. Same as Fig. 9 but for the φ1- and the k⊥
1088
+ 1 -distribution.
1089
+ 0
1090
+ 10
1091
+ 20
1092
+ 30
1093
+ 40
1094
+
1095
+ dMt¯t [pb GeV−1]
1096
+ dσNLO
1097
+ dσLO
1098
+ 300
1099
+ 400
1100
+ 500
1101
+ 600
1102
+ 700
1103
+ 800
1104
+ 900
1105
+ 1000
1106
+ Mt¯t [GeV]
1107
+ 1.0
1108
+ 1.5
1109
+ 2.0
1110
+ dσNLO
1111
+ dσLO
1112
+ 0
1113
+ 5
1114
+ 10
1115
+ 15
1116
+ 20
1117
+
1118
+ dφ1 [pb]
1119
+ dσNLO
1120
+ dσLO
1121
+ −4
1122
+ −3
1123
+ −2
1124
+ −1
1125
+ 0
1126
+ 1
1127
+ 2
1128
+ 3
1129
+ 4
1130
+ φ1
1131
+ 1.0
1132
+ 1.5
1133
+ 2.0
1134
+ dσNLO
1135
+ dσLO
1136
+ FIG. 11. Same as Fig. 3 but for the Mt¯t and the φ1-distribution.
1137
+
1138
+ 9
1139
+ 0.00
1140
+ 0.01
1141
+ 0.02
1142
+ 0.03
1143
+ 0.04
1144
+ 0.05
1145
+ 0.06
1146
+ 1
1147
+ σNLO
1148
+ dσNLO
1149
+ dMt¯t
1150
+ [pb GeV−1]
1151
+ results
1152
+ reference
1153
+ 300
1154
+ 400
1155
+ 500
1156
+ 600
1157
+ 700
1158
+ 800
1159
+ 900
1160
+ 1000
1161
+ Mt¯t[GeV]
1162
+ −2σ
1163
+ −σ
1164
+ σ
1165
+
1166
+ pull
1167
+ 0.000
1168
+ 0.005
1169
+ 0.010
1170
+ 0.015
1171
+ 0.020
1172
+ 0.025
1173
+ 1
1174
+ σNLO
1175
+ dσNLO
1176
+ dφ1
1177
+ [pb]
1178
+ results
1179
+ reference
1180
+ −4
1181
+ −3
1182
+ −2
1183
+ −1
1184
+ 0
1185
+ 1
1186
+ 2
1187
+ 3
1188
+ 4
1189
+ φ1
1190
+ −2σ
1191
+ −σ
1192
+ σ
1193
+
1194
+ pull
1195
+ FIG. 12. Same as Fig. 5 but for the Mt¯t- and the φ1-distribution.
1196
+ 0.00
1197
+ 0.02
1198
+ 0.04
1199
+ 0.06
1200
+ 0.08
1201
+ 0.10
1202
+ 1
1203
+ σNLO
1204
+ dσNLO
1205
+ dE1
1206
+ [GeV−1]
1207
+ POWHEG events
1208
+ fixed-order NLO
1209
+ 0
1210
+ 200
1211
+ 400
1212
+ 600
1213
+ 800
1214
+ 1000
1215
+ E1 [GeV]
1216
+ 0
1217
+ 1
1218
+ 2
1219
+ POWHEG
1220
+ fixed-order NLO
1221
+ 0.00
1222
+ 0.01
1223
+ 0.02
1224
+ 0.03
1225
+ 0.04
1226
+ 0.05
1227
+ 0.06
1228
+ 1
1229
+ σNLO
1230
+ dσNLO
1231
+ dMt¯t
1232
+ [GeV−1]
1233
+ POWHEG events
1234
+ fixed-order NLO
1235
+ 300
1236
+ 400
1237
+ 500
1238
+ 600
1239
+ 700
1240
+ 800
1241
+ 900
1242
+ 1000
1243
+ Mt¯t [GeV]
1244
+ 0
1245
+ 1
1246
+ 2
1247
+ POWHEG
1248
+ fixed-order NLO
1249
+ 0.000
1250
+ 0.005
1251
+ 0.010
1252
+ 0.015
1253
+ 0.020
1254
+ 0.025
1255
+ 0.030
1256
+ 1
1257
+ σNLO
1258
+ dσNLO
1259
+ dφ1
1260
+ POWHEG events
1261
+ fixed-order NLO
1262
+ −4
1263
+ −3
1264
+ −2
1265
+ −1
1266
+ 0
1267
+ 1
1268
+ 2
1269
+ 3
1270
+ 4
1271
+ φ1
1272
+ 0
1273
+ 1
1274
+ 2
1275
+ POWHEG
1276
+ fixed-order NLO
1277
+ FIG. 13. Same as Fig. 7 but for the energy, Mt¯t- and φ1-distribution.
1278
+
1279
+ 10
1280
+ [1] P. Nason, S. Dawson, and R. K. Ellis, Nucl. Phys. B 303, 607
1281
+ (1988).
1282
+ [2] P. Nason, S. Dawson, and R. K. Ellis, Nucl. Phys. B 327, 49
1283
+ (1989), [Erratum: Nucl.Phys.B 335, 260–260 (1990)].
1284
+ [3] W. Beenakker, H. Kuijf, W. L. van Neerven, and J. Smith, Phys.
1285
+ Rev. D 40, 54 (1989).
1286
+ [4] W. Beenakker, W. L. van Neerven, R. Meng, G. A. Schuler, and
1287
+ J. Smith, Nucl. Phys. B 351, 507 (1991).
1288
+ [5] W. Bernreuther, A. Brandenburg, Z. G. Si, and P. Uwer, Nucl.
1289
+ Phys. B 690, 81 (2004), arXiv:hep-ph/0403035.
1290
+ [6] K.
1291
+ Melnikov
1292
+ and
1293
+ M.
1294
+ Schulze,
1295
+ JHEP
1296
+ 08,
1297
+ 049
1298
+ (2009),
1299
+ arXiv:0907.3090 [hep-ph].
1300
+ [7] M. Czakon, P. Fiedler,
1301
+ and A. Mitov, Phys. Rev. Lett. 110,
1302
+ 252004 (2013), arXiv:1303.6254 [hep-ph].
1303
+ [8] M. Czakon, D. Heymes, and A. Mitov, Phys. Rev. Lett. 116,
1304
+ 082003 (2016), arXiv:1511.00549 [hep-ph].
1305
+ [9] M. Czakon, P. Fiedler, D. Heymes, and A. Mitov, JHEP 05,
1306
+ 034 (2016), arXiv:1601.05375 [hep-ph].
1307
+ [10] M. Beneke, M. Czakon, P. Falgari, A. Mitov, and C. Schwinn,
1308
+ Phys. Lett. B690, 483 (2010), arXiv:0911.5166 [hep-ph].
1309
+ [11] M. Czakon, A. Mitov,
1310
+ and G. F. Sterman, Phys. Rev. D80,
1311
+ 074017 (2009), arXiv:0907.1790 [hep-ph].
1312
+ [12] M. Beneke, P. Falgari, S. Klein, and C. Schwinn, Nucl. Phys.
1313
+ B855, 695 (2012), arXiv:1109.1536 [hep-ph].
1314
+ [13] M. Cacciari, M. Czakon, M. Mangano, A. Mitov, and P. Nason,
1315
+ Phys. Lett. B710, 612 (2012), arXiv:1111.5869 [hep-ph].
1316
+ [14] N.
1317
+ Kidonakis,
1318
+ Phys.
1319
+ Part.
1320
+ Nucl.
1321
+ 45,
1322
+ 714
1323
+ (2014),
1324
+ arXiv:1210.7813 [hep-ph].
1325
+ [15] A. Ferroglia, B. D. Pecjak, and L. L. Yang, Phys. Rev. D86,
1326
+ 034010 (2012), arXiv:1205.3662 [hep-ph].
1327
+ [16] A. Ferroglia, S. Marzani, B. D. Pecjak, and L. L. Yang, JHEP
1328
+ 01, 028 (2014), arXiv:1310.3836 [hep-ph].
1329
+ [17] M. Czakon, A. Ferroglia, D. Heymes, A. Mitov, B. D. Pecjak,
1330
+ D. J. Scott, X. Wang, and L. L. Yang, JHEP 05, 149 (2018),
1331
+ arXiv:1803.07623 [hep-ph].
1332
+ [18] W. Beenakker, A. Denner, W. Hollik, R. Mertig, T. Sack, and
1333
+ D. Wackeroth, Nucl. Phys. B411, 343 (1994).
1334
+ [19] W. Bernreuther, M. Fuecker,
1335
+ and Z.-G. Si, Phys. Rev. D74,
1336
+ 113005 (2006), arXiv:hep-ph/0610334 [hep-ph].
1337
+ [20] J. H. Kühn, A. Scharf,
1338
+ and P. Uwer, Eur. Phys. J. C51, 37
1339
+ (2007), arXiv:hep-ph/0610335 [hep-ph].
1340
+ [21] S. Moretti, M. R. Nolten, and D. A. Ross, Phys. Lett. B639,
1341
+ 513 (2006), [Erratum: Phys. Lett.B660,607(2008)], arXiv:hep-
1342
+ ph/0603083 [hep-ph].
1343
+ [22] D. Pagani, I. Tsinikos, and M. Zaro, Eur. Phys. J. C76, 479
1344
+ (2016), arXiv:1606.01915 [hep-ph].
1345
+ [23] T.
1346
+ Martini
1347
+ and
1348
+ P.
1349
+ Uwer,
1350
+ JHEP
1351
+ 09,
1352
+ 083
1353
+ (2015),
1354
+ arXiv:1506.08798 [hep-ph].
1355
+ [24] T. Martini and P. Uwer, (2017), arXiv:1712.04527 [hep-ph].
1356
+ [25] M. Kraus, T. Martini, and P. Uwer, Phys. Rev. D 100, 076010
1357
+ (2019), arXiv:1901.08008 [hep-ph].
1358
+ [26] M. Kraus, T. Martini, S. Peitzsch,
1359
+ and P. Uwer,
1360
+ (2019),
1361
+ arXiv:1908.09100 [hep-ph].
1362
+ [27] P. Nason, JHEP 11, 040 (2004), arXiv:hep-ph/0409146.
1363
+ [28] T. Sjostrand, S. Mrenna,
1364
+ and P. Z. Skands, JHEP 05, 026
1365
+ (2006), arXiv:hep-ph/0603175.
1366
+ [29] S. Frixione, P. Nason, and C. Oleari, JHEP 11, 070 (2007),
1367
+ arXiv:0709.2092 [hep-ph].
1368
+ [30] S. Frixione, P. Nason, and G. Ridolfi, JHEP 09, 126 (2007),
1369
+ arXiv:0707.3088 [hep-ph].
1370
+ [31] S. Alioli, P. Nason, C. Oleari, and E. Re, JHEP 06, 043 (2010),
1371
+ arXiv:1002.2581 [hep-ph].
1372
+ [32] W. T. Giele, E. W. N. Glover, and D. A. Kosower, Nucl. Phys.
1373
+ B403, 633 (1993), arXiv:hep-ph/9302225 [hep-ph].
1374
+ [33] S. Badger, R. Sattler, and V. Yundin, Phys. Rev. D 83, 074020
1375
+ (2011), arXiv:1101.5947 [hep-ph].
1376
+ [34] T. Kinoshita, J. Math. Phys. 3, 650 (1962).
1377
+ [35] T. D. Lee and M. Nauenberg, Phys. Rev. 133, B1549 (1964).
1378
+ [36] M. Aliev, H. Lacker, U. Langenfeld, S. Moch, P. Uwer, et al.,
1379
+ Comput.Phys.Commun. 182, 1034 (2011), arXiv:1007.1327
1380
+ [hep-ph].
1381
+ [37] J. Alwall, R. Frederix, S. Frixione, V. Hirschi, F. Maltoni,
1382
+ O. Mattelaer, H. S. Shao, T. Stelzer, P. Torrielli, and M. Zaro,
1383
+ JHEP 07, 079 (2014), arXiv:1405.0301 [hep-ph].
1384
+ [38] J. von Neumann, in Monte Carlo Method, National Bureau of
1385
+ Standards Applied Mathematics Series, Vol. 12, edited by A. S.
1386
+ Householder, G. E. Forsythe, and H. H. Germond (US Gov-
1387
+ ernment Printing Office, Washington, DC, 1951) Chap. 13, pp.
1388
+ 36–38.
1389
+ [39] A. Butter, T. Heimel, T. Martini, S. Peitzsch,
1390
+ and T. Plehn,
1391
+ (2022), arXiv:2210.00019 [hep-ph].
1392
+ [40] S. Catani and M. Seymour, Nucl.Phys. B485, 291 (1997),
1393
+ arXiv:hep-ph/9605323 [hep-ph].
1394
+ [41] S. Catani, S. Dittmaier, M. H. Seymour,
1395
+ and Z. Trocsanyi,
1396
+ Nucl.Phys. B627, 189 (2002), arXiv:hep-ph/0201036 [hep-ph].
1397
+
09E1T4oBgHgl3EQflAR-/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
0NE1T4oBgHgl3EQfRQO7/content/tmp_files/2301.03051v1.pdf.txt ADDED
@@ -0,0 +1,1086 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.03051v1 [math.RA] 8 Jan 2023
2
+ FUNCTORS BETWEEN REPRESENTATION CATEGORIES.
3
+ UNIVERSAL MODULES
4
+ A. L. AGORE
5
+ Abstract. Let g and h be two Lie algebras with h finite dimensional and consider
6
+ A = A(h, g) to be the corresponding universal algebra as introduced in [4]. Given an
7
+ A-module U and a Lie h-module V we show that U ⊗ V can be naturally endowed
8
+ with a Lie g-module structure. This gives rise to a functor between the category of Lie
9
+ h-modules and the category of Lie g-modules and, respectively, to a functor between
10
+ the category of A-modules and the category of Lie g-modules.
11
+ Under some finite
12
+ dimensionality assumptions, we prove that the two functors admit left adjoints which
13
+ leads to the construction of universal A-modules and universal Lie h-modules as the
14
+ representation theoretic counterparts of Manin-Tambara’s universal coacting objects
15
+ [11, 16].
16
+ Introduction
17
+ The universal coacting bialgebra/Hopf algebra on a finite dimensional (graded) asso-
18
+ ciative algebra originates in the work of Yu. I. Manin ([11]). The importance of this
19
+ construction became obvious mostly due to its interaction with non-commutative geom-
20
+ etry where it is seen as some sort of symmetry group (see [13] for more details on this
21
+ view point). The non-graded version of this construction appeared a few years later in a
22
+ paper by D. Tambara ([16]). However, as remarked in [16], the universal coacting bialge-
23
+ bra is in fact the dual of the so-called universal measuring bialgebra introduced by M.E.
24
+ Sweedler in [15]. We should note that, unlike Manin-Tambara’s construction, Sweedler’s
25
+ universal measuring bialgebra/Hopf algebra exists even in the infinite-dimensional case.
26
+ In recent years, universal (co)acting objects have been considered in various settings
27
+ and for different purposes. For instance, [8] extends Sweedler’s construction to monoids
28
+ in a braided monoidal category. On the other hand, the Manin-Tambara construction
29
+ was introduced in the setting of Poisson algebras ([2]), finite index-subfactors ([6]), su-
30
+ perpotential algebras ([7]), polynomial algebras ([14]), bialgebroids ([5]) or Lie/Leibniz
31
+ algebras ([4]). The corresponding universal coacting bialgebras/Hopf algebras, which in
32
+ certain cases carry some extra structure (e.g. a Poisson Hopf algebra structure as in [2]),
33
+ seem to play a prominent role in solving other seemingly unrelated problems such as
34
+ the classification of gradings on various kinds of algebras ([4, 12]), the description of the
35
+ automorphisms group of certain algebraic structures ([4]) and even in quantum Galois
36
+ 2010 Mathematics Subject Classification. 16D90, 16T05, 17A32, 17B10.
37
+ Key words and phrases. universal module.
38
+ This work was supported by a grant of Romanian Ministry of Research, Innovation and Digitization,
39
+ CNCS/CCCDI – UEFISCDI, project number PN-III-P4-ID-PCE-2020-0458, within PNCDI III.
40
+ 1
41
+
42
+ 2
43
+ A. L. AGORE
44
+ theory ([6]). Another related universal (co)acting construction was considered in [3] as
45
+ the Hopf algebraic analogue of the universal group of a grading and its connections to
46
+ the problem of classifying Hopf algebra coactions have been highlighted.
47
+ One of the most general constructions of universal (co)acting bialgebras/Hopf algebras,
48
+ performed in the setting of Ω-algebras, was introduced in [1] together with generalized
49
+ duality results. Necessary and sufficient conditions for the existence of universal coacting
50
+ bialgebras/Hopf algebras are provided, explaining in this general setting the need for
51
+ assuming finite dimensionality in both Manin and Tambara’s papers.
52
+ It is worth pointing out that both Sweedler and Manin-Tambara's constructions have
53
+ a categorical interpretation. More precisely, for Tambara’s construction one considers
54
+ the left adjoint, say a(A, −), of the tensor product endofunctor A ⊗ − on the category
55
+ of k-algebras, where A is a finite dimensional associative algebra. Tambara’s universal
56
+ coacting bialgebra is precisely a(A, A) which turns out to be naturally endowed with a
57
+ bialgebra structure. Similarly, for an arbitrary associative algebra A, it can be proved
58
+ that the contravariant functor Hom(−, A) taking coalgebras to (convolution) algebras
59
+ has a right adjoint which hereafter we denote by M(A, −). As before, Sweedler’s uni-
60
+ versal measuring bialgebra is exactly M(A, A) which again has a bialgebra structure.
61
+ In this paper we deal with the representation theoretic version of Manin-Tambara’s con-
62
+ struction in the Lie algebra setting. Our approach is a categorical one. More precisely,
63
+ given two fixed Lie algebras g and h, with h finite dimensional, and the corresponding
64
+ universal algebra A = A(h, g) (see [4]), we first show that the tensor product between
65
+ an A-module U and a Lie h-module V can be endowed with a Lie g-module structure
66
+ (Theorem 2.1). As a consequence, we are able to construct two ”tensor product” func-
67
+ tors between the categories of Lie modules over h and g and respectively between the
68
+ category of A-modules and the category of Lie g-modules. Under the appropriate finite
69
+ dimensionality assumptions, the two functors mentioned above are proved to admit left
70
+ adjoints. These left adjoints are given precisely by what we have called the universal
71
+ Lie h-module and the universal A-module, respectively. The two universal modules are
72
+ introduced in a constructive manner in Theorem 2.4 and Theorem 2.10. These are the
73
+ counterparts for Lie and associative representations of Manin-Tambara’s constructions.
74
+ Furthermore, the two aforementioned pairs of adjoint functors allow us to travel both
75
+ ways between the representation categories of different algebraic structures, such as Lie
76
+ and associative algebras, and to transfer certain properties which are usually preserved
77
+ by left/right adjoints.
78
+ 1. Preliminaries
79
+ This section will be used mostly as an opportunity to fix some notation and to provide
80
+ certain useful references. Let us start with a few words on notation.
81
+ 1.1. Notational conventions. All vector spaces, (bi)linear maps, unadorned tensor
82
+ products, Lie or associative algebras, bialgebras and so on are over an arbitrary com-
83
+ mutative field k. All (co)associative (co)algebras are assumed to be (co)unital.
84
+ The
85
+ notation employed for coalgebras is standard: ∆ stands for the comultiplication and ε
86
+
87
+ UNIVERSAL MODULES
88
+ 3
89
+ for the counit. We use Sweedler’s notation with implied summation for both coalge-
90
+ bras (resp. bialgebras), as in ∆(c) = c(1) ⊗ c(2), and for comodule structures: a right
91
+ C-comodule structure ρ on a vector space V will be denoted by ρ(v) = v(0) ⊗v(1). When
92
+ we need to be precise, the structures involved will be adorned. δij denotes Kronecker’s
93
+ symbol while IdX stands for the identity map on the set X.
94
+ In the sequel, k[Xsi |s = 1, · · · , n, i ∈ I] denotes the usual polynomial algebra on vari-
95
+ ables Xsi. We shall denote by Liek and ComAlgk the categories of Lie and commutative
96
+ associative algebras, respectively. Given an associative algebra A and a Lie algebra g
97
+ we denote by AM and gLM the categories of left A-modules and left Lie g-modules, re-
98
+ spectively. Recall that a (left) Lie g-module is a vector space V equipped with a bilinear
99
+ map ⇀: g × V → V such that for all x, y ∈ g and v ∈ V we have:
100
+ [x, y] ⇀ v = x ⇀ (y ⇀ v) − y ⇀ (x ⇀ v).
101
+ Throughout the paper, g and h will denote two arbitrary Lie algebras with h finite
102
+ dimensional. Let {fi | i ∈ I} and {e1, · · · , en} be two fixed bases in g and h, respectively.
103
+ We consider {τ s
104
+ i,j | i, j, s = 1, · · · , n} to be the structure constants of h, i.e. for any i,
105
+ j = 1, · · · , n we have:
106
+ [ei, ej]h =
107
+ n
108
+
109
+ s=1
110
+ τ s
111
+ i,j es.
112
+ (1)
113
+ Similarly, for any i, j ∈ I, let Bi,j ⊆ I be a finite subset of I such that for any i, j ∈ I
114
+ we have:
115
+ [fi, fj]g =
116
+
117
+ u∈Bi,j
118
+ βu
119
+ i,j fu.
120
+ (2)
121
+ 1.2. The universal algebra of h and g. We recall briefly, for further use, the con-
122
+ struction of the universal commutative algebra A(h, g) of two given Lie algebras h and
123
+ g (recall that h is always assumed to be finite dimensional). It was first introduced in
124
+ [4] in the more general setting of Leibniz algebras as the counterpart of Tambara’s con-
125
+ struction ([16]). We restrict here to the Lie algebra version of the construction which
126
+ can be summarized as follows. We have:
127
+ A(h, g) := k[Xsi |s = 1, · · · , n, i ∈ I]/J
128
+ (3)
129
+ where J is the ideal generated by all polynomials of the form
130
+ P (h, g)
131
+ (a,i,j) :=
132
+
133
+ u∈Bi,j
134
+ βu
135
+ i,j Xau −
136
+ n
137
+
138
+ s,t=1
139
+ τ a
140
+ s,t XsiXtj,
141
+ for all a = 1, · · · , n and i, j ∈ I.
142
+ (4)
143
+ When working in the universal algebra A(h, g), we denote by xsi := �
144
+ Xsi the class of Xsi.
145
+ Consequently, the following relations hold in A(h, g):
146
+
147
+ u∈Bi,j
148
+ βu
149
+ i,j xau =
150
+ n
151
+
152
+ s,t=1
153
+ τ a
154
+ s,t xsixtj,
155
+ for all a = 1, · · · , n, and i, j ∈ I.
156
+ (5)
157
+ When the (finite dimensional) Lie algebra h is fixed, the universal algebra construction
158
+ gives rise to a functor A(h, −): Liek → ComAlgk which turns out to be the left adjoint
159
+
160
+ 4
161
+ A. L. AGORE
162
+ of the tensor product h ⊗ −: ComAlgk → Liek (see [4, Theorem 2.1]), where for any
163
+ commutative algebra X the tensor product h ⊗ X is endowed with the current Lie
164
+ algebra structure. In order to avoid dealing with cumbersome notation, when there is no
165
+ fear of confusion, we denote A = A(h, g). Furthermore, if h = g, then the corresponding
166
+ universal algebra A(h, h) will be denoted simply by B. The notation is meant to highlight
167
+ the fact that B is a bialgebra; in fact, it admits a unique bialgebra structure such that h
168
+ becomes a right B-comodule with respect to ηh : h → h⊗B where η: 1Liek → h⊗A(h, −)
169
+ denotes the unit of the adjunction between A(h, −) and h ⊗ −.
170
+ More precisely, the
171
+ comultiplication and the counit on B are given for any i, j = 1, · · · , n by
172
+ ∆(xij) =
173
+ n
174
+
175
+ s=1
176
+ xis ⊗ xsj
177
+ and
178
+ ε(xij) = δi,j1k
179
+ (6)
180
+ For basic categorical concepts we refer the reader to [10] and for unexplained notions
181
+ pertaining to Lie and Hopf algebras to [9] and [15], respectively.
182
+ 2. Universal modules
183
+ Our first important result provides a way of defining a Lie g-module structure on the
184
+ tensor product between a Lie h-module and an A-module.
185
+ Theorem 2.1. Let (U, ↷) ∈ hLM be a Lie h-module and (V, ·) ∈ AM an A-module.
186
+ Then (U ⊗ V, ⇀) ∈ gLM is a Lie g-module where the action of g on U ⊗ V is given for
187
+ all i ∈ I, l ∈ U and t ∈ V by:
188
+ fi ⇀ (l ⊗ t) =
189
+ n
190
+
191
+ j=1
192
+ (ej ↷ l) ⊗ (xji · t)
193
+ (7)
194
+ Proof. Indeed, having in mind that (U, ↷) is a Lie module and A = A(h, g) is a com-
195
+ mutative algebra, we have:
196
+ [fi, fj] ⇀ (l ⊗ t)
197
+ (2)
198
+ =
199
+
200
+ u∈Bi,j
201
+ βu
202
+ i,j fu ⇀ (l ⊗ t)
203
+ (7)
204
+ =
205
+
206
+ u∈Bi,j,r=1,n
207
+ βu
208
+ i,j (er ↷ l) ⊗ (xru · t)
209
+ =
210
+
211
+ r=1,n
212
+ (er ↷ l) ⊗
213
+ � �
214
+ u∈Bi,j
215
+ βu
216
+ i,j xru
217
+
218
+ ·t
219
+ (5)
220
+ =
221
+
222
+ s,p,r=1,n
223
+ τ r
224
+ s,p (er ↷ l) ⊗ (xsixpj) · t
225
+ =
226
+
227
+ s,p=1,n
228
+ � n
229
+
230
+ r=1
231
+ τ r
232
+ s,p er
233
+
234
+ ↷ l ⊗ (xsixpj) · t
235
+ (1)
236
+ =
237
+
238
+ s,p=1,n
239
+ [es, ep] ↷ l ⊗ (xsixpj) · t
240
+ =
241
+
242
+ s,p=1,n
243
+ es ↷ (ep ↷ l) ⊗ xsi · (xpj · t) −
244
+
245
+ s,p=1,n
246
+ ep ↷ (es ↷ l) ⊗ xpj · (xsi · t)
247
+ (7)
248
+ = fi ⇀
249
+ n
250
+
251
+ p=1
252
+ (ep ↷ l) ⊗ (xpj · t) − fj ⇀
253
+ n
254
+
255
+ s=1
256
+ (es ↷ l) ⊗ (xsi · t)
257
+ (7)
258
+ = fi ⇀
259
+
260
+ fj ⇀ (l ⊗ t)
261
+
262
+ − fj ⇀
263
+
264
+ fi ⇀ (l ⊗ t)
265
+
266
+
267
+ UNIVERSAL MODULES
268
+ 5
269
+ for all i, j ∈ I and l ∈ U, t ∈ V , i.e. (U ⊗ V, ⇀) is a left Lie g-module.
270
+
271
+ Inspired by Theorem 2.1 we can consider two types of universal modules.
272
+ 2.1. The universal A-module. The first such universal module is associated with a
273
+ Lie h-module and a Lie g-module as follows:
274
+ Definition 2.2. Given a Lie h-module U and a Lie g-module Z, the universal A-module
275
+ of U and Z is a pair
276
+
277
+ U(U, Z), ρU(U, Z)
278
+
279
+ consisting of an A-module U(U, Z) and a mor-
280
+ phism of Lie g-modules ρU(U, Z) : Z → U ⊗ U(U, Z) such that for any other pair (X, f)
281
+ consisting of an A-module X and a morphism of Lie g-modules f : Z → U ⊗X, there ex-
282
+ ists a unique morphism of A-modules g: U(U, Z) → X such that the following diagram
283
+ is commutative:
284
+ Z
285
+ ρU(U, Z)
286
+
287
+ f
288
+ �❘
289
+
290
+
291
+
292
+
293
+
294
+
295
+
296
+
297
+
298
+
299
+
300
+
301
+
302
+
303
+ U ⊗ U(U, Z)
304
+ IdU⊗g
305
+
306
+ U ⊗ X
307
+ (8)
308
+ In other words, the above definition is saying that, when it exists, the universal A-module
309
+ of U and Z is in fact the initial object of the category whose objects are pairs (X, f)
310
+ consisting of an A-module X and a morphism of Lie g-modules f : Z → U ⊗ X, while
311
+ morphisms between two such objects (X, f) and (X′, f ′) are defined to be A-module
312
+ maps g: X → X′ satisfying (IdU ⊗ g) ◦ f = f ′.
313
+ As direct consequences of the above definition, we obtain the following:
314
+ Corollary 2.3. Let U be a Lie h-module. Then, for all Lie g-modules Z and all A-
315
+ modules X, we have a bijective correspondence between:
316
+ (1) Lie g-module maps f : Z → U ⊗ X;
317
+ (2) A-module maps g: U(U, Z) → X.
318
+ Under the appropriate finite-dimensionality assumptions required for all Manin-Tambara
319
+ type constructions, the universal A-module introduced in Definition 2.2 exists:
320
+ Theorem 2.4. If U is a finite dimensional Lie h-module then the universal A-module
321
+ of U and any other Lie g-module Z exists.
322
+ Proof. Let {u1, · · · , um}, m ∈ N∗, be a k-basis of the Lie module U and denote by ωt
323
+ ij ∈ k
324
+ the structure constants of U with respect to its Lie h-module structure ↷, i.e. for all
325
+ i = 1, · · · , n, j = 1, · · · , m we have:
326
+ $e_i \curvearrowright u_j = \sum_{s=1}^{m} \omega^{s}_{i,j}\, u_s$   (9)
333
+ Furthermore, consider {zr | r ∈ J} to be a k-basis for the arbitrary Lie g-module Z and
334
+ if ↬ denotes its Lie module structure, then for all j ∈ I and r ∈ J we can find a finite
335
+
336
+ 6
337
+ A. L. AGORE
338
+ subset Tj,r of J such that:
339
+ $f_j \looparrowright z_r = \sum_{l \in T_{j,r}} \eta^{l}_{j,r}\, z_l$   (10)
345
+ where ηl
346
+ j,r ∈ k for all j ∈ I, r ∈ J, and l ∈ Tj,r.
347
+ Consider now T (U, Z) to be the free A-module on the set {Yij | i = 1, · · · , m, j ∈ J}
348
+ and denote by U(U, Z) the quotient of T (U, Z) by its A-submodule generated by the
349
+ following elements:
350
+ $\sum_{p \in T_{j,i}} \eta^{p}_{j,i}\, Y_{sp} \;-\; \sum_{t=1}^{m} \sum_{r=1}^{n} \omega^{s}_{r,t}\, x_{rj} \bullet Y_{ti}$   (11)
363
+ for all s = 1, · · · , m, i ∈ J and j ∈ I, where • denotes the A-module action on T (U, Z).
364
+ Denoting ytj := �
365
+ Ytj, where �
366
+ Ytj stands for the equivalence class of Ytj in the quotient
367
+ module U(U, Z), it follows that the relations below hold in U(U, Z):
368
+ $\sum_{p \in T_{j,i}} \eta^{p}_{j,i}\, y_{sp} \;=\; \sum_{t=1}^{m} \sum_{r=1}^{n} \omega^{s}_{r,t}\, x_{rj} \bullet y_{ti}$   (12)
381
+ for all s = 1, · · · , m, i ∈ J and j ∈ I.
382
+ Furthermore, we can define a morphism of Lie g-modules ρU(U, Z): Z → U ⊗ U(U, Z) as
383
+ follows:
384
+ $\rho_{U(U,Z)}(z_r) := \sum_{s=1}^{m} u_s \otimes y_{sr}$, for all $r \in J$.   (13)
391
+ It follows now that for all j ∈ I and i ∈ J we have:
392
+ ρU(U, Z)(fj ↬ zi)
393
+ (10)
394
+ = ρU(U,Z)
395
+ � �
396
+ p∈Tj,i
397
+ ηp
398
+ ji zp
399
+
400
+ =
401
+
402
+ p∈Tj,i
403
+ m
404
+
405
+ s=1
406
+ ηp
407
+ ji us ⊗ ysp =
408
+ m
409
+
410
+ s=1
411
+
412
+ us ⊗
413
+
414
+ p∈Tj,i
415
+ ηp
416
+ ji ysp
417
+
418
+ (12)
419
+ =
420
+ m
421
+
422
+ s,t=1
423
+ n
424
+
425
+ r=1
426
+ ωs
427
+ r,t us ⊗ xrj • yti =
428
+ m
429
+
430
+ t=1
431
+ n
432
+
433
+ r=1
434
+ � m
435
+
436
+ s=1
437
+ ωs
438
+ r,t us
439
+
440
+ ⊗ xrj • yti
441
+ (9)
442
+ =
443
+ m
444
+
445
+ t=1
446
+ n
447
+
448
+ r=1
449
+ er ↷ ut ⊗ xrj • yti
450
+ (7)
451
+ =
452
+ m
453
+
454
+ t=1
455
+ fj ⇀ (ut ⊗ yti) = fj ⇀
456
+ m
457
+
458
+ t=1
459
+ ut ⊗ yti
460
+ (13)
461
+ = fj ⇀ ρU(U, Z)(zi)
462
+ which shows that ρU(U, Z) is indeed a Lie g-modules map.
463
+ We will show that the pair
464
+
465
+ U(U, Z), ρU(U, Z)
466
+
467
+ constructed above is in fact the universal
468
+ A-module of U and Z. To this end, consider a pair (X, f) consisting of an A-module X
469
+ and a morphism of Lie g-modules f : Z → U ⊗ X. Let {wsr | s = 1, · · · , m, r ∈ J} be a
470
+ family of elements of X such that for all r ∈ J we have:
471
+ $f(z_r) = \sum_{s=1}^{m} u_s \otimes w_{sr}$   (14)
477
+
478
+ UNIVERSAL MODULES
479
+ 7
480
+ Furthermore, as f : Z → U ⊗ X is a Lie g-modules map, a straightforward computation
481
+ shows that the following compatibilities hold for all s = 1, · · · , m, i ∈ J and j ∈ I:
482
+ $\sum_{p \in T_{j,i}} \eta^{p}_{j,i}\, w_{sp} \;=\; \sum_{t=1}^{m} \sum_{r=1}^{n} \omega^{s}_{r,t}\, x_{rj} \cdot w_{ti}$   (15)
495
+ where · denotes the A-module action on X.
496
+ The universal property of the free module yields a unique A-module map g: T (U, Z) → X
497
+ such that g(Ysr) = wsr, for all s = 1, · · · , m and r ∈ J. Moreover, Ker(g) contains the A-
498
+ submodule of T (U, Z) generated by the elements listed in (11). Indeed, as g : U(U, Z) →
499
+ X is a morphism of A-modules we have:
500
+ g
501
+ � �
502
+ p∈Tj,i
503
+ ηp
504
+ j,i Ysp −
505
+ m
506
+
507
+ t=1
508
+ n
509
+
510
+ r=1
511
+ ωs
512
+ r,t xrj • Yti
513
+
514
+ =
515
+
516
+ p∈Tj,i
517
+ ηp
518
+ j,i wsp −
519
+ m
520
+
521
+ t=1
522
+ n
523
+
524
+ r=1
525
+ ωs
526
+ r,t xrj · wti
527
+ (15)
528
+ = 0
529
+ for all s = 1, · · · , m, i ∈ J and j ∈ I. This shows that there exists a unique A-modules
530
+ map g: U(U, Z) → X such that g(ysr) = wsr, for all s = 1, · · · , m and r ∈ J. This
531
+ implies that for all r ∈ J we have:
532
+
533
+ IdU ⊗ g
534
+
535
+ ◦ ρU(U, Z)(zr) =
536
+
537
+ IdU ⊗ g
538
+ �� m
539
+
540
+ s=1
541
+ us ⊗ ysr
542
+
543
+ =
544
+ m
545
+
546
+ s=1
547
+ us ⊗ wsr
548
+ (14)
549
+ = f(zr)
550
+ which means precisely that diagram (8) is commutative. Moreover, g is obviously the
551
+ unique A-modules map with this property and the proof is now finished.
552
+
553
+ The case g = h. Particularizing the results of Section 2 for g = h, where h is the finite
554
+ dimensional Lie algebra defined in (1), leads to the following interesting consequences.
555
+ According to the discussion in Preliminaries, the universal algebra A(h, h) denoted by B
556
+ is in this case a bialgebra with coalgebra structure depicted in (6). This allows us to see
557
+ the tensor product U(U, Z) ⊗ U(U, Z) as well as the base field k as B-modules via the
558
+ comultiplication and the counit of B as follows:
559
+ $x_{ij} * (y \otimes t) = \sum_{t=1}^{n} x_{it} \bullet y \otimes x_{tj} \bullet t$   (16)
+ $x_{ij} \cdot \alpha = \delta_{ij}\,\alpha$   (17)
567
+ for all xij ∈ B, y, t ∈ U(U, Z) and α ∈ k, where • denotes the B-module structure on
568
+ U(U, Z) as in the proof of Theorem 2.4.
569
+ First we show that if U is a finite dimensional Lie h-module as considered in (9), then
570
+ the B-module U(U, U) denoted by U(U) admits a coalgebra structure with respect to
571
+ which
572
+
573
+ U, ρU(U)
574
+
575
+ becomes a right U(U)-comodule.
576
+ Proposition 2.5. Let U be a finite dimensional Lie h-module. There exists a unique
577
+ coalgebra structure on U(U) such that
578
+
579
+ U, ρU(U)
580
+
581
+ becomes a right U(U)-comodule.
582
+ Proof. In particular both U(U) ⊗ U(U) and k are B-modules via the formulas (16) and
583
+ (17) respectively. Therefore, U ⊗ U(U) ⊗ U(U) and U ⊗ k are Lie h-modules via (7).
584
+ Furthermore, it can be easily checked that the maps
585
+
586
+ ρU(U) ⊗ IdU(U)
587
+
588
+ ◦ρU(U) : U → U ⊗
589
+
590
+ 8
591
+ A. L. AGORE
592
+ U(U) ⊗ U(U) and canU : U → U ⊗ k are morphisms of Lie h-modules, where canU : U →
593
+ U ⊗ k is the canonical isomorphism. Now Definition 2.2 yields a unique B-modules map
594
+ ∆: U(U) → U(U) ⊗ U(U) such that the following diagram is commutative:
595
+ U
596
+ ρU(U)
597
+
598
+
599
+ ρU(U)⊗IdU(U)
600
+
601
+ ◦ρU(U)
602
+ �❆
603
+
604
+
605
+
606
+
607
+
608
+
609
+
610
+
611
+
612
+
613
+
614
+
615
+
616
+
617
+
618
+
619
+ U ⊗ U(U)
620
+ IdU ⊗∆
621
+
622
+ U ⊗ U(U) ⊗ U(U)
623
+ Similarly, we obtain a unique B-modules map ε: U(U) → k such that the following
624
+ diagram is commutative:
625
+ U
626
+ ρU(U) �
627
+ canU
628
+ �■
629
+
630
+
631
+
632
+
633
+
634
+
635
+
636
+
637
+
638
+ U ⊗ U(U)
639
+ IdU⊗ε
640
+
641
+ U ⊗ k
642
+ A straightforward computation shows that the commutativity of the two diagrams above
643
+ imply that ∆ and ε take the following form for all l, t = 1, · · · , m:
644
+ $\Delta(y_{lt}) = \sum_{s=1}^{m} y_{ls} \otimes y_{st}, \qquad \varepsilon(y_{lt}) = \delta_{lt}\, 1_k.$
650
+ It is now obvious that
651
+
652
+ U(U), ∆, ε
653
+
654
+ form a coalgebra. Finally, by the commutativity of
655
+ the two diagrams above we obtain that
656
+
657
+ U, ρU(U)
658
+
659
+ is a right U(U)-comodule.
660
+
661
+ Remark 2.6. It is worth pointing out that with the coalgebra structure introduced
662
+ above, U(U) becomes a B-module coalgebra. Indeed, having in mind that both ∆ and ε
663
+ are B-module maps, we have:
664
+ ∆(xab • ylt) = xab ∗ ∆(ylt) = xab ∗
665
+ � m
666
+
667
+ s=1
668
+ yls ⊗ yst
669
+ �(16)
670
+ =
671
+ n
672
+
673
+ c=1
674
+ m
675
+
676
+ s=1
677
+ xac • yls ⊗ xcb • yst
678
+ = (xab)(1) • (ylt)(1) ⊗ (xab)(2) • (ylt)(2)
679
+ and
680
+ ε(xab • ylt) = xab · ε(ylt)
681
+ (17)
682
+ = δab ε(ylt) = ε(xab) ε(ylt).
683
+ This shows that • is a coalgebra map, as desired.
684
+ It turns out that the pair
685
+
686
+ U(U), ρU(U)
687
+
688
+ is universal in the following way:
689
+ Proposition 2.7. For any coalgebra X with a B-module structure and any Lie h-module
690
+ morphism ψ: U → U ⊗X which makes U into a right X-comodule, there exists a unique
691
+
692
+ UNIVERSAL MODULES
693
+ 9
694
+ B-modules and coalgebra morphism θ: U(U) → X such that the following diagram is
695
+ commutative:
696
+ U
697
+ ρU(U) �
698
+ ψ
699
+ �■
700
+
701
+
702
+
703
+
704
+
705
+
706
+
707
+
708
+
709
+ U ⊗ U(U)
710
+ IdU ⊗θ
711
+
712
+ U ⊗ X
713
+ Proof. In light of Definition 2.2, such a unique A-modules map θ exists. We are left to
714
+ show that θ is also a coalgebra map. From the proof of Theorem 2.4 we know that θ is
715
+ defined for all l, t = 1, · · · , m by θ(ylt) = zlt where zlt are elements of X such that for all
716
+ r = 1, · · · , m we have ψ(ur) = �m
717
+ s=1 us ⊗ zsr. As (U, ψ) is a right comodule, we obtain:
718
+ $\Delta(z_{lt}) = \sum_{s=1}^{m} z_{ls} \otimes z_{st}, \qquad \varepsilon(z_{lt}) = \delta_{lt}\, 1_k.$
724
+ To this end, we have:
725
+ $\Delta\bigl(\theta(y_{lt})\bigr) = \Delta(z_{lt}) = \sum_{s=1}^{m} z_{ls} \otimes z_{st} = \sum_{s=1}^{m} \theta(y_{ls}) \otimes \theta(y_{st}) = (\theta \otimes \theta) \circ \Delta(y_{lt})$
738
+ Similarly one can check that ε◦θ = ε which shows that θ is indeed a coalgebra map.
739
+
740
+ 2.2. The universal h-module. The second type of universal module one can consider
741
+ is the following:
742
+ Definition 2.8. Given an A-module V and a Lie g-module W, the universal Lie h-
743
+ module of V and W is a pair
744
+
745
+ V(V, W), τV(V, W )
746
+
747
+ consisting of a Lie h-module V(V, W)
748
+ and a morphism of Lie g-modules τV(V, W ): W → V(V, W)⊗V such that for any other pair
749
+ (Y, f) consisting of a Lie h-module Y and a morphism of Lie g-modules f : W → Y ⊗ V ,
750
+ there exists a unique morphism of Lie h-modules g: V(V, W) → Y such that the following
751
+ diagram is commutative:
752
+ W
753
+ τV(V, W )
754
+
755
+ f
756
+ �❘
757
+
758
+
759
+
760
+
761
+
762
+
763
+
764
+
765
+
766
+
767
+
768
+
769
+
770
+
771
+
772
+ V(V, W) ⊗ V
773
+ g⊗IdV
774
+
775
+ Y ⊗ V
776
+ (18)
777
+ The universal Lie h-module of V and W, when it exists, can again be seen as the initial
778
+ object of the category whose objects are pairs (Y, f) consisting of a Lie h-module Y
779
+ and a morphism of Lie g-modules f : W → Y ⊗ V , while morphisms between two such
780
+ objects (Y, f) and (Y ′, f ′) are defined to be Lie h-module maps g: Y → Y ′ satisfying
781
+ (g ⊗ IdV ) ◦ f = f ′.
782
+ Corollary 2.9. Let V be an A-module. Then, for all Lie g-modules W and all Lie
783
+ h-modules Y , we have a bijective correspondence between:
784
+ (1) Lie g-module maps f : W → Y ⊗ V ;
785
+ (2) Lie h-module maps g: V(V, W) → Y .
786
+
787
+ 10
788
+ A. L. AGORE
789
+ The universal h-module introduced in Definition 2.8 also exists provided that the A-
790
+ module V is finite dimensional.
791
+ Theorem 2.10. If V is a finite dimensional A-module then the universal Lie h-module
792
+ of V and any other Lie g-module W exists.
793
+ Proof. As this proof is somewhat similar in spirit with the one of Theorem 2.4, we will be
794
+ brief and provide only the main ingredients required for the construction of the universal
795
+ Lie h-module.
796
+ Let {v1, · · · , vl}, l ∈ N∗, be a k-basis of the finite dimensional A-module V and denote
797
+ by γt
798
+ r,i,j ∈ k the structure constants of V with respect to its A-module structure ·, i.e.
799
+ for all r = 1, · · · , n, i ∈ I and j = 1, · · · , l we have:
800
+ $x_{ri} \cdot v_j = \sum_{s=1}^{l} \gamma^{s}_{r,i,j}\, v_s$   (19)
807
+ Consider {wr | r ∈ T} to be a k-basis for W and if ⊲ denotes its Lie g-module structure,
808
+ then for all j ∈ I and r ∈ T we can find a finite subset Sj,r of T such that:
809
+ $f_j \rhd w_r = \sum_{p \in S_{j,r}} \sigma^{p}_{j,r}\, w_p$   (20)
815
+ where σp
816
+ j,r ∈ k for all j ∈ I, r ∈ T, and p ∈ Sj,r.
817
+ Now let S(V, W) be the free Lie h-module on the set {Yri | r ∈ T, i = 1, · · · , l} and
818
+ denote by V(V, W) the quotient of S(V, W) by its Lie h-submodule generated by the
819
+ following elements:
820
+ $\sum_{p \in S_{j,r}} \sigma^{p}_{j,r}\, Y_{ps} \;-\; \sum_{k=1}^{l} \sum_{p=1}^{n} \gamma^{s}_{p,j,k}\, e_p \blacktriangleright Y_{rk}$   (21)
833
+ for all s = 1, · · · , l, r ∈ T and j ∈ I, where ◮ denotes the h-module action on S(V, W).
834
+ By denoting yri := �
835
+ Yri, where �
836
+ Yri stands for the equivalence class of Yri in the quotient
837
+ module V(V, W), it follows that the relations below hold in V(V, W):
838
+ $\sum_{p \in S_{j,r}} \sigma^{p}_{j,r}\, y_{ps} \;=\; \sum_{k=1}^{l} \sum_{t=1}^{n} \gamma^{s}_{t,j,k}\, e_t \blacktriangleright y_{rk}$   (22)
851
+ for all s = 1, · · · , l, r ∈ T and j ∈ I.
852
+ It can now be easily seen, as in the proof of Theorem 2.4, that the pair (V(V, W), τV(V, W ))
853
+ is the universal Lie h-module of V and W, where τV(V, W ): W → V(V, W) ⊗ V is the
854
+ morphism of Lie g-modules defined for all r ∈ T as follows:
855
+ $\tau_{V(V,W)}(w_r) := \sum_{s=1}^{l} y_{rs} \otimes v_s.$   (23)
861
+
862
+
863
+ UNIVERSAL MODULES
864
+ 11
865
+ 3. Functors between module categories
866
+ In this section we show that the two universal module constructions previously introduced
867
+ are functorial and, moreover, if certain conditions are fulfilled the corresponding functors
868
+ admit right adjoints. We start, however, by stating the following easy consequence of
869
+ Theorem 2.1:
870
+ Proposition 3.1. Let (U, ↷) ∈ hLM and (V, ·) ∈ AM. Then:
871
+ 1) We have a functor U ⊗ −: AM → gLM from the category of A-modules to the
872
+ category of Lie g-modules;
873
+ 2) We have a functor − ⊗ V : hLM → gLM between the categories of Lie modules
874
+ over h and g respectively.
875
+ Proof. In light of Theorem 2.1, we are only left to show that morphisms behave well with
876
+ respect to the corresponding associative or Lie module structures. We will treat only the
877
+ first statement and leave the second one to the reader. To this end, consider (V, ·) and
878
+ (V ′, •) two A-modules, ⇀ and ⇀′ the corresponding induced Lie g-module actions via
879
+ (7) and g: V → V ′ a morphism in AM . Then, for all i ∈ I, l ∈ U and t ∈ V we have:
880
+ (IdU ⊗ g)
881
+
882
+ fi ⇀ (l ⊗ t)
883
+ �(7)
884
+ =
885
+ n
886
+
887
+ j=1
888
+ (ej ↷ l) ⊗ g(xji · t) =
889
+ n
890
+
891
+ j=1
892
+ (ej ↷ l) ⊗ xji • g(t)
893
+ (7)
894
+ = fi ⇀′ �
895
+ l ⊗ g(t)
896
+
897
+
898
+ We consider now the universal module functors:
899
+ Theorem 3.2. Let U be a finite dimensional Lie h-module and V a finite dimensional
900
+ A-module.
901
+ (1) There exists a functor UU : gLM → AM defined as follows for all Lie g-modules
902
+ X, Y and all morphisms f : X → Y in gLM:
903
+ UU(X) = U(U, X),
904
+ UU(f) = f
905
+ where f : U(U, X) → U(U, Y ) is the unique A-modules morphism which makes
906
+ the following diagram commutative:
907
+ X
908
+ ρU(U, X)
909
+
910
+ ρU(U, Y )◦f
911
+ �◗
912
+
913
+
914
+
915
+
916
+
917
+
918
+
919
+
920
+
921
+
922
+
923
+
924
+
925
+
926
+ U ⊗ U(U, X)
927
+ IdU⊗f
928
+
929
+ U ⊗ U(U, Y )
930
+ (24)
931
+ (2) There exists a functor VV : gLM → hLM defined as follows for all Lie g-modules
932
+ X, Y and all morphisms f : X → Y in gLM:
933
+ VV (X) = V(V, X),
934
+ VV (f) = f
935
+
936
+ 12
937
+ A. L. AGORE
938
+ where f : V(V, X) → V(V, Y ) is the unique morphism of Lie h-modules which
939
+ makes the following diagram commutative:
940
+ X
941
+ τV(V, X)
942
+
943
+ τV(V, Y )◦f
944
+ �◗
945
+
946
+
947
+
948
+
949
+
950
+
951
+
952
+
953
+
954
+
955
+
956
+
957
+
958
+
959
+ V(V, X) ⊗ V
960
+ f⊗IdV
961
+
962
+ V(V, Y ) ⊗ V
963
+ (25)
964
+ Proof. As the result follows in a straightforward manner by a standard category the-
965
+ ory argument, we only sketch the proof of the first assertion. Indeed, if f = IdX then
966
+ IdU(U, X) is obviously the unique A-modules morphism which makes diagram (24) com-
967
+ mute and therefore UU(IdX) = IdU(U, X). Moreover, if f : X → Y and g: Y → W are two
968
+ morphisms in gLM, then g ◦f : U(U, X) → U(U, W) is obviously the unique A-modules
969
+ morphism which makes the following diagram commutative:
970
+ Z
971
+ ρU(U, X)
972
+
973
+ ρU(U, W )◦g◦f
974
+ �◗
975
+
976
+
977
+
978
+
979
+
980
+
981
+
982
+
983
+
984
+
985
+
986
+
987
+
988
+
989
+
990
+ U ⊗ U(U, X)
991
+ IdU⊗
992
+
993
+ g◦f
994
+
995
+
996
+ U ⊗ U(U, W)
997
+ and we can conclude that UU(g ◦ f) = UU(g) ◦ UU(f), as desired.
998
+
999
+ Under the appropriate finite-dimensionality assumptions, the functors constructed in
1000
+ Proposition 3.1 are right adjoints to the universal module functors:
1001
+ Theorem 3.3. Let (U, ↷) be a finite dimensional Lie h-module and (V, ·) a finite di-
1002
+ mensional A-module. Then:
1003
+ 1) The following functors form an adjunction:
1004
+ UU : gLM → AM,
1005
+ U ⊗ −: AM → gLM;
1006
+ 2) Similarly, the following functors also form an adjunction:
1007
+ VV : gLM → hLM,
1008
+ − ⊗ V : hLM → gLM.
1009
+ Proof. 1) As pointed out in Corollary 2.3, for all Lie g-modules Z and all A-modules
1010
+ X, there is a bijection between HomAM
1011
+
1012
+ UU(Z), X
1013
+
1014
+ and HomgLM (Z, U ⊗ X) given as
1015
+ follows for all morphisms of A-modules θ: UU(Z) → X:
1016
+ ΓZ,X : HomAM (UU(Z), X) → HomgLM (Z, U ⊗ X),
1017
+ ΓZ,X(θ) = (IdU ⊗ θ) ◦ ρU(U, Z).
1018
+ The desired conclusion now follows by showing that the above bijection is natural in
1019
+ both variables. This can be easily proved by a straightforward diagram chase and is left
1020
+ to the reader.
1021
+ 2) Using now Corollary 2.9, for all Lie g-modules W and all Lie h-modules Z, we obtain
1022
+ a bijection between HomhLM
1023
+
1024
+ VV (W), Z
1025
+
1026
+ and HomgLM (W, Z ⊗ V ) defined as follows
1027
+
1028
+ UNIVERSAL MODULES
1029
+ 13
1030
+ for all morphisms of Lie h-modules θ: VV (W) → Z:
1031
+ ΓW,Z : HomhLM
1032
+
1033
+ VV (W), Z
1034
+
1035
+ → HomgLM (W, Z ⊗ V ),
1036
+ ΓW,Z(θ) = (θ ⊗ IdV ) ◦ τV(V, W ).
1037
+
1038
+ In particular, the two pairs of adjoint functors allow us to travel both ways between the
1039
+ representation categories of the two (arbitrary) Lie algebras h and g and respectively
1040
+ between the representation category of the associative algebra A and the representation
1041
+ category of the Lie algebra g.
1042
+ Example 3.4. Let ρi : g ⊗ Wi → Wi be representations of g, where i = 1, 2. By the
1043
+ colimit preservation property of left adjoints we can easily conclude that for any finite
1044
+ dimensional Lie h-module U, UU(W1) ⊕ UU(W2) is the direct sum of the A-modules
1045
+ UU(W1) and UU(W2).
1046
+ Similarly, for any finite dimensional A-module V , VV (W1) ⊕
1047
+ VV (W2) is the direct sum of the Lie h-modules VV (W1) and VV (W2). This can be easily
1048
+ extended to an arbitrary family of representations.
1049
+ References
1050
+ [1] Agore, A.L., Gordienko, A.S., Vercruysse, J. - V -universal Hopf algebras (co)acting on Ω-algebras,
1051
+ Commun. Contemp. Math. 25 (2023), 2150095.
1052
+ [2] Agore, A.L. - Universal coacting Poisson Hopf algebras, Manuscripta Math. 165 (2021), 255–268.
1053
+ [3] Agore, A.L., Gordienko, A.S., Vercruysse, J. - Equivalences of (co)module algebra structures over
1054
+ Hopf algebras, J. Noncommut. Geom., 15 (2021), 951–993.
1055
+ [4] Agore, A.L., Militaru, G. - A new invariant for finite dimensional Leibniz/Lie algebras, J. Algebra
1056
+ 562 (2020), 390–409.
1057
+ [5] Ardizzoni, A., El Kaoutit, L., Menini, C. - Categories of comodules and chain complexes of modules,
1058
+ Internat. J. Math. 23 (2012), 1250109
1059
+ [6] Bhattacharjee, S., Chirvˇasitu, A., Goswami, D. - Quantum Galois groups of subfactors, Internat. J.
1060
+ Math. 33 (2022), 2250013.
1061
+ [7] Chirvˇasitu, A., Walton, C., Wang, X. - On quantum groups associated to a pair of preregular forms,
1062
+ J. Noncommut. Geom. 13 (2019), 115–159.
1063
+ [8] Hyland, M., Lopez Franco, I., Vasilakopoulou, C. - Hopf measuring comonoids and enrichment,
1064
+ Proc. Lond. Math. Soc. 115 (2017), 1118—1148.
1065
+ [9] Jacobson, N. – Lie algebras, Dover Publications, NY, 1962.
1066
+ [10] Mac Lane, S. - Categories for the Working Mathematician, Graduate Texts in Mathematics 5,
1067
+ Springer, 1998.
1068
+ [11] Manin, Yu. I. - Quantum groups and noncommutative geometry, Universite de Montreal, Centre de
1069
+ Recherches Mathematiques, Montreal, QC, 1988.
1070
+ [12] Militaru, G. - The automorphisms group and the classification of gradings of finite dimensional
1071
+ associative algebras, Results Math. 77 (2022).
1072
+ [13] Raedschelders, T., Van den Bergh, M. - The Manin Hopf algebra of a Koszul Artin-Schelter regular
1073
+ algebra is quasi-hereditary, Adv. Math. 305 (2017), 601-–660.
1074
+ [14] Rodríguez-Romo, S., Taft, E. - Some quantum-like Hopf algebras which remain noncommutative
1075
+ when q = 1, Lett. Math. Phys. 61 (2002), 41–50.
1076
+ [15] Sweedler, M.E. - Hopf Algebras, Benjamin New York, 1969.
1077
+ [16] Tambara, D. - The coendomorphism bialgebra of an algebra. J. Fac. Sci. Univ. Tokyo Math. 37
1078
+ (1990), 425–456.
1079
+
1080
+ 14
1081
+ A. L. AGORE
1082
+ Vrije Universiteit Brussel, Pleinlaan 2, B-1050 Brussels, Belgium
1083
+ Simion Stoilow Institute of Mathematics of the Romanian Academy, P.O. Box 1-764, 014700
1084
+ Bucharest, Romania
1085
+ Email address: ana.agore@gmail.com
1086
+
0NE1T4oBgHgl3EQfRQO7/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,434 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf,len=433
2
+ page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
3
+ page_content='03051v1 [math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
4
+ page_content='RA] 8 Jan 2023 FUNCTORS BETWEEN REPRESENTATION CATEGORIES.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
5
+ page_content=' UNIVERSAL MODULES A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
6
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
7
+ page_content=' AGORE Abstract.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
8
+ page_content=' Let g and h be two Lie algebras with h finite dimensional and consider A = A(h, g) to be the corresponding universal algebra as introduced in [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
9
+ page_content=' Given an A-module U and a Lie h-module V we show that U ⊗ V can be naturally endowed with a Lie g-module structure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
10
+ page_content=' This gives rise to a functor between the category of Lie h-modules and the category of Lie g-modules and, respectively, to a functor between the category of A-modules and the category of Lie g-modules.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
11
+ page_content=' Under some finite dimensionality assumptions, we prove that the two functors admit left adjoints which leads to the construction of universal A-modules and universal Lie h-modules as the representation theoretic counterparts of Manin-Tambara’s universal coacting objects [11, 16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
12
+ page_content=' Introduction The universal coacting bialgebra/Hopf algebra on a finite dimensional (graded) asso- ciative algebra originates in the work of Yu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
13
+ page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
14
+ page_content=' Manin ([11]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
15
+ page_content=' The importance of this construction became obvious mostly due to its interaction with non-commutative geom- etry where it is seen as some sort of symmetry group (see [13] for more details on this view point).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
16
+ page_content=' The non-graded version of this construction appeared a few years later in a paper by D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
17
+ page_content=' Tambara ([16]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
18
+ page_content=' However, as remarked in [16], the universal coacting bialge- bra is in fact the dual of the so-called universal measuring bialgebra introduced by M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
19
+ page_content='E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
20
+ page_content=' Sweedler in [15].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
21
+ page_content=' We should note that, unlike Manin-Tambara’s construction, Sweedler’s universal measuring bialgebra/Hopf algebra exists even in the infinite-dimensional case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
22
+ page_content=' In recent years, universal (co)acting objects have been considered in various settings and for different purposes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
23
+ page_content=' For instance, [8] extends Sweedler’s construction to monoids in a braided monoidal category.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
24
+ page_content=' On the other hand, the Manin-Tambara construction was introduced in the setting of Poisson algebras ([2]), finite index-subfactors ([6]), su- perpotential algebras ([7]), polynomial algebras ([14]), bialgebroids ([5]) or Lie/Leibniz algebras ([4]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
25
+ page_content=' The corresponding universal coacting bialgebras/Hopf algebras, which in certain cases carry some extra structure (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
26
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
27
+ page_content=' a Poisson Hopf algebra structure as in [2]), seem to play a prominent role in solving other seemingly unrelated problems such as the classification of gradings on various kinds of algebras ([4, 12]), the description of the automorphisms group of certain algebraic structures ([4]) and even in quantum Galois 2010 Mathematics Subject Classification.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
28
+ page_content=' 16D90, 16T05, 17A32, 17B10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
29
+ page_content=' Key words and phrases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
30
+ page_content=' universal module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
31
+ page_content=' This work was supported by a grant of Romanian Ministry of Research, Innovation and Digitization, CNCS/CCCDI – UEFISCDI, project number PN-III-P4-ID-PCE-2020-0458, within PNCDI III.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
32
+ page_content=' 1 2 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
33
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
34
+ page_content=' AGORE theory ([6]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
35
+ page_content=' Another related universal (co)acting construction was considered in [3] as the Hopf algebraic analogue of the universal group of a grading and its connections to the problem of classifying Hopf algebra coactions have been highlighted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
36
+ page_content=' One of the most general constructions of universal (co)acting bialgebras/Hopf algebras, performed in the setting of Ω-algebras, was introduced in [1] together with generalized duality results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
37
+ page_content=' Necessary and sufficient conditions for the existence of universal coacting bialgebras/Hopf algebras are provided, explaining in this general setting the need for assuming finite dimensionality in both Manin and Tambara’s papers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
38
+ page_content=' It is worth to point out that both Sweedler and Manin-Tambara’s constructions have a categorical interpretation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
39
+ page_content=' More precisely, for Tambara’s construction one considers the left adjoint, say a(A, −), of the tensor product endofunctor A ⊗ − on the category of k-algebras, where A is a finite dimensional associative algebra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
40
+ page_content=' Tambara’s universal coacting bialgebra is precisely a(A, A) which turns out to be naturally endowed with a bialgebra structure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
41
+ page_content=' Similarly, for an arbitrary associative algebra A, it can be proved that the contravariant functor Hom(−, A) taking coalgebras to (convolution) algebras has a right adjoint which hereafter we denote by M(A, −).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
42
+ page_content=' As before, Sweedler’s uni- versal measuring bialgebra is exactly M(A, A) which again has a bialgebra structure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
43
+ page_content=' In this paper we deal with the representation theoretic version of Manin-Tambara’s con- struction in the Lie algebra setting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
44
+ page_content=' Our approach is a categorical one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
45
+ page_content=' More precisely, given two fixed Lie algebras g and h, with h finite dimensional, and the corresponding universal algebra A = A(h, g) (see[4]), we first show that the tensor product between an A-module U and a Lie h-module V can be endowed with a Lie g-module structure (Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
46
+ page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
47
+ page_content=' As a consequence, we are able to construct two ”tensor product” func- tors between the categories of Lie modules over h and g and respectively between the category of A-modules and the category of Lie g-modules.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
48
+ page_content=' Under the appropriate finite dimensionality assumptions, the two functors mentioned above are proved to admit left adjoints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
49
+ page_content=' These left adjoints are given precisely by what we have called the universal Lie h-module and the universal A-module, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
50
+ page_content=' The two universal modules are introduced in a constructive manner in Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
51
+ page_content='4 and Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
52
+ page_content='10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
53
+ page_content=' These are the counterparts for Lie and associative representations of Manin-Tambara’s constructions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
54
+ page_content=' Furthermore, the two aforementioned pairs of adjoint functors allow us to travel both ways between the representation categories of different algebraic structures, such as Lie and associative algebras, and to transfer certain properties which are usually preserved by left/right adjoints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
55
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
56
+ page_content=' Preliminaries This section will be used mostly as an opportunity to fix some notation and to provide certain useful references.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
57
+ page_content=' Let us start with a few words on notation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
58
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
59
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
60
+ page_content=' Notational conventions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
61
+ page_content=' All vector spaces, (bi)linear maps, unadorned tensor products, Lie or associative algebras, bialgebras and so on are over an arbitrary com- mutative field k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
62
+ page_content=' All (co)associative (co)algebras are assumed to be (co)unital.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
63
+ page_content=' The notation employed for coalgebras is standard: ∆ stands for the comultiplication and ε UNIVERSAL MODULES 3 for the counit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
64
+ page_content=' We use Sweedler’s notation with implied summation for both coalge- bras (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
65
+ page_content=' bialgebras), as in ∆(c) = c(1) ⊗ c2, and for comodule structures: a right C-comodule structure ρ on a vector space V will be denoted by ρ(v) = v(0) ⊗v(1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
66
+ page_content=' When we need to be precise, the structures involved will be adorned.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
67
+ page_content=' δij denotes Kronecker’s symbol while IdX stands for the identity map on the set X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
68
+ page_content=' In the sequel, k[Xsi |s = 1, · · · , n, i ∈ I] denotes the usual polynomial algebra on vari- ables Xsi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
69
+ page_content=' We shall denote by Liek and ComAlgk the categories of Lie and commutative associative algebras, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
70
+ page_content=' Given an associative algebra A and a Lie algebra g we denote by AM and gLM the categories of left A-modules and left Lie g-modules, re- spectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
71
+ page_content=' Recall that a (left) Lie g-module is a vector space V equipped with a bilinear map ⇀: g × V → V such that for all x, y ∈ g and v ∈ V we have: [x, y] ⇀ v = x ⇀ (y ⇀ v) − y ⇀ (x ⇀ v).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
72
+ page_content=' Throughout the paper, g and h will denote two arbitrary Lie algebras with h finite dimensional.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
73
+ page_content=' Let {fi | i ∈ I} and {e1, · · · , en} be two fixed basis in g and h, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
74
+ page_content=' We consider {τ s i,j | i, j, s = 1, · · · , n} to be the structure constants of h, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
75
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
76
+ page_content=' for any i, j = 1, · · · , n we have: [ei, ej]h = n � s=1 τ s i,j es.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
77
+ page_content=' (1) Similarly, for any i, j ∈ I, let Bi,j ⊆ I be a finite subset of I such that for any i, j ∈ I we have: [fi, fj]g = � u∈Bi,j βu i,j fu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
78
+ page_content=' (2) 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
79
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
80
+ page_content=' The universal algebra of h and g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
81
+ page_content=' We recall briefly, for further use, the con- struction of the universal commutative algebra A(h, g) of two given Lie algebras h and g (recall that h is always assumed to be finite dimensional).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
82
+ page_content=' It was first introduced in [4] in the more general setting of Leibniz algebras as the counterpart of Tambara’s con- struction ([16]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
83
+ page_content=' We restrict here to the Lie algebra version of the construction which can be summarized as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
84
+ page_content=' We have: A(h, g) := k[Xsi |s = 1, · · · , n, i ∈ I]/J (3) where J is the ideal generated by all polynomials of the form P (h, g) (a,i,j) := � u∈Bi,j βu i,j Xau − n � s,t=1 τ a s,t XsiXtj, for all a = 1, · · · , n and i, j ∈ I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
85
+ page_content=' (4) When working in the universal algebra A(h, g), we denote by xsi := � Xsi the class of Xsi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
86
+ page_content=' Consequently, the following relations hold in A(h, g): � u∈Bi,j βu i,j xau = n � s,t=1 τ a s,t xsixtj, for all a = 1, · · · , n, and i, j ∈ I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
87
+ page_content=' (5) When the (���nite dimensional) Lie algebra h is fixed, the universal algebra construction gives rise to a functor A(h, −): Liek → ComAlgk which turns out to be the left adjoint 4 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
88
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
89
+ page_content=' AGORE of the tensor product h ⊗ −: ComAlgk → Liek (see [4, Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
90
+ page_content='1]), where for any commutative algebra X the tensor product h ⊗ X is endowed with the current Lie algebra structure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
91
+ page_content=' In order to avoid dealing with cumbersome notation, when there is no fear of confusion, we denote A = A(h, g).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
92
+ page_content=' Furthermore, If h = g, then the corresponding universal algebra A(h, h) will be denoted simply by B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
93
+ page_content=' The notation is meant to highlight the fact that B is a bialgebra;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
94
+ page_content=' in fact, it admits a unique bialgebra structure such that h becomes a right B-comodule with respect to ηh : h → h⊗B where η: 1Liek → h⊗A(h, −) denotes the unit of the adjunction between A(h, −) and h ⊗ −.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
95
+ page_content=' More precisely, the comultiplication and the counit on B are given for any i, j = 1, · · · , n by ∆(xij) = n � s=1 xis ⊗ xsj and ε(xij) = δi,j1k (6) For basic categorical concepts we refer the reader to [10] and for unexplained notions pertaining to Lie and Hopf algebras to [9] and [15], respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
96
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
97
+ page_content=' Universal modules Our first important result provides a way of defining a Lie g-module structure on the tensor product between a Lie h-module and an A-module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
98
+ page_content=' Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
99
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
100
+ page_content=' Let (U, ↷) ∈ hLM be a Lie h-module and (V, ·) ∈ AM an A-module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
101
+ page_content=' Then (U ⊗ V, ⇀) ∈ gLM is a Lie g-module where the action of g on U ⊗ V is given for all i ∈ I, l ∈ U and t ∈ V by: fi ⇀ (l ⊗ t) = n � j=1 (ej ↷ l) ⊗ (xji · t) (7) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
102
+ page_content=' Indeed,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
103
+ page_content=' having in mind that (U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
104
+ page_content=' ↷) is a Lie module and A = A(h,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
105
+ page_content=' g) is a com- mutative algebra,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
106
+ page_content=' we have: [fi,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
107
+ page_content=' fj] ⇀ (l ⊗ t) (2) = � u∈Bi,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
108
+ page_content='j βu i,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
109
+ page_content='j fu ⇀ (l ⊗ t) (7) = � u∈Vi,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
110
+ page_content='j,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
111
+ page_content='r=1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
112
+ page_content='n βu i,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
113
+ page_content='j (er ↷ l) ⊗ (xru · t) = � r=1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
114
+ page_content='n (er ↷ l) ⊗ � � u∈Bi,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
115
+ page_content='j βu i,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
116
+ page_content='j xru � t (5) = � s,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
117
+ page_content='p,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
118
+ page_content='r=1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
119
+ page_content='n τ r s,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
120
+ page_content='p (er ↷ l) ⊗ (xsixpj) · t = � s,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
121
+ page_content='p=1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
122
+ page_content='n � n � r=1 τ r s,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
123
+ page_content='p er � ↷ l ⊗ (xsixpj) · t (1) = � s,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
124
+ page_content='p=1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
125
+ page_content='n [es,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
126
+ page_content=' ep] ↷ l ⊗ (xsixpj) · t = � s,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
127
+ page_content='p=1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
128
+ page_content='n es ↷ (ep ↷ l) ⊗ xsi · (xpj · t) − � s,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
129
+ page_content='p=1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
130
+ page_content='n ep ↷ (es ↷ l) ⊗ xpj · (xsi · t) (7) = fi ⇀ n � p=1 (ep ↷ l) ⊗ (xpj · t) − fj ⇀ n � s=1 (es ↷ l) ⊗ (xsi · t) (7) = fi ⇀ � fj ⇀ (l ⊗ t) � − fj ⇀ � fi ⇀ (l ⊗ t) � UNIVERSAL MODULES 5 for all i,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
131
+ page_content=' j ∈ I and l ∈ U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
132
+ page_content=' t ∈ V ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
133
+ page_content=' i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
134
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
135
+ page_content=' (U ⊗ V, ⇀) is a left Lie g-module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
136
+ page_content=' □ Inspired by Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
137
+ page_content='1 we can consider two types of universal modules.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
138
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
139
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
140
+ page_content=' The universal A-module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
141
+ page_content=' The first such universal module is associated with a Lie h-module and a Lie g-module as follows: Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
142
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
143
+ page_content=' Given a Lie h-module U and a Lie g-module Z,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
144
+ page_content=' the universal A-module of U and Z is a pair � U(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
145
+ page_content=' Z),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
146
+ page_content=' ρU(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
147
+ page_content=' Z) � consisting of an A-module U(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
148
+ page_content=' Z) and a mor- phism of Lie g-modules ρU(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
149
+ page_content=' Z) : Z → U ⊗ U(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
150
+ page_content=' Z) such that for any other pair (X,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
151
+ page_content=' f) consisting of an A-module X and a morphism of Lie g-modules f : Z → U ⊗X,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
152
+ page_content=' there ex- ists a unique morphism of A-modules g: U(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
153
+ page_content=' Z) → X such that the following diagram is commutative: Z ρU(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
154
+ page_content=' Z) � f �❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ U ⊗ U(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
155
+ page_content=' Z) IdU⊗g � U ⊗ X (8) In other words,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
156
+ page_content=' the above definition is saying that,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
157
+ page_content=' when it exists,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
158
+ page_content=' the universal A-module of U and Z is in fact the initial object of the category whose objects are pairs (X,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
159
+ page_content=' f) consisting of an A-module X and a morphism of Lie g-modules f : Z → U ⊗ X,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
160
+ page_content=' while morphisms between two such objects (X,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
161
+ page_content=' f) and (X′,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
162
+ page_content=' f ′) are defined to be A-module maps g: X → X′ satisfying (IdU ⊗ g) ◦ f = f ′.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
163
+ page_content=' As direct consequences of the above definition, we obtain the following: Corollary 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
164
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
165
+ page_content=' Let U be a Lie h-module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
166
+ page_content=' Then, for all Lie g-modules Z and all A- modules X, we have a bijective correspondence between: (1) Lie g-module maps f : Z → U ⊗ X;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
167
+ page_content=' (2) A-module maps g: U(U, Z) → X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
168
+ page_content=' Under the appropiate finite-dimensionality assumptions required for all Manin-Tambara type constructions, the universal A-module introduced in Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
169
+ page_content='2 exists: Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
170
+ page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
171
+ page_content=' If U is a finite dimensional Lie h-module then the universal A -module of U and any other Lie g-module Z exists.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
172
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
173
+ page_content=' Let {u1, · · · , um}, m ∈ N∗, be a k-basis of the Lie module U and denote by ωt ij ∈ k the structure constants of U with respect to its Lie h-module structure ↷, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
174
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
175
+ page_content=' for all i = 1, · · · , n, j = 1, · · · , m we have: ei ↷ uj = m � s=1 ωs i,j us (9) Furthermore, consider {zr | r ∈ J} to be a k-basis for the arbitrary Lie g-module Z and if ↬ denotes its Lie module structure, then for all j ∈ I and r ∈ J we can find a finite 6 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
176
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
177
+ page_content=' AGORE subset Tj,r of J such that: fj ↬ zr = � l∈Tj,r ηl j,r zl (10) where ηl j,r ∈ k for all j ∈ I, r ∈ J, and l ∈ Tj,r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
178
+ page_content=' Consider now T (U, Z) to be the free A-module on the set {Yij | i = 1, · · · , m, j ∈ J} and denote by U(U, Z) the quotient of T (U, Z) by its A-submodule generated by the following elements: � p∈Tj,i ηp j,i Ysp − m � t=1 n � r=1 ωs r,t xrj • Yti (11) for all s = 1, · · · , m, i ∈ J and j ∈ I, where • denotes the A-module action on T (U, Z).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
179
+ page_content=' Denoting ytj := � Ytj, where � Ytj stands for the equivalence class of Ytj in the quotient module U(U, Z), it follows that the relations below hold in U(U, Z): � p∈Tj,i ηp j,i ysp = m � t=1 n � r=1 ωs r,t xrj • yti (12) for all s = 1, · · · , m, i ∈ J and j ∈ I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
180
+ page_content=' Furthermore, we can define a morphism of Lie g-modules ρU(U, Z): Z → U ⊗ U(U, Z) as follows: ρU(U, Z)(zr) := m � s=1 us ⊗ ysr, for all r ∈ J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
181
+ page_content=' (13) It follows now that for all j ∈ I and i ∈ J we have: ρU(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
182
+ page_content=' Z)(fj ↬ zi) (10) = ρU(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
183
+ page_content='Z) � � p∈Tj,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
184
+ page_content='i ηp ji zp � = � p∈Tj,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
185
+ page_content='i m � s=1 ηp ji us ⊗ ysp = m � s=1 � us ⊗ � p∈Tj,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
186
+ page_content='i ηp ji ysp � (12) = m � s,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
187
+ page_content='t=1 n � r=1 ωs r,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
188
+ page_content='t us ⊗ xrj • yti = m � t=1 n � r=1 � m � s=1 ωs r,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
189
+ page_content='t us � ⊗ xrj • yti (9) = m � t=1 n � r=1 er ↷ ut ⊗ xrj • yti (7) = m � t=1 fj ⇀ (ut ⊗ yti) = fj ⇀ m � t=1 ut ⊗ yti (13) = fj ⇀ ρU(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
190
+ page_content=' Z)(zi) which shows that ρU(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
191
+ page_content=' Z) is indeed a Lie g-modules map.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
192
+ page_content=' We will show that the pair � U(U, Z), ρU(U, Z) � constructed above is in fact the universal A-module of U and Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
193
+ page_content=' To this end, consider a pair (X, f) consisting of an A-module X and a morphism of Lie g-modules f : Z → U ⊗ X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
194
+ page_content=' Let {wsr | s = 1, · · · , m, r ∈ J} be a family of elements of X such that for all r ∈ J we have: g(zr) = m � s=1 us ⊗ wsr (14) UNIVERSAL MODULES 7 Furthermore, as g: Z → U ⊗ X is a Lie g-modules map, a straightforward computation shows that the following compatibilities hold for all s = 1, · · · , m, i ∈ J and j ∈ I: � p∈Tj,i ηp j,i wsp = m � t=1 n � r=1 ωs r,t xrj · wti (15) where · denotes the A-module action on X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
195
+ page_content=' The universal property of the free module yields a unique A-module map g: T (U, Z) → X such that g(Ysr) = wsr, for all s = 1, · · · , m and r ∈ J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
196
+ page_content=' Moreover, Ker(g) contains the A- submodule of T (U, Z) generated by the elements listed in (11).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
197
+ page_content=' Indeed, as g : U(U, Z) → X is a morphism of A-modules we have: g � � p∈Tj,i ηp j,i Ysp − m � t=1 n � r=1 ωs r,t xrj • Yti � = � p∈Tj,i ηp j,i wsp − m � t=1 n � r=1 ωs r,t xrj · wti (15) = 0 for all s = 1, · · · , m, i ∈ J and j ∈ I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
198
+ page_content=' This shows that there exists a unique A-modules map g: U(U, Z) → X such that g(ysr) = zsr, for all s = 1, · · · , m and r ∈ J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
199
+ page_content=' This implies that for all r ∈ J we have: � IdU ⊗ g � ρU(U, Z)(zr) = � IdU ⊗ g �� m � s=1 us ⊗ ysr � = m � s=1 us ⊗ wsr (14) = g(zr) which means precisely that diagram (8) is commutative.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
200
+ page_content=' Moreover, g is obviously the unique A-modules map with this property and the proof is now finished.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
201
+ page_content=' □ The case g = h.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
202
+ page_content=' Particularizing the results of Section 2 for g = h, where h is the finite dimensional Lie algebra defined in (1), leads to the following interesting consequences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
203
+ page_content=' According to the discussion in Preliminaries, the universal algebra A(h, h) denoted by B is in this case a bialgebra with coalgebra structure depicted in (6).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
204
+ page_content=' This allows us to see the tensor product U(U, Z) ⊗ U(U, Z) as well as the base field k as B-modules via the comultiplication and the counit of B as follows: xij ∗ (y ⊗ t) = n � t=1 xit • y ⊗ xtj • t (16) xij · α = δijα (17) for all xij ∈ B, y, t ∈ U(U, Z) and α ∈ k, where • denotes the B-module strucuture on U(U, Z) as in the proof of Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
205
+ page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
206
+ page_content=' First we show that if U is a finite dimensional Lie h-module as considered in (9), then the B-module U(U, U) denoted by U(U) admits a coalgebra structure with respect to which � U, ρU(U) � becomes a right U(U)-comodule.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
207
+ page_content=' Proposition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
208
+ page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
209
+ page_content=' Let U be a finite dimensional Lie h-module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
210
+ page_content=' There exists a unique coalgebra structure on U(U) such that � U, ρU(U) � becomes a right U(U)-comodule.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
211
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
212
+ page_content=' In particular both U(U) ⊗ U(U) and k are B-modules via the formulas (16) and (17) respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
213
+ page_content=' Therefore, U ⊗ U(U) ⊗ U(U) and U ⊗ k are Lie h-modules via (7).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
214
+ page_content=' Furthermore, it can be easily checked that the maps � ρU(U) ⊗ IdU(U) � ρU(U) : U → U ⊗ 8 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
215
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
216
+ page_content=' AGORE U(U) ⊗ U(U) and canU : U → U ⊗ k are morphisms of Lie h-modules, where canU : U → U ⊗ k is the canonical isomorphism.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
217
+ page_content=' Now Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
218
+ page_content='2 yields a unique B-modules map ∆: U(U) → U(U) ⊗ U(U) such that the following diagram is commutative: U ρU(U) � � ρU(U)⊗IdU(U) � ρU(U) �❆ ❆ ❆ ❆ ❆ ❆ ❆ ❆ ❆ ❆ ❆ ❆ ❆ ❆ ❆ ❆ ❆ U ⊗ U(U) IdU ⊗∆ � U ⊗ U(U) ⊗ U(U) Similarly,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
219
+ page_content=' we obtain a unique B-modules map ε: U(U) → k such that the following diagram is commutative: U ρU(U) � canU �■ ■ ■ ■ ■ ■ ■ ■ ■ ■ U ⊗ U(U) IdU⊗ε � U ⊗ k A straightforward computation shows that the commutativity of the two diagrams above imply that ∆ and ε take the following form for all l,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
220
+ page_content=' t = 1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
221
+ page_content=' · · · ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
222
+ page_content=' m: ∆(ylt) = m � s=1 yls ⊗ yst,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
223
+ page_content=' ε(ylt) = δlt1k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
224
+ page_content=' It is now obvious that � U(U), ∆, ε � form a coalgebra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
225
+ page_content=' Finally, by the commutativity of the two diagrams above we obtain that � U, ρU(U) � is a right U(U)-comodule.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
226
+ page_content=' □ Remark 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
227
+ page_content='6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
228
+ page_content=' It is worth pointing out that with the coalgebra structure introduced above, U(U) becomes a B-module coalgebra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
229
+ page_content=' Indeed, having in mind that both ∆ and ε are B-module maps, we have: ∆(xab • ylt) = xab ∗ ∆(ylt) = xab ∗ � m � s=1 yls ⊗ yst �(16) = n � c=1 m � s=1 xac • yls ⊗ xcb • yst = (xab)(1) • (ylt)(1) ⊗ (xab)(2) • (ylt)(2) and ε(xab • ylt) = xab · ε(ylt) (17) = δab ε(ylt) = ε(xab) ε(ylt).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
230
+ page_content=' This shows that • is a coalgebra map, as desired.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
231
+ page_content=' It turns out that the pair � U(U), ρU(U) � is universal in the following way: Proposition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
232
+ page_content='7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
233
+ page_content=' For any coalgebra X with a B-module structure and any Lie h-module morphism ψ: U → U ⊗X which makes U into a right X-comodule, there exists a unique UNIVERSAL MODULES 9 B-modules and coalgebra morphism θ: U(U) → X such that the following diagram is commutative: U ρU(U) � ψ �■ ■ ■ ■ ■ ■ ■ ■ ■ ■ U ⊗ U(U) IdU ⊗θ � U ⊗ X Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
234
+ page_content=' In light of Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
235
+ page_content='2, such a unique A-modules map θ exists.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
236
+ page_content=' We are left to show that θ is also a coalgebra map.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
237
+ page_content=' From the proof of Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
238
+ page_content='4 we know that θ is defined for all l, t = 1, · · · , m by θ(ylt) = zlt where zlt are elements of X such that for all r = 1, · · · , m we have ψ(ur) = �m s=1 us ⊗ zsr.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
239
+ page_content=' As (U, ψ) is a right comodule, we obtain: ∆(zlt) = m � s=1 zls ⊗ zst, ε(zlt) = δlt1k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
240
+ page_content=' To this end, we have: ∆ � θ(ylt) � = ∆(zlt) = m � s=1 zls ⊗ zst = m � s=1 θ(yls) ⊗ θ(yst) = (θ ⊗ θ) ◦ ∆(ylt) Similarly one can check that ε◦θ = ε which shows that θ is indeed a coalgebra map.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
241
+ page_content=' □ 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
242
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
243
+ page_content=' The universal h-module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
244
+ page_content=' The second type of universal module one can consider is the following: Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
245
+ page_content='8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
246
+ page_content=' Given an A-module V and a Lie g-module W,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
247
+ page_content=' the universal Lie h- module of V and W is a pair � V(V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
248
+ page_content=' W),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
249
+ page_content=' τV(V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
250
+ page_content=' W ) � consisting of a Lie h-module V(V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
251
+ page_content=' W) and a morphism of Lie g-modules τV(V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
252
+ page_content=' W ): W → V(V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
253
+ page_content=' W)⊗V such that for any other pair (Y,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
254
+ page_content=' f) consisting of a Lie h-module Y and a morphism of Lie g-modules f : W → Y ⊗ V ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
255
+ page_content=' there exists a unique morphism of Lie h-modules g: V(V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
256
+ page_content=' W) → Y such that the following diagram is commutative: W τV(V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
257
+ page_content=' W ) � f �❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ ❘ V(V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
258
+ page_content=' W) ⊗ V g⊗IdV � Y ⊗ V (18) The universal Lie h-module of V and W,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
259
+ page_content=' when it exists,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
260
+ page_content=' can again be seen as the initial object of the category whose objects are pairs (Y,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
261
+ page_content=' f) consisting of a Lie h-module Y and a morphism of Lie g-modules f : W → Y ⊗ V ,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
262
+ page_content=' while morphisms between two such objects (Y,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
263
+ page_content=' f) and (Y ′,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
264
+ page_content=' f ′) are defined to be Lie h-module maps g: Y → Y ′ satisfying (g ⊗ IdV ) ◦ f = f ′.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
265
+ page_content=' Corollary 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
266
+ page_content='9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
267
+ page_content=' Let V be an A-module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
268
+ page_content=' Then, for all Lie g-modules W and all Lie h-modules Y , we have a bijective correspondence between: (1) Lie g-module maps f : W → Y ⊗ V ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
269
+ page_content=' (2) Lie h-module maps g: V(V, W) → Y .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
270
+ page_content=' 10 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
271
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
272
+ page_content=' AGORE The universal h-module introduced in Definition 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
273
+ page_content='8 also exists provided that the A- module V is finite dimensional.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
274
+ page_content=' Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
275
+ page_content='10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
276
+ page_content=' If V is a finite dimensional A-module then the universal Lie h-module of V and any other Lie g-module W exists.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
277
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
278
+ page_content=' As this proof is somewhat similar in spirit with the one of Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
279
+ page_content='4, we will be brief and provide only the main ingredients required for the construction of the universal Lie h-module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
280
+ page_content=' Let {v1, · · · , vl}, l ∈ N∗, be a k-basis of the finite dimensional A-module V and denote by γt r,i,j ∈ k the structure constants of V with respect to its A-module structure ·, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
281
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
282
+ page_content=' for all r = 1, · · · , n, i ∈ I and j = 1, · · · , l we have: xri · vj = l � s=1 γs r,i,j vs (19) Consider {wr | r ∈ T} to be a k-basis for W and if ⊲ denotes its Lie g-module structure, then for all j ∈ I and r ∈ T we can find a finite subset Sj,r of T such that: fj ⊲ wr = � p∈Sj,r σp j,r wp (20) where σp j,r ∈ k for all j ∈ I, r ∈ T, and p ∈ Sj,r.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
283
+ page_content=' Now let S(V, W) be the free Lie h-module on the set {Yri | r ∈ T, i = 1, · · · , l} and denote by V(V, W) the quotient of S(V, W) by its Lie h-submodule generated by the following elements: � p∈Sj,r σp j,r Yps − l � k=1 n � p=1 γs p,j,k ep ◮ Yrk (21) for all s = 1, · · · , l, r ∈ T and j ∈ I, where ◮ denotes the h-module action on S(V, W).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
284
+ page_content=' By denoting yri := � Yri, where � Yri stands for the equivalence class of Yri in the quotient module V(V, W), it follows that the relations below hold in V(V, W): � p∈Sj,r σp j,r yps = l � k=1 n � t=1 γs t,j,k et ◮ yrk (22) for all s = 1, · · · , l, r ∈ T and j ∈ I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
285
+ page_content=' It can now be easily seen, as in the proof of Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
286
+ page_content='4, that the pair (V(V, W), τV(V, W )) is the universal Lie h-module of V and W, where τV(V, W ): W → V(V, W) ⊗ V is the morphism of Lie g-modules defined for all r ∈ T as follows: τV(V, W )(wr) := l � s=1 yrs ⊗ vs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
287
+ page_content=' (23) □ UNIVERSAL MODULES 11 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
288
+ page_content=' Functors between module categories In this section we show that the two universal module constructions previously introduced are functorial and, moreover, if certain conditions are fulfilled the corresponding functors admit right adjoints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
289
+ page_content=' We start, however, by stating the following easy consequence of Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
290
+ page_content='1: Proposition 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
291
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
292
+ page_content=' Let (U, ↷) ∈ hLM and (V, ·) ∈ AM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
293
+ page_content=' Then: 1) We have a functor U ⊗ −: AM → gLM from the category of A-modules to the category of Lie g-modules;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
294
+ page_content=' 2) We have a functor − ⊗ V : hLM → gLM between the categories of Lie modules over h and g respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
295
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
296
+ page_content=' In light of Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
297
+ page_content='1, we are only left to show that morphisms behave well with respect to the corresponding associative or Lie module structures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
298
+ page_content=' We will treat only the first statement and leave the second one to the reader.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
299
+ page_content=' To this end, consider (V, ·) and (V ′, •) two A-modules, ⇀ and ⇀′ the corresponding induced Lie g-module actions via (7) and g: V → V ′ a morphism in AM .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
300
+ page_content=' Then, for all i ∈ I, l ∈ U and t ∈ V we have: (IdU ⊗ g) � fi ⇀ (l ⊗ t) �(7) = n � j=1 (ej ↷ l) ⊗ g(xji · t) = n � j=1 (ej ↷ l) ⊗ xji • g(t) (7) = fi ⇀′ � l ⊗ g(t) � □ We consider now the universal module functors: Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
301
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
302
+ page_content=' Let U be a finite dimensional Lie h-module and V a finite dimensional A-module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
303
+ page_content=' (1) There exists a functor UU : gLM → AM defined as follows for all Lie g-modules X,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
304
+ page_content=' Y and all morphisms f : X → Y in gLM: UU(X) = U(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
305
+ page_content=' X),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
306
+ page_content=' UU(f) = f where f : U(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
307
+ page_content=' X) → U(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
308
+ page_content=' Y ) is the unique A-modules morphism which makes the following diagram commutative: X ρU(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
309
+ page_content=' X) � ρU(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
310
+ page_content=' Y )◦f �◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ U ⊗ U(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
311
+ page_content=' X) IdU⊗f � U ⊗ U(U,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
312
+ page_content=' Y ) (24) (2) There exists a functor VV : gLM → hLM defined as follows for all Lie g-modules X,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
313
+ page_content=' Y and all morphisms f : X → Y in gLM: VV (X) = V(V,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
314
+ page_content=' X),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
315
+ page_content=' VV (f) = f 12 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
316
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
317
+ page_content=' AGORE where f : V(V, X) → V(V, Y ) is the unique morphism of Lie h-modules which makes the following diagram commutative: X τV(V, X) � τV(V, Y )◦f �◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ V(V, X) ⊗ V f⊗IdV � V(V, Y ) ⊗ V (25) Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
318
+ page_content=' As the result follows in a straightforward manner by a standard category the- ory argument, we only sketch the proof of the first assertion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
319
+ page_content=' Indeed, if f = IdX then IdU(U, X) is obviously the unique A-modules morphism which makes diagram (24) com- mute and therefore UU(IdX) = IdU(U, X).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
320
+ page_content=' Moreover, if f : X → Y and g: Y → W are two morphisms in gLM, then g ◦f : U(U, X) → U(U, W) is obviously the unique A-modules morphism which makes the following diagram commutative: Z ρU(U, X) � ρU(U, W )◦g◦f �◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ ◗ U ⊗ U(U, X) IdU⊗ � g◦f � � U ⊗ U(U, W) and we can conclude that UU(g ◦ f) = UU(g) ◦ UU(f), as desired.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
321
+ page_content=' □ Under the appropriate finite-dimensionality assumptions, the functors constructed in Proposition 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
322
+ page_content='1 are right adjoints to the universal module functors: Theorem 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
323
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
324
+ page_content=' Let (U, ↷) be a finite dimensional Lie h-module and (V, ·) a finite di- mensional A-module.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
325
+ page_content=' Then: 1) The following functors form an adjunction: UU : gLM → AM, U ⊗ −: AM → gLM;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
326
+ page_content=' 2) Similarly, the following functors also form an adjunction: VV : gLM → hLM, − ⊗ V : hLM → gLM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
327
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
328
+ page_content=' 1) As pointed out in Corollary 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
329
+ page_content='3, for all Lie g-modules Z and all A-modules X, there is a bijection between HomAM � UU(Z), X � and HomgLM (Z, U ⊗ X) given as follows for all morphisms of A-modules θ: UU(Z) → X: ΓZ,X : HomAM (UU(Z), X) → HomgLM (Z, U ⊗ X), ΓZ,X(θ) = (IdU ⊗ θ) ◦ ρU(U, Z).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
330
+ page_content=' The desired conclusion now follows by showing that the above bijection is natural in both variables.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
331
+ page_content=' This can be easily proved by a straightforward diagram chase and is left to the reader.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
332
+ page_content=' 2) Using now Corollary 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
333
+ page_content='9, for all Lie g-modules W and all Lie h-modules Z, we obtain a bijection between HomhLM � VV (W), Z � and HomgLM (W, Z ⊗ V ) defined as follows UNIVERSAL MODULES 13 for all morphisms of Lie h-modules θ: VV (W) → Z: ΓW,Z : HomhLM � VV (W), Z � → HomgLM (W, Z ⊗ V ), ΓW,Z(θ) = (θ ⊗ IdV ) ◦ ρV(V, W ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
334
+ page_content=' □ In particular, the two pairs of adjoint functors allow us to travel both ways between the representation categories of the two (arbitrary) Lie algebras h and g and respectively between the representation category of the associative algebra A and the representation category of the Lie algebra g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
335
+ page_content=' Example 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
336
+ page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
337
+ page_content=' Let ρi : g ⊗ Wi → Wi be representations of g, where i = 1, 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
338
+ page_content=' By the colimit preservation property of left adjoints we can easily conclude that for any finite dimensional Lie h-module U, UU(W1) ⊕ UU(W2) is the direct sum of the A-modules UU(W1) and UU(W2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
339
+ page_content=' Similarly, for any finite dimensional A-module V , VV (W1) ⊕ UU(W2) is the direct sum of the Lie h-modules UU(W1) and UU(W2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
340
+ page_content=' This can be easily extended to an arbitrary family of representations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
341
+ page_content=' References [1] Agore, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
342
+ page_content='L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
343
+ page_content=', Gordienko, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
344
+ page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
345
+ page_content=', Vercruysse, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
346
+ page_content=' - V -universal Hopf algebras (co)acting on Ω-algebras, Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
347
+ page_content=' Contemp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
348
+ page_content=' Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
349
+ page_content=' 25 (2023), 2150095.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
350
+ page_content=' [2] Agore, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
351
+ page_content='L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
352
+ page_content=' - Universal coacting Poisson Hopf algebras, Manuscripta Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
353
+ page_content=' 165 (2021), 255–268.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
354
+ page_content=' [3] Agore, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
355
+ page_content='L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
356
+ page_content=', Gordienko, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
357
+ page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
358
+ page_content=', Vercruysse, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
359
+ page_content=' - Equivalences of (co)module algebra structures over Hopf algebras, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
360
+ page_content=' Noncommut.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
361
+ page_content=' Geom.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
362
+ page_content=', 15 (2021), 951–993.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
363
+ page_content=' [4] Agore, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
364
+ page_content='L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
365
+ page_content=', Militaru, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
366
+ page_content=' - A new invariant for finite dimensional Leibniz/Lie algebras, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
367
+ page_content=' Algebra 562 (2020), 390–409.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
368
+ page_content=' [5] Ardizzoni, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
369
+ page_content=', El Kaoutit, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
370
+ page_content=', Menini, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
371
+ page_content=' - Categories of comodules and chain complexes of modules, Internat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
372
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
373
+ page_content=' Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
374
+ page_content=' 23 (2012), 1250109 [6] Bhattacharjee, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
375
+ page_content=', Chirvˇasitu, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
376
+ page_content=', Goswami, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
377
+ page_content=' - Quantum Galois groups of subfactors, Internat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
378
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
379
+ page_content=' Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
380
+ page_content=' 33 (2022), 2250013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
381
+ page_content=' [7] Chirvˇasitu, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
382
+ page_content=', Walton, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
383
+ page_content=', Wang, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
384
+ page_content=' - On quantum groups associated to a pair of preregular forms, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
385
+ page_content=' Noncommut.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
386
+ page_content=' Geom.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
387
+ page_content=' bf 13 (2019), 115—159.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
388
+ page_content=' [8] Hyland, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
389
+ page_content=', Lopez Franco, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
390
+ page_content=', Vasilakopoulou, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
391
+ page_content=' - Hopf measuring comonoids and enrichment, Proc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
392
+ page_content=' Lond.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
393
+ page_content=' Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
394
+ page_content=' Soc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
395
+ page_content=' 115 (2017), 1118—1148.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
396
+ page_content=' [9] Jacobson, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
397
+ page_content=' – Lie algebras, Dover Publications, NY, 1962.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
398
+ page_content=' [10] Mac Lane, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
399
+ page_content=' - Categories for the Working Mathematician, Graduate Texts in Mathematics 5, Springer, 1998.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
400
+ page_content=' [11] Manin, Yu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
401
+ page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
402
+ page_content=' - Quantum groups and noncommutative geometry, Universite de Montreal, Centre de Recherches Mathematiques, Montreal, QC, 1988.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
403
+ page_content=' [12] Militaru, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
404
+ page_content=' - The automorphisms group and the classification of gradings of finite dimensional associative algebras, Results Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
405
+ page_content=' 77 (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
406
+ page_content=' [13] Raedschelders, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
407
+ page_content=', Van den Bergh, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
408
+ page_content=' - The Manin Hopf algebra of a Koszul Artin-Schelter regular algebra is quasi-hereditary, Adv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
409
+ page_content=' Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
410
+ page_content=' 305 (2017), 601-–660.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
411
+ page_content=' [14] Rodrıiguez-Romo, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
412
+ page_content=', Taft, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
413
+ page_content=' - Some quantum-like Hopf algebras which remain noncommutative when q = 1, Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
414
+ page_content=' Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
415
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
416
+ page_content=' 61(2002), 41-–50.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
417
+ page_content=' [15] Sweedler, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
418
+ page_content='E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
419
+ page_content=' - Hopf Algebras, Benjamin New York, 1969.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
420
+ page_content=' [16] Tambara, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
421
+ page_content=' - The coendomorphism bialgebra of an algebra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
422
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
423
+ page_content=' Fac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
424
+ page_content=' Sci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
425
+ page_content=' Univ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
426
+ page_content=' Tokyo Math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
427
+ page_content=' 37 (1990), 425–456.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
428
+ page_content=' 14 A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
429
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
430
+ page_content=' AGORE Vrije Universiteit Brussel, Pleinlaan 2, B-1050 Brussels, Belgium Simion Stoilow Institute of Mathematics of the Romanian Academy, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
431
+ page_content='O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
432
+ page_content=' Box 1-764, 014700 Bucharest, Romania Email address: ana.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
433
+ page_content='agore@gmail.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
434
+ page_content='com' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/0NE1T4oBgHgl3EQfRQO7/content/2301.03051v1.pdf'}
1dAyT4oBgHgl3EQfPfaV/content/tmp_files/2301.00026v1.pdf.txt ADDED
@@ -0,0 +1,2071 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Killing Horizons Decohere Quantum Superpositions
2
+ Daine L. Danielson,1, ∗ Gautam Satishchandran,2, 1, † and Robert M. Wald1, ‡
3
+ 1Enrico Fermi Institute and Department of Physics,
4
+ The University of Chicago, 933 East 56th Street, Chicago, Illinois 60637, USA
5
+ 2Princeton Gravity Initiative, Princeton University,
6
+ Jadwin Hall, Washington Road, Princeton NJ 08544, USA
7
+ (Dated: January 3, 2023)
8
+ We recently showed that if a massive (or charged) body is put in a quantum spatial superposition, the
9
+ mere presence of a black hole in its vicinity will eventually decohere the superposition. In this paper
10
+ we show that, more generally, decoherence of stationary superpositions will occur in any spacetime
11
+ with a Killing horizon. This occurs because, in effect, the long-range field of the body is registered
12
+ on the Killing horizon which, we show, necessitates a flux of “soft horizon gravitons/photons”
13
+ through the horizon. The Killing horizon thereby harvests “which path” information of quantum
14
+ superpositions and will decohere any quantum superposition in a finite time. It is particularly
15
+ instructive to analyze the case of a uniformly accelerating body in a quantum superposition in
16
+ flat spacetime. As we show, from the Rindler perspective the superposition is decohered by “soft
17
+ gravitons/photons” that propagate through the Rindler horizon with negligible (Rindler) energy.
18
+ We show that this decoherence effect is distinct from—and larger than—the decoherence resulting
19
+ from the presence of Unruh radiation. We further show that from the inertial perspective, the
20
+ decoherence is due to the radiation of high frequency (inertial) gravitons/photons to null infinity.
21
+ (The notion of gravitons/photons that propagate through the Rindler horizon is the same notion
22
+ as that of gravitons/photons that propagate to null infinity.) We also analyze the decoherence of
23
+ a spatial superposition due to the presence of a cosmological horizon in de Sitter spacetime. We
24
+ provide estimates of the decoherence time for such quantum superpositions in both the Rindler and
25
+ cosmological cases.
26
+ 1.
27
+ INTRODUCTION
28
+ Consider a stationary spacetime in which an experimen-
29
+ talist, Alice, is present. Alice’s lab is stationary, and she
30
+ has control of a charged or massive body (hereinafter re-
31
+ ferred to as a “particle”). She sends her particle through a
32
+ Stern-Gerlach apparatus or other device that puts her par-
33
+ ticle in a quantum superposition of two spatially separated
34
+ states1. She keeps these spatially separated components
35
+ stationary for a time T and then recombines them. Will
36
+ Alice be able to maintain the coherence of these compo-
37
+ nents, so that, when recombined, the final state of her
38
+ particle will be pure—or will decoherence have occurred,
39
+ so that the final state of her particle will be mixed?
40
+ Ordinarily, any decoherence effects will be dominated
41
+ by “environmental influences,” i.e., additional degrees
42
+ of freedom present in Alice’s lab that interact with her
43
+ particle. We assume that Alice has perfect control of her
44
+ laboratory and its environment so that there is no deco-
45
+ herence from ordinary environmental effects. However,
46
+ for a charged or massive particle, Alice cannot perfectly
47
+ control the electromagnetic or gravitational field, since
48
+ her particle acts as a source for these fields and some
49
+ ∗ daine@uchicago.edu
50
+ † gautam.satish@princeton.edu
51
+ ‡ rmwa@uchicago.edu
52
+ 1 Quantum spatial superpositions of massive bodies have been of
53
+ recent interest in both theoretical as well as proposed experimental
54
+ probes of fundamental properties of quantum gravity, e.g., [1–13].
55
+ radiation will be emitted during the portions of her ex-
56
+ periment where she separates and recombines her particle.
57
+ Nevertheless, in Minkowski spacetime, if her lab is sta-
58
+ tionary in the ordinary, inertial sense, she can perform
59
+ her experiment in a sufficiently adiabatic manner that
60
+ negligible decohering radiation is emitted. In principle,
61
+ she can keep the particle separated for an arbitrarily long
62
+ time T and still maintain coherence when the components
63
+ are recombined.
64
+ In a recent paper [14], we showed that the above situ-
65
+ ation changes dramatically if a black hole is present in
66
+ the spacetime—even though the experiment is carried
67
+ out entirely in the black hole’s exterior. In effect, a black
68
+ hole horizon harvests “which path” information about any
69
+ quantum superposition in its exterior, via the long-range
70
+ fields sourced by the superposed matter. We showed that
71
+ this results in the unavoidable radiation of entangling
72
+ “soft photons or gravitons” through the horizon that carry
73
+ the “which path” information into the black hole. Con-
74
+ sequently, the mere presence of the black hole implies a
75
+ fundamental rate of decoherence on the quantum super-
76
+ position2. Although the rate of decoherence will be small
77
+ if the black hole is far away, the coherence decays expo-
78
+ nentially in the time, T, that the spatial superposition
79
+ is maintained. Thus, in any spacetime with a black hole,
80
+ there will be essentially complete decoherence within a
81
+ 2 In QED, this effect applies only to superpositions of charged particles.
82
+ However, since all matter sources gravity, the quantum gravitational
83
+ decoherence applies to all superpositions.
84
+ arXiv:2301.00026v1 [hep-th] 30 Dec 2022
85
+
86
+ 2
87
+ finite time3.
88
+ The purpose of this paper is to generalize the results of
89
+ [14] to spacetimes with Killing horizons, i.e., spacetimes
90
+ with a Killing vector field such that there is a null surface
91
+ to which the Killing field is normal (see, e.g., [15] for a
92
+ discussion of properties of Killing horizons). The event
93
+ horizon of a stationary black hole is a Killing horizon
94
+ [16–18], so spacetimes with Killing horizons encompass
95
+ the case of stationary spacetimes that contain black holes.
96
+ However, there are many cases of interest where Killing
97
+ horizons are present without the presence of black holes.
98
+ One such case is that of Minkowski spacetime, where
99
+ the Rindler horizon is a Killing horizon with respect to
100
+ the Lorentz boost Killing field.
101
+ Another such case is
102
+ de Sitter spacetime, where the cosmological horizon is a
103
+ Killing horizon. We will show that in these cases, a spatial
104
+ superposition that is kept stationary (with respect to the
105
+ symmetry generating the Killing horizon) will decohere
106
+ in a manner similar to the black hole case. We will also
107
+ provide an estimate of the maximum amount of time
108
+ during which coherence can be maintained.
109
+ The case of the Rindler horizon is particularly instruc-
110
+ tive.
111
+ The relevant symmetry here is that of Lorentz
112
+ boosts, so Alice’s lab will be “stationary” if it is uniformly
113
+ accelerating. Our analysis based upon radiation through
114
+ the Rindler horizon shows that decoherence of a uniformly
115
+ accelerating spatially separated superposition occurs be-
116
+ cause of the emission of “soft” (i.e., very low frequency)
117
+ gravitons or photons, where the frequency is defined rel-
118
+ ative to an affine parameter on the Rindler horizon. As
119
+ we shall show, the decoherence effect of this radiation of
120
+ soft gravitons or photons is distinct from the (smaller)
121
+ decoherence effect resulting from the presence of Unruh
122
+ radiation. To gain further insight, we also analyze the
123
+ decohering radiation in the electromagnetic case from the
124
+ inertial point of view, using the Liénard-Wiechert solution
125
+ to determine the radiation at future null infinity. As we
126
+ shall show, the decohering photons are of high frequency
127
+ at null infinity.
128
+ In sec. 2 we provide a general discussion of the deco-
129
+ herence of a quantum superposition due to radiation in a
130
+ stationary spacetime. In sec. 3 we consider the decoher-
131
+ ence of a uniformly accelerating superposition, analyzing
132
+ it from both the Rindler and Minkowski viewpoints. We
133
+ also show that this decoherence is distinct from (and larger
134
+ than) the decoherence effects due to the presence of Un-
135
+ ruh radiation. In sec. 4 we analyze the decoherence in de
136
+ Sitter spacetime associated with the cosmological horizon.
137
+ We will work in Planck units where G = c = ℏ = kB = 1
138
+ and, in electromagnetic formulas, we also put ϵ0 = 1, but
139
+ we will restore these constants in our formulas that give
140
+ estimates for decoherence times. Lower case Latin indices
141
+ represent abstract spacetime indices. Upper case Latin in-
142
+ dices from the early alphabet correspond to spatial indices
143
+ 3 This maximal coherence time for superpositions in the exterior can
144
+ be much smaller than the evaporation time of the black hole.
145
+ on horizons or null infinity.
146
+ 2.
147
+ DECOHERENCE DUE TO RADIATION IN A
148
+ STATIONARY SPACETIME
149
+ In this section, we will give a general analysis of the
150
+ decoherence of a spatial superposition in a stationary
151
+ spacetime due to emission of radiation by the body. Our
152
+ analysis applies both to the decoherence of a charged
153
+ body due to emission of electromagnetic radiation and to
154
+ the decoherence of a gravitating body due to emission of
155
+ linearized gravitational radiation. The analyses of these
156
+ two cases are very closely parallel.
157
+ In order to avoid
158
+ repetition, we will analyze only the electromagnetic case
159
+ in detail, but near the end of this section, we will state the
160
+ corresponding results in the linearized gravitational case,
161
+ which can be obtained straightforwardly by replacing the
162
+ vector potential Aa with the perturbed metric hab, the
163
+ charge-current ja with the stress-energy Tab, etc.
164
+ Consider a charged particle4 in a stationary spacetime.
165
+ We assume that the particle is initially in a stationary
166
+ state. The particle is then put through a Stern-Gerlach (or
167
+ other) apparatus, resulting in it being in a superposition
168
+ state5
169
+ |ψ⟩ =
170
+ 1
171
+
172
+ 2 (|ψ1⟩ + |ψ2⟩)
173
+ (2.1)
174
+ where |ψ1⟩ and |ψ2⟩ are normalized states that are spa-
175
+ tially separated after passing through the apparatus. The
176
+ particle is then recombined via a reversing Stern-Gerlach
177
+ (or other) apparatus and returns to a stationary state.
178
+ We are particularly interested in the case where, between
179
+ separation and recombination, |ψ1⟩ and |ψ2⟩ are kept
180
+ stationary for a long period of time, T, but we do not
181
+ make any such assumption in this section. We wish to
182
+ estimate how much decoherence due to emission of elec-
183
+ tromagnetic radiation will have occurred by the time of
184
+ recombination6.
185
+ 4 As already indicated above, the “particle” need not be an elementary
186
+ particle but could be a “nanoparticle” or any other body whose only
187
+ relevant degree of freedom for our analysis is its center of mass.
188
+ 5 For simplicity, we have assumed that we have a 50-50 superposition
189
+ of |ψ1⟩ and |ψ2⟩, but this assumption is not necessary.
190
+ 6 The decoherence of Alice’s particle can be experimentally deter-
191
+ mined as follows. We assume that Alice’s particle initially has spin
192
+ in the positive x-direction and thus is in a 50-50 superposition of
193
+ z-spin after passing through the initial Stern-Gerlach apparatus.
194
+ After recombination, Alice measures the x-spin of her particle. If
195
+ coherence of the superposition eq. (2.1) has been maintained, then
196
+ (assuming that Alice has made appropriate corrections if there are
197
+ any phase differences between the paths) the spin will always be
198
+ found to be in the positive x-direction. On the other hand, if any
199
+ coherence has been lost, the particle will not be in a state of definite
200
+ spin, and the spin will sometimes be found to be in the negative
201
+ x-direction. By repeating the experiment many times, Alice can, in
202
+ principle, determine the decoherence to any desired accuracy.
203
+
204
+ 3
205
+ A key assumption that we shall make is that the fluctu-
206
+ ations in the charge-current operator ja in the states |ψ1⟩
207
+ and |ψ2⟩ are negligibly small over the scales of interest
208
+ so that we can treat the charge current in each of these
209
+ states as c-number sources in Maxwell’s equations, given
210
+ by j^a_1 = ⟨ψ_1|j^a|ψ_1⟩ and j^a_2 = ⟨ψ_2|j^a|ψ_2⟩, respectively. In
213
+ the initial and final stationary eras, |ψ1⟩ and |ψ2⟩ are
214
+ assumed to coincide spatially (though they may differ in
215
+ other characteristics, such as spin) so that ja
216
+ 1 = ja
217
+ 2 at very
218
+ early and very late times.
219
+ In order to proceed further, we must specify the initial
220
+ state of the electromagnetic field. Since, prior to going
221
+ through the Stern-Gerlach apparatus, the charge is as-
222
+ sumed to be stationary, at early times we may subtract
223
+ the “Coulomb field” Cin
224
+ a of the charge, i.e., at early times
225
+ we may consider the electromagnetic field observable
226
+ A^{in}_a = A_a − C^{in}_a \mathbb{1}    (2.2)
230
+ where Cin
231
+ a is the (assumed to be unique) stationary clas-
232
+ sical solution to Maxwell’s equations with the early time
233
+ stationary charged particle source ja
234
+ 1 = ja
235
+ 2 and Aa is
236
+ the vector potential operator. We need not assume any
237
+ specific choice of gauge for Ain
238
+ a . Then Ain
239
+ a satisfies the
240
+ source-free Maxwell’s equations at early times, and we
241
+ may extend its definition to all times by requiring it to
242
+ satisfy the source-free Maxwell equations everywhere.
243
+ The initial state of the electromagnetic field may be
244
+ specified by giving the “radiation state” of Ain
245
+ a .
246
+ The
247
+ choice of this state depends on the physical situation being
248
+ considered. If the spacetime were globally stationary—i.e.,
249
+ if the stationary Killing field were everywhere timelike, so,
250
+ in particular, there are no Killing horizons—it would be
251
+ natural to assume that the initial state of the radiation
252
+ is the stationary vacuum state, i.e., the ground state
253
+ relative to the time translations. For the case of a black
254
+ hole spacetime, it would be correspondingly natural to
255
+ assume that the initial state of the radiation is that of
256
+ the Unruh vacuum, since for a black hole formed by
257
+ gravitational collapse, the state of a quantum field is
258
+ expected to approach the Unruh vacuum after the black
259
+ hole has “settled down” to a stationary state. For the
260
+ case of Minkowski spacetime, we take the initial state
261
+ of the radiation to be the ordinary (inertial) Minkowski
262
+ vacuum. For de Sitter spacetime, we take the initial state
263
+ of the radiation to be the de Sitter invariant vacuum7 for
264
+ the electromagnetic field [20]. We denote the initial state
265
+ of the radiation in all of the above cases by |Ψ0⟩.
266
+ In each of the above cases, |Ψ0⟩ is a pure, quasi-free (i.e.,
267
+ Gaussian) state. It follows (see, e.g., [22] or appendix A
268
+ of [15]) that we can construct a one-particle Hilbert space
269
+ Hin and corresponding Fock space F(Hin) wherein |Ψ0⟩
270
+ plays the role of the vacuum state and the field operator
271
+ 7
272
+ A de Sitter invariant vacuum state does not exist for the massless
273
+ scalar field [19] but such a state does exist for the electromagnetic
274
+ field [20] and linearized gravitational field [21].
275
+ A^{in}_a is represented on F(H_in) by
+ A^{in}_a(f^a) = i\,a(Kσf) − i\,a^†(Kσf).    (2.3)
280
+ Here f^a is a divergence-free8 test function, σf denotes the
281
+ advanced minus retarded solution to Maxwell’s equations
282
+ with source f a, and K : S → Hin denotes the map taking
283
+ the space S of classical solutions to their representatives
284
+ in the one-particle Hilbert space Hin. The commutator
285
+ of the creation and annihilation operators in eq. (2.3) is
286
+ given by
287
+ [a(Kσf), a†(Kσg)] = ⟨Kσf|Kσg⟩ 1.
288
+ (2.4)
289
+ where ⟨Kσf|Kσg⟩ is the inner product on Hin, which is
290
+ given by a natural generalization of the Klein-Gordon
291
+ inner product to electromagnetic fields.
292
+ For the case of a globally stationary spacetime in the
293
+ stationary vacuum state, Kσf corresponds to taking the
294
+ positive frequency part of σf with respect to the time
295
+ translations generating the stationary symmetry. For the
296
+ case of a stationary black hole in the Unruh vacuum state,
297
+ Kσf corresponds to taking the positive frequency part of
298
+ σf with respect to affine time on the past horizon and
299
+ with respect to Killing time at past null infinity. For
300
+ Minkowski spacetime in the inertial Minkowski vacuum,
301
+ Kσf corresponds to taking the positive frequency part
302
+ of σf with respect to inertial time translations. Equiv-
303
+ alently, Kσf, in this case, corresponds to the solution
304
+ obtained by taking the positive frequency part of the re-
305
+ striction of σf to any null hyperplane N (i.e., any Rindler
306
+ horizon) with respect to an affine parametrization of the
307
+ null geodesics generating N. For de Sitter spacetime in
308
+ the de Sitter invariant vacuum, Kσf corresponds to the
309
+ solution obtained by taking the positive frequency part
310
+ of the restriction of σf to any cosmological horizon with
311
+ respect to an affine parametrization of the null geodesics
312
+ generating that horizon.
313
+ Under the above assumption that the charge-current
314
+ of |ψ1⟩ and |ψ2⟩ can be treated as c-number sources, the
315
+ electromagnetic field Ai,a in the presence of the charge
316
+ in state |ψi⟩ for i = 1, 2 is given in terms of the source
317
+ free field Ain
318
+ a by [23]
319
+ A_{i,a} = A^{in}_a + G^{ret}_a(j^b_i)\,\mathbb{1}    (2.5)
324
+ where Gret
325
+ a (jb
326
+ i ) denotes the classical retarded solution for
327
+ source jb
328
+ i . In particular, since the field Ain
329
+ a is in state
330
+ |Ψ0⟩, the correlation functions of the electromagnetic field
331
+ 8 Restriction of the smearing to divergence-free test functions is
332
+ necessary and sufficient to eliminate the gauge dependence of Ain
333
+ a
334
+ (see, e.g., P.101 of [22]).
335
+
336
+ 4
337
+ Ai,a for |ψi⟩ are given by9
338
+ ⟨A_{i,a_1}(x_1) \cdots A_{i,a_n}(x_n)⟩
+ = ⟨Ψ_0| \big( A^{in}_{a_1}(x_1) + G^{ret}_{a_1}(j^b_i)(x_1)\,\mathbb{1} \big) \cdots \big( A^{in}_{a_n}(x_n) + G^{ret}_{a_n}(j^b_i)(x_n)\,\mathbb{1} \big) |Ψ_0⟩.    (2.6)
355
+ Equation (2.6) is valid at all times.
356
+ However, at
357
+ late times—i.e., to the future of any Cauchy surface Σ
358
+ corresponding to the time at which recombination has
359
+ occurred—we can again subtract off the common sta-
360
+ tionary Coulomb field, Cout
361
+ a
362
+ , of ja
363
+ 1 = ja
364
+ 2 to obtain the
365
+ source-free field10 Aout
366
+ i,a that describes the radiation at
367
+ late times for the states |ψi⟩,
368
+ A^{out}_{i,a} = A_{i,a} − C^{out}_a \,\mathbb{1}.    (2.7)
373
+ By eq. (2.6), at late times, the correlation functions of
374
+ A^{out}_a are given by
+ ⟨A^{out}_{i,a_1}(x_1) \cdots A^{out}_{i,a_n}(x_n)⟩
+ = ⟨Ψ_0| \big( A^{in}_{a_1}(x_1) + \mathcal{A}_{i,a_1}(x_1)\,\mathbb{1} \big) \cdots \big( A^{in}_{a_n}(x_n) + \mathcal{A}_{i,a_n}(x_n)\,\mathbb{1} \big) |Ψ_0⟩    (2.8)
+ where
+ \mathcal{A}_{i,a} = G^{ret}_a(j^b_i) − C^{out}_a.    (2.9)
399
+ Note that Ai,a is a classical solution of the source-free
400
+ Maxwell equations in the late-time region.
401
+ The correlation functions eq. (2.8) on any late-time
402
+ Cauchy surface are precisely those of the coherent state
403
+ |Ψ_i⟩ = e^{−\frac{1}{2}\|K\mathcal{A}_i\|^2} \exp\!\big( a^†(K\mathcal{A}_i) \big) |Ψ_0⟩ ,    (2.10)
410
+ where the norm is that of the one-particle inner product
411
+ of eq. (2.4). Thus, the coherent state |Ψ1⟩ describes the
412
+ “out” radiation state corresponding to charged particle
413
+ state |ψ1⟩ and the coherent state |Ψ2⟩ describes the “out”
414
+ radiation state corresponding to charged particle state
415
+ |ψ2⟩. The joint “out” state, |Υ⟩, of the particle-radiation
416
+ system is given by
417
+ |Υ⟩ = \frac{1}{\sqrt{2}} \big( |ψ_1⟩ ⊗ |Ψ_1⟩ + |ψ_2⟩ ⊗ |Ψ_2⟩ \big).    (2.11)
422
+ Therefore, the decoherence of |ψ1⟩ and |ψ2⟩ due to emis-
423
+ sion of electromagnetic radiation is given by
424
+ D = 1 − | ⟨Ψ1|Ψ2⟩ |.
425
+ (2.12)
426
+ 9 It is understood that each of the xk variables should be smeared
427
+ with a divergence-free test vector field fa
428
+ k .
429
+ 10Note that Ain
430
+ a
431
+ did not have a subscript “i” whereas Ai,a and
432
+ Aout
433
+ i,a do carry such subscripts. This is a consequence of the fact
434
+ that we are working in the “in” representation—i.e., the Heisenberg
435
+ representation on the Hilbert space F(Hin)—so Ain
436
+ a does not depend
437
+ on the sources, but the other fields do.
438
+ We wish to evaluate D.
439
+ By the general formula for the inner product of coherent
440
+ states, we have
441
+ |⟨Ψ_1|Ψ_2⟩| = \exp\!\Big( −\frac{1}{2}\|K(\mathcal{A}_1 − \mathcal{A}_2)\|^2 \Big).    (2.13)
448
+ Now, in the late-time era, A1,a−A2,a is just the difference
449
+ between the classical retarded solutions with sources ja
450
+ 1
451
+ and ja
452
+ 2,
453
+ \mathcal{A}_{1,a} − \mathcal{A}_{2,a} = G^{ret}_a(j^b_1) − G^{ret}_a(j^b_2) = G^{ret}_a(j^b_1 − j^b_2).    (2.14)
461
+ Consider the coherent state associated with Gret
462
+ a (jb
463
+ 1 − jb
464
+ 2)
465
+ in the late-time era. We refer to photons in this state as
466
+ entangling photons. By the general properties of coherent
467
+ states, the expected number, ⟨N⟩, of entangling photons
468
+ is given by
469
+ ⟨N⟩ ≡ \big\| K\big[ G^{ret}(j_1 − j_2) \big] \big\|^2.    (2.15)
475
+ Thus, we have
476
+ |⟨Ψ_1|Ψ_2⟩| = \exp\!\Big( −\frac{1}{2}⟨N⟩ \Big)    (2.16)
+ so
+ D = 1 − |⟨Ψ_1|Ψ_2⟩| = 1 − \exp\!\Big( −\frac{1}{2}⟨N⟩ \Big)    (2.17)
489
+ and we see that the necessary and sufficient condition for
490
+ significant decoherence (D ∼ 1) is ⟨N⟩ ≳ 1.
491
+ We summarize the results that we have obtained above
492
+ as follows. Under the assumptions we have made above,
493
+ in order to calculate the decoherence, D, of the particle
494
+ due to radiation, we carry out the following steps:
495
+ (1) We obtain the expected charge current, ja
496
+ 1 and ja
497
+ 2,
498
+ for the particle in states |ψ1⟩ and |ψ2⟩ of the super-
499
+ position.
500
+ (2) We
501
+ calculate
502
+ the
503
+ classical
504
+ retarded
505
+ solution,
506
+ Gret
507
+ a (jb
508
+ 1 − jb
509
+ 2) for the difference of these charge cur-
510
+ rents, which is a source-free solution at late times,
511
+ since ja
512
+ 1 = ja
513
+ 2 at late times.
514
+ (3) We calculate the one-particle state KGret(j1 − j2)
515
+ corresponding to Gret
516
+ a (jb
517
+ 1 − jb
518
+ 2) at late times.
519
+ In
520
+ the various cases, this corresponds to the follow-
521
+ ing: (i) For a globally stationary spacetime initially
522
+ in the stationary vacuum state, this one-particle
523
+ state is the positive frequency part of the solution
524
+ with respect to the time translations generating the
525
+ stationary symmetry. (ii) For the case of a station-
526
+ ary black hole initially in the Unruh vacuum, the
527
+ one-particle state is the positive frequency part of
528
+ the solution with respect to affine time on the past
529
+ horizon and with respect to Killing time at past
530
+ null infinity. (iii) For Minkowski spacetime initially
531
+ in the Minkowski vacuum, the one-particle state
532
+ is the positive frequency part of the solution with
533
+
534
+ 5
535
+ respect to inertial time or, equivalently, the posi-
536
+ tive frequency part with respect to affine time on
537
+ any Rindler horizon. (iv) For de Sitter spacetime
538
+ initially in the de Sitter invariant vacuum, the one-
539
+ particle state is the positive frequency part of the
540
+ solution with respect to affine time on any cosmo-
541
+ logical horizon.
542
+ (4) We compute the squared norm, ∥K[Gret(j1 −j2)]∥2,
543
+ of this one-particle state at late times. This quan-
544
+ tity is equal to the expected number of entangling
545
+ photons, ⟨N⟩. The decoherence due to radiation is
546
+ then given by
547
+ D = 1 − \exp\!\Big( −\frac{1}{2} \big\| K\big[ G^{ret}(j_1 − j_2) \big] \big\|^2 \Big).    (2.18)
558
+ As previously stated, the above analysis extends
559
+ straightforwardly to the linearized gravitational case,
560
+ where the perturbed metric, hab, is treated as a linear
561
+ quantum field propagating in the background classical
562
+ stationary spacetime. To compute the decoherence due
563
+ to gravitational radiation in this case, we carry out the
564
+ above steps, replacing Aa by hab and the charge-current
565
+ ja by the stress-energy tensor Tab. The retarded solu-
566
+ tion Gret
567
+ a (jb) for Maxwell’s equations is replaced by the
568
+ retarded solution Gret
569
+ ab (Tcd) for the linearized Einstein
570
+ equation. The map K : S → Hin is again obtained as
571
+ in item (3) above and the inner product on Hin is again
572
+ given by a natural generalization of the Klein-Gordon
573
+ inner product to linearized gravitational fields. The de-
574
+ coherence due to gravitational radiation is then given by
575
+ the analog of eq. (2.18).
576
+ The above analysis applies for any motion of the compo-
577
+ nents of Alice’s superposition. We are primarily interested
578
+ in the case where, during a time interval T1, Alice puts
579
+ a particle of charge q (or mass m) into a spatial super-
580
+ position, where the distance between the components of
581
+ the particle wavefunction is d. She then keeps this super-
582
+ position stationary in her lab for a time T. Finally, she
583
+ recombines her particle over a time interval T2.
584
+ In Minkowski spacetime in the case where Alice’s lab is
585
+ inertial, Gret
586
+ a (jb
587
+ 1 − jb
588
+ 2) will be nonzero at null infinity only
589
+ at the retarded times corresponding to the time intervals
590
+ T1 and T2. A rough estimate of the number of entangling
591
+ photons was obtained in [3] using the Larmor formula for
592
+ radiation in these eras, which, in natural units, yields
593
+ ⟨N⟩ ∼ \frac{q^2 d^2}{[\min(T_1, T_2)]^2}    (Minkowski, EM).    (2.19)
598
+ The corresponding result in the linearized gravitational
599
+ case is [3]
600
+ ⟨N⟩ ∼ \frac{m^2 d^4}{[\min(T_1, T_2)]^4}    (Minkowski, GR).    (2.20)
605
+ Therefore, if Alice recombines her particle sufficiently
606
+ slowly that T1, T2 ≫ qd in the electromagnetic case or
607
+ T1, T2 ≫ md2 in the gravitational case, then she can main-
608
+ tain the quantum coherence of her particle. In particular,
609
+ Alice can keep the components of her particle separated
610
+ for as long a time T as she likes without destruction of
611
+ the coherence.
612
+ As shown in [14], the situation is quite different if a
613
+ black hole is present. In the electromagnetic case, even
614
+ if T1, T2 ≫ qd so that a negligible number of entangling
615
+ photons is emitted to infinity, there will be entangling
616
+ radiation emitted into the black hole. For large T, the
617
+ number of entangling photons increases with T as11
618
+ ⟨N⟩ ∼ \frac{M^3 q^2 d^2}{D^6}\, T    (black hole, EM)    (2.21)
623
+ where M is the mass of the black hole, D is the proper
624
+ distance of Alice’s lab from the horizon of the black hole,
625
+ and we assume that D ≳ M. The corresponding result
626
+ in the linearized gravitational case is
627
+ ⟨N⟩ ∼ \frac{M^5 m^2 d^4}{D^{10}}\, T    (black hole, GR).    (2.22)
632
+ Thus, the coherence of Alice’s particle will always be
633
+ destroyed within a finite time.
634
+ In the next two sections, we will apply the above anal-
635
+ ysis to the cases of Rindler spacetime and de Sitter space-
636
+ time. Although we will explicitly analyze only the Rindler
637
+ and de Sitter cases, it will be clear from our analysis of the
638
+ next two sections—as well as our analysis in [14]—that it
639
+ can be applied to any Killing horizon, provided only that
640
+ the initial “vacuum state” |Ψ0⟩ of the electromagnetic
641
+ and/or linearized gravitational field corresponds to one-
642
+ particle states that are positive frequency with respect to
643
+ affine time on the future Killing horizon.
644
+ 3.
645
+ RINDLER HORIZONS DECOHERE
646
+ QUANTUM SUPERPOSITIONS
647
+ We now consider the case of Minkowski spacetime with
648
+ Alice’s lab uniformly accelerating with acceleration a.
649
+ Specifically, we take Alice’s lab to follow the orbit
650
+ t = \frac{1}{a}\sinh(aτ),    z = \frac{1}{a}\cosh(aτ)    (3.1)
655
+ of the boost Killing field
656
+ b^a = a \Big[ z \Big( \frac{∂}{∂t} \Big)^a + t \Big( \frac{∂}{∂z} \Big)^a \Big].    (3.2)
668
+ Here we have normalized ba such that baba = −1 on
669
+ the worldline of Alice’s laboratory. Thus, ba is the four-
670
+ velocity of Alice’s laboratory and τ is the proper time in
671
+ 11In the analysis of [14], we used the fact that the Unruh vacuum is
672
+ well approximated by the Hartle-Hawking vacuum at low frequencies
673
+ near the horizon of the black hole.
674
+
675
+ 6
676
+ her lab. We introduce the null coordinates
677
+ U ≡ t − z,
678
+ V ≡ t + z
679
+ (3.3)
680
+ and the corresponding vector fields
681
+ na ≡ (∂/∂V )a,
682
+ ℓa ≡ (∂/∂U)a,
683
+ (3.4)
684
+ which are globally defined, future-directed null vector
685
+ fields that satisfy ℓana = −1. In terms of these coordi-
686
+ nates, the Minkowski spacetime metric is
687
+ η = −dUdV + dx2 + dy2
688
+ (3.5)
689
+ and the boost vector field is given by
690
+ b^a = a \big( −U ℓ^a + V n^a \big).    (3.6)
695
+ The boost Killing field is null on the two “Rindler hori-
696
+ zons,” i.e., the two null planes U = 0 and V = 0, which
697
+ divide Minkowski spacetime into four wedges. The orbits
698
+ of the boost Killing field are future-directed and time-
699
+ like within the “right Rindler wedge” WR which is the
700
+ region U < 0 and V > 0. Thus, the “right Rindler wedge”
701
+ WR—where Alice performs her experiment—is a static,
702
+ globally hyperbolic spacetime where the notion of “time
703
+ translations” is defined by Lorentz boosts.
704
+ We refer to the null surface U = 0 as the future Rindler
705
+ horizon and denote it as H +
706
+ R . On the region V > 0 of
707
+ H +
708
+ R , it is useful to introduce the coordinate v by
709
+ V = V0eav
710
+ (3.7)
711
+ where V0 is an arbitrary constant. Then, for V > 0 on
712
+ H +
713
+ R , we have
714
+ b^a \big|_{H^+_R} = aV \Big( \frac{∂}{∂V} \Big)^a \Big|_{H^+_R} = \Big( \frac{∂}{∂v} \Big)^a \Big|_{H^+_R}.    (3.8)
727
+ Since (∂/∂V )a on the horizon is tangent to the affinely
728
+ parameterized null geodesic generators of H +
729
+ R , we refer
730
+ to V as the “affine time” on H +
731
+ R , whereas we refer to v
732
+ as the “boost Killing time” on H +
733
+ R .
734
+ 1.
735
+ Decoherence Due to Radiation of Soft
736
+ Photons/Gravitons Through the Rindler Horizon
737
+ We are now in position to apply the results of sec. 2
738
+ to the Rindler case. We will first analyze the electromag-
739
+ netic case and then give the corresponding results in the
740
+ gravitational case.
741
+ We assume that the electromagnetic field is initially
742
+ in the Minkowski vacuum state. We assume that Alice
743
+ possesses a charged particle that is initially stationary
744
+ (with respect to the boost Killing field) in her (uniformly
745
+ accelerating) lab. She then creates a quantum spatial
746
+ superposition which is held stationary (with respect to
747
+ the boost Killing field) for a proper time T and is then
748
+ recombined. We wish to know the degree of decoherence
749
+ of Alice’s particle due to emission of radiation. We may
750
+ directly apply the analysis of sec. 2 to answer this question.
751
+ The future Rindler horizon H +
752
+ R (U = 0) does not meet
753
+ the technical requirements of being a Cauchy surface for
754
+ Minkowski spacetime, since there are inextendible time-
755
+ like curves that remain in the past of H +
756
+ R as well as
757
+ inextendible timelike curves that lie in the future of H +
758
+ R .
759
+ However, as argued in [24], it is effectively a Cauchy sur-
760
+ face for determining evolution of solutions to the wave
761
+ equation. This is most easily seen in the conformally
762
+ completed spacetime, where H +
763
+ R is the past light cone of
764
+ a point p ∈ I + except for the single generator that lies
765
+ on I + and it also is the future light cone of a point on
766
+ p′ ∈ I − except for the single generator that lies on I −.
767
+ Data on the full past light cone of p would determine a
768
+ solution to the past of H +
769
+ R and data on the full future
770
+ light cone of p′ would determine a solution to the future
771
+ of H +
772
+ R , thereby determining a solution everywhere in
773
+ Minkowski spacetime. However, for solutions with ap-
774
+ propriate decay, the data on the missing null geodesic
775
+ generators of I + and I − can be determined by conti-
776
+ nuity from the data on H +
777
+ R . Consequently, data on H +
778
+ R
779
+ suffices to uniquely characterize solutions with appropri-
780
+ ate decay. Consequently, the “out” states |Ψ1⟩ and |Ψ2⟩
781
+ of the radiation are completely determined by data on
782
+ H +
783
+ R . Note that this contrasts sharply with the black hole
784
+ case, where one would need data on both the future event
785
+ horizon and future null infinity to characterize the “out”
786
+ state of radiation.
787
+ The decoherence of Alice’s particle due to radiation is
788
+ given by eq. (2.17). In order to evaluate this, we first
789
+ consider a classical point charge of charge q in the “right
790
+ Rindler wedge” WR that is stationary with respect to the
791
+ boost Killing field and lies at proper distance D from the
792
+ bifurcation surface of the Rindler horizon. Such a charge
793
+ will be uniformly accelerating with acceleration a given
794
+ by
795
+ a = 1
796
+ D .
797
+ (3.9)
798
+ The explicit solution for such a stationary charge in the
799
+ Rindler wedge has long been known [25–30]. The only
800
+ nonvanishing component of the electromagnetic field in
801
+ the region V > 0 of H +
802
+ R is
803
+ EU ≡ Fabℓanb =
804
+ 2a2q
805
+ π(1 + a2ρ2)2
806
+ (3.10)
807
+ where ρ2 ≡ x2 + y2. Electromagnetic radiation through
808
+ the Rindler horizon is described by the pullback, EA, of
809
+ the electric field Ea = Fabnb to H +
810
+ R , where the capital
811
+ Latin indices from the early alphabet denote spatial com-
812
+ ponents in the x and y directions. Since EA = 0 on the
813
+ horizon for a uniformly accelerated charge, one may say
814
+ that a charge held stationary in Alice’s lab does not pro-
815
+ duce any radiation as determined on H +
816
+ R —even though
817
+ a uniformly accelerated charge radiates (inertial) energy
818
+
819
+ 7
820
+ to future null infinity12.
821
+ Now consider the case where the point charge is initially
822
+ uniformly accelerating with acceleration a at a proper
823
+ distance D = 1/a from the bifurcation surface of the
824
+ Rindler horizon.
825
+ The charge is then moved in the z-
826
+ direction to a different orbit of the same boost Killing
827
+ field, so that it has uniform acceleration a′ and lies at
828
+ proper distance D′ = 1/a′ from the Rindler horizon. After
829
+ the charge has reached its new location, the electric field
830
+ on H +
831
+ R is again given by eq. (3.10), but its value, E′
832
+ U,
833
+ will be different from its value at early times. Maxwell’s
834
+ equations on H +
835
+ R imply that
836
+ DAEA = ∂V EU
837
+ (3.11)
838
+ where DA is the derivative operator on the R2 cross-
839
+ sections of the horizon and capital Latin indices from
840
+ the early alphabet are raised and lowered with the met-
841
+ ric, δAB, on the cross sections. Eq. (3.11) implies that
842
+ EA ̸= 0 whenever ∂V EU ̸= 0, so there will be radiation
843
+ through the horizon as the charge is being moved. Most
844
+ importantly, it implies that
845
+ D^A \Big( \int_{−∞}^{∞} dV\, E_A \Big) = ∆E_U    (3.12)
855
+ where ∆EU = E′
856
+ U −EU is the change in the radial electric
857
+ field between the charge at positions D′ and D. Now, in
858
+ a gauge where Aana = 0 on the horizon, the transverse
859
+ (i.e., x-y) components of the electric field are related to
860
+ the corresponding components of the vector potential by
861
+ EA = −∂V AA.
862
+ (3.13)
863
+ Since the transverse components of the Coulomb field of a
864
+ static charge vanish, we may replace the vector potential
865
+ A_A by the “Coulomb subtracted” vector potential \mathcal{A}_A
+ defined by eq. (2.9), so we have
+ E_A = −∂_V \mathcal{A}_A.    (3.14)
869
+ It then follows immediately from eq. (3.12) that the dif-
870
+ ference, ∆AA, between the final and initial values of AA
871
+ is given by
872
+ D^A(∆\mathcal{A}_A) = −∆E_U    (3.15)
874
+ independently of the manner in which the charge is moved
875
+ from D to D′. Equation (3.15) is an exact mathemati-
876
+ cal analog of the electromagnetic memory effect at null
877
+ infinity [31].
878
+ 12A uniformly accelerating charge has a nonvanishing inertial energy
879
+ current flux Tabta through both H +
880
+ R and I +, where ta denotes a
881
+ Minkowski time translation. However, the flux of “boost energy”
882
+ Tabba vanishes at both H +
883
+ R and I +.
884
+ For the explicit solution eq. (3.10), we have
885
+ ∆EU ≈ qda3(1 − a2ρ2)
886
+ (1 + a2ρ2)3
887
+ .
888
+ (3.16)
889
+ where d = D′ − D and we have assumed that
890
+ d ≪ D = 1
891
+ a .
892
+ (3.17)
893
+ From eq. (3.15), we find that ∆AA points in the ˆρ-
894
+ direction and has magnitude
895
+ |∆\mathcal{A}_A| = ∆\mathcal{A}_ρ ≈ \frac{q d a^4 ρ^2}{(1 + a^2 ρ^2)^2}.    (3.18)
899
+ The key point is that even though EA = 0 at both late
900
+ and early times, AA does return to its original value at
901
+ late times, and the change, ∆AA, in the vector potential
902
+ between late and early times is determined only by the
903
+ initial and final positions of the charge.
904
+ We now consider the quantized radiation through the
905
+ horizon resulting from the displacement of the charge,
906
+ assuming that, after the displacement, the charge is held
907
+ at its new position, D′, forever.
908
+ For the Fock space
909
+ associated with the Minkowski vacuum state, the map K :
910
+ S → Hin that associates one-particle states to classical
911
+ solutions is given by taking the positive frequency part of
912
+ the classical solution with respect to inertial time, with the
913
+ inner product on Hin given by the Klein-Gordon product.
914
+ For the electromagnetic field on H +
915
+ R in a gauge where
916
+ Aana on H +
917
+ R , the “free data” on H +
918
+ R is the pull-back,
919
+ AA, of the vector potential. For two classical solutions
920
+ with data A1,A and A2,A on H +
921
+ R , the inner product of
922
+ their corresponding one-particle states is given by [15, 32]
923
+ ⟨KA_1 | KA_2⟩_{H^+_R} = 2 \int_{\mathbb{R}^2} dx\,dy \int_0^{∞} \frac{ω\,dω}{2π}\, δ^{AB}\, \hat{A}^{*}_{1,A}\, \hat{A}_{2,B}    (3.19)
936
+ where ˆ
937
+ AA(ω, xB) is the Fourier transform of AA(V, xB)
938
+ with respect to the affine parameter V . By the same
939
+ reasoning as led to eq. (2.15), the expected number of
940
+ photons on H +
941
+ R in the coherent state associated to any
942
+ classical solution AA is simply
943
+ ⟨N⟩ = \|KA\|^2_{H^+_R}    (3.20)
947
+ where the norm is defined by the inner product eq. (3.19).
948
+ However, since ∆AA
949
+ ̸=
950
+ 0, the Fourier transform,
951
+ ˆ
952
+ AA(ω, xB), of AA diverges as 1/ω as ω → 0.
953
+ It fol-
954
+ lows that the integrand of the expression for the norm
955
+ given by the right side of eq. (3.19) also diverges as 1/ω as
956
+ ω → 0, so the integral is logarithmically divergent. Thus,
957
+ ||KA||2
958
+ H +
959
+ R = ∞. Therefore, if Alice displaces a charged
960
+ particle to a different orbit of the boost Killing field and
961
+ the particle remains on this new uniformly accelerated
962
+ trajectory forever, an infinite number of “soft horizon
963
+
964
+ 8
965
+ photons” will be radiated through the Rindler horizon
966
+ regardless of how quickly or slowly this process is done.
967
+ This is an exact mathematical analog of the infrared di-
968
+ vergences that occur at null infinity in QED for processes
969
+ with nonzero memory (see e.g., [33–35]).
970
+ Now suppose that Alice displaces the particle a z-
971
+ distance d ≪ D = 1/a from D to D′ = D+d as above, but
972
+ instead of leaving the particle at D′ forever, she leaves it
973
+ there for proper time13 T and then returns it to D. In this
974
+ case, the transverse components of the vector potential,
975
+ AA, return to their initial values at late times, so there
976
+ is no “memory effect” at the horizon. Correspondingly,
977
+ there are no infrared divergences in the expected number
978
+ of photons that propagate through H +
979
+ R . Nevertheless, if
980
+ T is very large then the expected number of photons ⟨N⟩
981
+ will be correspondingly large. To see this, we note that
982
+ if, for convenience, we work in a gauge where AA = 0
983
+ initially, then during the era at which the particle is at D′,
984
+ AA will be given by the right side of eq. (3.18). If we keep
985
+ the manner in which the particle is moved from D to D′
986
+ as well as from D′ to D fixed but take T to be very large,
987
+ the asymptotic behavior of the norm eq. (3.19) will be
988
+ dominated by the low-frequency contribution from the era
989
+ of time T that the particle is displaced. The logarithmic
990
+ divergence at ω = 0 that would occur if the particle re-
991
+ mained at D′ forever is now effectively cut off at frequency
992
+ ω ∼ 1/V , where V denotes the affine time duration on
993
+ the horizon H +
994
+ R over which the particle remains at D′.
995
+ We obtain
996
+ ⟨N⟩ = \|K\mathcal{A}\|^2_{H^+_R} ∼ q^2 d^2 a^2 \ln\!\Big( \frac{V}{\min[V_1, V_2]} \Big)    (3.21)
1003
+ where V1, V2 ≪ V are the durations of affine time over
1004
+ which the particle is displaced from D to D′ and from
1005
+ D′ back to D, so that 1/min[V1, V2] provides an effective
1006
+ high-frequency cutoff. However, the affine time V on the
1007
+ horizon is related to boost Killing time on the horizon by
1008
+ V = V0 exp(av)
1009
+ (3.22)
1010
+ and the boost Killing time v corresponds to the proper
1011
+ time T in Alice’s lab. Thus, we obtain
1012
+ ⟨N⟩ ∼ q^2 d^2 a^3 T    (Rindler, EM).    (3.23)
1015
+ Therefore, no matter how slowly the particle is displaced,
1016
+ it is forced to radiate a number of “soft Rindler horizon
1017
+ photons” through the Rindler horizon that is proportional
1018
+ to the time T that the particle remains on the displaced
1019
+ trajectory.
1020
+ We are now in a position to fully analyze Alice’s exper-
1021
+ iment. Alice’s lab is uniformly accelerating with acceler-
1022
+ 13We have normalized the boost Killing field ba so that Killing time
1023
+ equals proper time on the orbit at D with acceleration a. Since we
1024
+ assume d = D′ − D ≪ D, Killing time and proper time are also
1025
+ (nearly) equal on the orbit at D′. Thus, T is also the elapsed Killing
1026
+ time that Alice keeps the particle at D′.
1027
+ ation a in Minkowski spacetime. She puts her particle
1028
+ of charge q into a superposition of states separated by
1029
+ z-distance d ≪ 1/a and keeps these components sta-
1030
+ tionary in her lab for a proper time T.
1031
+ She then re-
1032
+ combines the components and determines their coher-
1033
+ ence14. By the analysis of sec. 2, the decoherence is given
1034
+ by eq. (2.18). However, for large T, the calculation of
1035
+ ||K [Gret(j1 − j2)] ||2 corresponds precisely to the calcu-
1036
+ lation we have given above of the number of photons
1037
+ radiated through the Rindler horizon when a charge is
1038
+ displaced for a time T. Thus, we obtain
1039
+ ||K
1040
+
1041
+ Gret(j1 − j2)
1042
+
1043
+ ||2 ∼ q2d2a3T.
1044
+ (3.24)
1045
+ In other words, for large T, Alice’s superposition will de-
1046
+ cohere due to radiation of “soft Rindler horizon photons,”
1047
+ as
1048
+ D = 1 − exp(−ΓradT)
1049
+ (3.25)
1050
+ where the “decoherence rate” Γrad, is given by,
1051
+ Γrad = q2d2a3.
1052
+ (3.26)
1053
+ Thus, restoring the constants c, ℏ, and ϵ0, Alice’s par-
1054
+ ticle will decohere within a time
1055
+ T_D ∼ ϵ₀ℏc⁶/(a³q²d²)   (Rindler, EM)   (3.27)
1059
+ ∼ 1033 years
1060
+ �g
1061
+ a
1062
+ �3
1063
+ ·
1064
+ �e
1065
+ q
1066
+ �2
1067
+ ·
1068
+ �m
1069
+ d
1070
+ �2
1071
+ .
1072
+ (3.28)
1073
+ Thus, if Alice’s lab uniformly accelerates at one g in
1074
+ flat spacetime and she separates an electron into two
1075
+ components one meter apart, she would not be able to
1076
+ maintain coherence of the electron for more than 1033
1077
+ years.
1078
+ A similar analysis holds in the gravitational case15
1079
+ where Alice separates a massive body with mass m across
1080
+ a distance d and maintains this superposition for a time
1081
+ T. In the gravitational case, the “electric part” of the
1082
+ perturbed Weyl tensor Eab = Cacbdncnd plays an analo-
1083
+ gous role to the electric field Ea in the electromagnetic
1084
+ version of the gedankenexperiment. For a uniformly ac-
1085
+ celerating point mass, the only non-vanishing compo-
1086
+ nent of the electric part of the Weyl tensor on H +
1087
+ R is
1088
+ EUU = Cacbdℓancℓbnd.
1089
+ Gravitational radiation on the horizon is described
1090
+ by the pullback, EAB, of Eab, which vanishes for the
1091
+ static point mass. However, the process of quasistatically
1092
+ moving the static point mass involves a change in EUU
1093
+ on H +
1094
+ R . The (once-contracted) Bianchi identity on the
1095
+ 14The coherence can be determined as described in footnote 6.
1096
+ 15In the gravitational case, additional stress-energy will be needed
1097
+ to keep Alice’s particle in uniform acceleration. We will ignore the
1098
+ gravitational effects of this additional stress-energy.
1099
+
1100
+ 9
1101
+ horizon yields
1102
+ DAEAB = ∂V EUB,
1103
+ DAEUA = ∂V EUU
1104
+ (3.29)
1105
+ which implies that
1106
+ DADBEAB = ∂2
1107
+ V EUU
1108
+ (3.30)
1109
+ which is closely analogous to eq. (3.11). As in the elec-
1110
+ tromagnetic case, if a uniformly accelerating point mass
1111
+ is quasistatically moved there is necessarily gravitational
1112
+ radiation through H +
1113
+ R .
1114
+ To determine the number of “Rindler horizon gravitons”
1115
+ emitted we quantize the linearized gravitational field. For
1116
+ a metric perturbation hab in a gauge where habna = 0
1117
+ and δABhAB = 0, the “free data” on H +
1118
+ R
1119
+ is hAB. A
1120
+ “particle” in the standard Fock space associated to the
1121
+ Poincaré invariant vacuum is then a positive frequency
1122
+ solution with respect to affine parameter V and the inner
1123
+ product on the one-particle Hilbert space is given by a
1124
+ direct analog of eq. (3.19) with the vector potential AA
1125
+ replaced with the metric perturbation hAB, namely
1126
+ ⟨Kh₁|Kh₂⟩_{H⁺_R} = (1/8) ∫_{ℝ²} dx dy ∫₀^∞ (ω dω/2π) δ^{AB} δ^{CD} ĥ₁,AC ĥ₂,BD .   (3.31)
1138
+ Finally, EAB is related to the metric perturbation hAB
1139
+ by
1140
+ E_AB = −(1/2) ∂²_V h_AB .   (3.32)
1144
+ Equations (3.30) and (3.32) directly imply that a per-
1145
+ manent change, ∆EUU ̸= 0, in the U-U component of
1146
+ the electric part of the Weyl tensor on H +
1147
+ R
1148
+ implies a
1149
+ permanent change, ∆hAB ̸= 0, in the perturbed metric
1150
+ on H +
1151
+ R between early and late times. In the quantum
1152
+ theory, as in the electromagnetic case, this implies a log-
1153
+ arithmic infrared divergence in the number of gravitons
1154
+ emitted through H +
1155
+ R in the process where a uniformly
1156
+ accelerating charge is moved to a new orbit of the same
1157
+ boost Killing field and then remains at the new position
1158
+ forever.
1159
+ The analysis of Alice’s experiment proceeds in a similar
1160
+ manner to the electromagnetic case. Alice does not main-
1161
+ tain the relative separation of her wavefunction forever
1162
+ but closes her superposition after a proper time T. As
1163
+ before, the number of entangling gravitons emitted to
1164
+ the Rindler horizon is logarithmically growing in affine
1165
+ time and therefore linearly growing in the proper time
1166
+ duration T of Alice’s experiment. We obtain
1167
+ ⟨N⟩ ∼ m2d4a5T
1168
+ (Rindler, GR) .
1169
+ (3.33)
1170
+ Thus, restoring constants, we find that the Rindler hori-
1171
+ zon decoheres the quantum superposition of a uniformly
1172
+ accelerating massive body in a time
1173
+ T^GR_D ∼ ℏc¹⁰/(Gm²d⁴a⁵)   (Rindler, GR)   (3.34)
1180
+ ∼ 2 fs
1181
+ �MMoon
1182
+ m
1183
+ �2
1184
+ ·
1185
+ �RMoon
1186
+ d
1187
+ �4
1188
+ ·
1189
+ �g
1190
+ a
1191
+ �5
1192
+ .
1193
+ (3.35)
1194
+ Therefore, if the Moon were accelerating at one g and
1195
+ occupied a quantum state with its center of mass super-
1196
+ posed by a spatial separation of the order of its own radius
1197
+ then it would decohere within about 2 femtoseconds. Of
1198
+ course, it would not be easy to put the moon in such a
1199
+ coherent quantum superposition.
1200
+ Note the acceleration of a stationary observer outside
1201
+ of a black hole who is reasonably far16 (D ≳ M) from the
1202
+ event horizon is a ∼ M/D2. If we substitute a = M/D2
1203
+ in eqs. (3.27) and (3.34), we obtain eqs. (2.21) and (2.22),
1204
+ respectively. Therefore, it might be tempting to believe
1205
+ that what is important in all cases is the acceleration of
1206
+ Alice’s lab. However, this is not the case. In particular,
1207
+ if we replace the black hole by an ordinary star (and if
1208
+ there are no dissipative effects in the star), then there
1209
+ will not be any analogous decoherence effect, even though
1210
+ the acceleration of Alice’s lab is the same as in the case
1211
+ of a black hole. Furthermore, as we shall see in sec. 4,
1212
+ decoherence effects associated with the cosmological hori-
1213
+ zon occur in de Sitter spacetime even for nonaccelerating
1214
+ observers. It is the presence of a Killing horizon that
1215
+ is the essential ingredient for the fundamental rate of
1216
+ decoherence of quantum superpositions as described in
1217
+ this paper.
1218
+ We now consider another potential cause of decoherence,
1219
+ namely Unruh radiation.
1220
+ 2.
1221
+ Decoherence Due to Scattering of Unruh
1222
+ Radiation
1223
+ The Minkowski vacuum state restricted to a Rindler
1224
+ wedge is a thermal state at the Unruh temperature
1225
+ T = a/2π   (3.36)
1228
+ relative to the notion of time translations defined by
1229
+ the Lorentz boost Killing field ba, eq. (3.2). Thus, the
1230
+ superposition state of Alice’s particle will be buffeted by
1231
+ this thermal bath of Unruh radiation. Scattering of this
1232
+ radiation will cause some decoherence of Alice’s particle.
1233
+ Indeed, since this decoherence should occur at a steady
1234
+ rate while the superposition is kept stationary (and thus
1235
+ the decoherence will be proportional to T), one might even
1236
+ 16It should be emphasized that the estimates made in [14] that yielded
1237
+ eqs.(2.21) and (2.22) assumed that Alice’s lab is reasonably far from
1238
+ the black hole. If Alice’s lab is extremely close to the black hole
1239
+ (i.e., at a distance D ≪ M from the horizon), then the black hole
1240
+ analysis would reduce to the Rindler case analyzed here.
1241
+
1242
+ 10
1243
+ suspect that scattering of Unruh radiation could be the
1244
+ same effect as found in the previous section but expressed
1245
+ in a different language. The purpose of this subsection
1246
+ is to show that this is not the case, i.e., decoherence
1247
+ due to scattering of Unruh radiation and decoherence
1248
+ due to radiation of “soft” photons/gravitons through the
1249
+ horizon are distinct effects. Furthermore, we shall show
1250
+ that, for reasonable parameter choices, the decoherence
1251
+ rate due to the scattering of Unruh radiation is smaller
1252
+ than the decoherence rate due to emitted radiation as
1253
+ obtained in the previous section. We will consider only
1254
+ the electromagnetic case in this subsection.
1255
+ The decoherence rate of a spatial superposition due
1256
+ to collisions with particles in an environment has been
1257
+ analyzed in [36–39], and we will adapt this analysis to
1258
+ obtain a rough estimate of the decoherence caused by the
1259
+ scattering of Unruh radiation. As in eq. (2.1), Alice has
1260
+ a particle of charge q in a state |ψ⟩ = (|ψ1⟩ + |ψ2⟩)/
1261
+
1262
+ 2,
1263
+ where |ψ1⟩ and |ψ2⟩ are spatially separated by a distance
1264
+ d. Since we require d ≪ 1/a (see eq. (3.17)) and since
1265
+ the typical wavelength of Unruh photons at temperature
1266
+ eq. (3.36) is λ ∼ 1/a, we are in the scattering regime
1267
+ where λ ≫ d.
1268
+ In an elastic scattering event between
1269
+ Alice’s particle and a photon in the Unruh radiation, the
1270
+ final outgoing state of the photon will depend upon which
1271
+ branch of the superposition the photon scattered off of.
1272
+ Let |χ1⟩ denote the outgoing state of the Unruh photon
1273
+ for scattering off of |ψ1⟩ and let |χ2⟩ denote the outgoing
1274
+ state for scattering off of |ψ2⟩. Decoherence will occur to
1275
+ the extent to which these outgoing states of the scattered
1276
+ Unruh photon are distinguishable, i.e., D = 1−| ⟨χ1|χ2⟩ |.
1277
+ In order to obtain a rough estimate of the decoherence
1278
+ resulting from a single scattering event, we consider the
1279
+ corresponding Minkowski process of the scattering of a
1280
+ photon of momentum p off of an inertial superposition
1281
+ separated by d, with d ≪ 1/p. Assuming that the charged
1282
+ particle states |ψ1⟩ and |ψ2⟩ are identical except for their
1283
+ location, the scattered photon states |χ1⟩ and |χ2⟩ should
1284
+ differ only by the action of the translation operator e−i ⃗P·⃗d,
1285
+ i.e.,
1286
+ |χ2⟩ ≈ e−i ⃗P·⃗d |χ1⟩
1287
+ (3.37)
1288
+ where ⃗P denotes the photon momentum operator. Ex-
1289
+ panding the exponential, we obtain the following rough
1290
+ estimate of the decoherence resulting from a single scat-
1291
+ tering event involving a photon of momentum p
1292
+ 1 − | ⟨χ1|χ2⟩ | ∼ p2d2
1293
+ (3.38)
1294
+ where we have ignored any dependence on the angle be-
1295
+ tween the incoming momentum ⃗p and the separation ⃗d.
1296
+ We will take eq. (3.38) as our estimate of the decoherence
1297
+ of Alice’s particle resulting from the scattering of a single
1298
+ Unruh photon of “Rindler momentum” p (i.e., of energy
1299
+ ϵ = p with respect to the boost Killing field ba).
1300
+ The total decoherence rate due to scattering of Unruh
1301
+ radiation is then given by
1302
+ Γscatt ∼ d2
1303
+
1304
+
1305
+ 0
1306
+ dp p2ϱ(p)σ(p)
1307
+ (3.39)
1308
+ where ϱ(p) is the number density of photons at momentum
1309
+ p (so ϱ(p) is also the incoming flux of photons) and σ(p)
1310
+ is the scattering cross-section. For a thermal distribution
1311
+ of photons17 we have
1312
+ ϱ(p) ∼ p²/(e^{p/T} − 1).   (3.40)
1316
+ We take σ to be given by the Thomson cross-section
1317
+ σ = (8π/3) · q⁴/(4πm)² ,   (3.41)
1322
+ where m is the mass of Alice’s particle. Putting this all
1323
+ together, our estimate of the decoherence rate due to
1324
+ scattering of Unruh photons is
1325
+ Γscatt ∼ q4d2a5
1326
+ m2
1327
+ (Rindler, EM) .
1328
+ (3.42)
1329
+ Comparing eq. (3.42) to the rate of decoherence, Γrad
1330
+ due to the emission of soft photons given by eq. (3.26),
1331
+ one can immediately see that the effects are distinct.
1332
+ In particular, Γrad has no dependence on the mass, m,
1333
+ of Alice’s particle, whereas Γscatt does depend on m on
1334
+ account of the mass dependence of the scattering cross-
1335
+ section. The ratio of these decoherence rates is given
1336
+ by
1337
+ Γ_scatt/Γ_rad ∼ q²a²/m² = ((q/m)/D)²   (3.43)
1345
+ Now, q/m is the “charge radius” of Alice’s particle and,
1346
+ as argued in [3], it represents a fundamental lower bound
1347
+ to the spread of a charged particle due to vacuum fluc-
1348
+ tuations of the electromagnetic field. Therefore, in order
1349
+ that |ψ1⟩ and |ψ2⟩ not overlap, we must have d > q/m.
1350
+ Since d ≪ D, we conclude that
1351
+ Γscatt
1352
+ Γrad
1353
+ ≪ 1
1354
+ (3.44)
1355
+ i.e., the contribution to decoherence from the scattering
1356
+ of Unruh radiation is negligible compared with the de-
1357
+ coherence due to emission of soft photons through the
1358
+ Rindler horizon.
1359
+ A similar analysis holds for a charged particle superpo-
1360
+ sition outside of a black hole. It is worth noting, that the
1361
+ 17The factor of p2 in the numerator of eq. (3.40) arises from the density
1362
+ of states in Minkowski spacetime. We ignore here any differences
1363
+ between the Minkowski and Rindler densities of states.
1364
+
1365
+ 11
1366
+ decoherence effects due to scattering of Hawking radiation
1367
+ will decrease with distance, D, from the black hole only
1368
+ as 1/D2 for large D, giving,
1369
+ Γscatt ∼
1370
+ q4d2
1371
+ m2M 3
1372
+ 1
1373
+ D2
1374
+ (black hole, EM).
1375
+ (3.45)
1376
+ On the other hand, by eq. (2.21) the decoherence effects
1377
+ of radiation of soft photons through the horizon decreases
1378
+ with D as 1/D6. Thus at sufficiently large D, the deco-
1379
+ herence effects due to scattering of Hawking radiation
1380
+ will dominate. However, in this regime, both effects are
1381
+ extremely small.
1382
+ 3.
1383
+ Decoherence From the Inertial Perspective
1384
+ In our analysis of the decoherence of a spatial superpo-
1385
+ sition in the presence of a black hole [14] as well as in our
1386
+ analysis of the decoherence of a spatial superposition in
1387
+ Rindler spacetime given above in sec. 3.1, it may appear
1388
+ that we have introduced a radical new mechanism for de-
1389
+ coherence, namely radiation of soft photons and gravitons
1390
+ through a horizon. The main purpose of this subsection
1391
+ is to show that, in fact, the decoherence we derived in the
1392
+ Rindler case can also be obtained by entirely conventional
1393
+ means. In the Rindler case, we are simply considering a
1394
+ uniformly accelerating superposition in Minkowski space-
1395
+ time. The radiation of entangling photons to infinity from
1396
+ such a superposition can be calculated in the inertial view-
1397
+ point by standard methods, without introducing concepts
1398
+ such as a Rindler horizon. It is instructive to calculate
1399
+ the decoherence from the inertial viewpoint both in order
1400
+ to validate the results of sec. 3.1 as well as to gain insight
1401
+ into how the emitted “soft photons” would be interpreted
1402
+ by an inertial observer. As we shall see, the entangling
1403
+ photons as seen by inertial observer at large distances
1404
+ near θ = 0 will be “hard” even though, from her point of
1405
+ view, Alice has performed the experiment adiabatically.
1406
+ We will restrict our analysis in this subsection to the
1407
+ electromagnetic case.
1408
+ The Liénard-Wiechert solution for the potential of a
1409
+ point charge in Minkowski spacetime following an arbi-
1410
+ trary worldline Xµ(τ) is, in Lorenz gauge,
1411
+ A_µ(x) = (1/4π)(1/α) · q/|⃗x − ⃗X(t_ret)| · (dX_µ/dt)(t_ret)   (3.46)
1420
+ where
1421
+ α ≡ 1 − ˆn · d ⃗X
1422
+ dt (tret)
1423
+ and ˆn = ⃗x − ⃗X(tret)
1424
+ |⃗x − ⃗X(tret)|
1425
+ .
1426
+ (3.47)
1427
+ For a uniformly accelerated trajectory with acceleration
1428
+ a, we have
1429
+ Xµ(τ) =
1430
+ �1
1431
+ a sinh(aτ), 0, 0, 1
1432
+ a cosh(aτ)
1433
+
1434
+ .
1435
+ (3.48)
1436
+ In Bondi coordinates (u, r, θ, φ) with
1437
+ u ≡ t − r
1438
+ (3.49)
1439
+ the future light cone of an event at proper time τ on the
1440
+ worldline eq. (3.48) reaches null infinity at
1441
+ au = sinh(aτ) − cos θ cosh(aτ).
1442
+ (3.50)
1443
+ Electromagnetic radiation is described by the pullback
1444
+ of the electromagnetic field, eq. (3.46), to null infinity.
1445
+ Taking the limit as r → ∞ at fixed u, we obtain18
1446
+ AA(u, θ, φ) = −q
1447
+
1448
+ sinh(aτ) sin θ
1449
+ cosh(aτ) − cos θ sinh(aτ)(dθ)A
1450
+ (3.51)
1451
+ where, in this subsection, capital indices from the early
1452
+ alphabet denote angular components on the 2-sphere cross-
1453
+ sections of I +. We will be concerned with the difference,
1454
+ at fixed (u, θ, φ), between the electromagnetic radiation
1455
+ of a particle following the trajectory eq. (3.48) and a
1456
+ particle following a similar trajectory that is displaced in
1457
+ the z-direction by a proper distance d ≪ 1/a and thus
1458
+ has
1459
+ δa = a2d.
1460
+ (3.52)
1461
+ We denote this difference by
1462
+ Ad
1463
+ A(u, θ, φ) ≡ AA(a + δa) − AA(a) ≈ δa
1464
+ �∂AA
1465
+ ∂a
1466
+
1467
+ u,θ
1468
+ (3.53)
1469
+ From eq. (3.51), we obtain
1470
+ Ad
1471
+ A = −a2qd
1472
+
1473
+ u sin θ
1474
+ (cosh(aτ) − cos θ sinh(aτ))3 (dθ)A
1475
+ (3.54)
1476
+ where eq. (3.50) was used to compute (∂τ/∂a)(u,θ).
1477
+ In her experiment, Alice starts with her particle in a
1478
+ uniformly accelerating state. Over a proper time T1, she
1479
+ separates it into two uniformly accelerating components
1480
+ separated by a distance d as above.
1481
+ She keeps these
1482
+ components separated for a proper time T, and she then
1483
+ recombines them over a proper time T2. The difference
1484
+ between the radiation fields of these components is given
1485
+ by
1486
+ AA ≡ A1,A − A2,A = F(τ)Ad
1487
+ A
1488
+ (3.55)
1489
+ where the smooth function F is such that F(τ) = 0 for
1490
+ τ < −T1 and τ > T +T2, whereas F(τ) = 1 for 0 < τ < T.
1491
+ 18The vector potential is not smooth at I + in Lorenz gauge but
1492
+ one can do an asymptotic gauge transformation such that Aa is
1493
+ smooth at I +. Such a gauge transformation does not affect the
1494
+ angular components AA at I + [35], so we can calculate AA using
1495
+ our Lorenz gauge expression.
1496
+
1497
+ 12
1498
+ The entangling photon content is then given by
1499
+ ⟨N⟩ = ||KA||2 = 2
1500
+
1501
+ S2
1502
+ dΩ
1503
+
1504
+
1505
+ 0
1506
+ ωdω
1507
+
1508
+ ˆ
1509
+ AA ˆ
1510
+ AA
1511
+ (3.56)
1512
+ where
1513
+ ˆ
1514
+ AA(ω, θ, φ) denotes the Fourier transform of
1515
+ AA(u, θ, φ) with respect to u, i.e.,
1516
+ ˆ
1517
+ AA(ω, θ, φ) =
1518
+
1519
+
1520
+ −∞
1521
+ du eiωuAA(u, θ, φ).
1522
+ (3.57)
1523
+ We are interested in estimating ⟨N⟩ for large T.
1524
+ In order to evaluate the Fourier transform integral, it
1525
+ is useful to note that, at fixed a, we have
1526
+ du
1527
+ dτ = cosh(aτ) − cos θ sinh(aτ)
1528
+ (3.58)
1529
+ and
1530
+ d2u
1531
+ dτ 2 = a2u.
1532
+ (3.59)
1533
+ It follows that
1534
+ d
1535
+ du
1536
+
1537
+ 1
1538
+ du/dτ
1539
+
1540
+ =
1541
+ 1
1542
+ du/dτ
1543
+ d
1544
+
1545
+
1546
+ 1
1547
+ du/dτ
1548
+
1549
+ =
1550
+ −a2u
1551
+ (cosh(aτ) − cos θ sinh(aτ))3
1552
+ (3.60)
1553
+ Thus, we have
1554
+ Ad
1555
+ A = qd sin θ
1556
+
1557
+ (dθ)A
1558
+ d
1559
+ du
1560
+
1561
+ 1
1562
+ du/dτ
1563
+
1564
+ (3.61)
1565
+ and
1566
+ ˆ
1567
+ AA = qd sin θ
1568
+
1569
+ (dθ)A
1570
+
1571
+
1572
+ −∞
1573
+ du eiωuF(τ) d
1574
+ du
1575
+
1576
+ 1
1577
+ du/dτ
1578
+
1579
+ .
1580
+ (3.62)
1581
+ Integrating by parts, we obtain
1582
+ ˆ
1583
+ AA(ω, xA) = − qd sin θ
1584
+
1585
+ (dθ)A
1586
+
1587
+
1588
+
1589
+
1590
+ −∞
1591
+ du eiωu F(τ)
1592
+ du/dτ
1593
+ +
1594
+
1595
+
1596
+ −∞
1597
+ du eiωu
1598
+ F ′(τ)
1599
+ (du/dτ)2
1600
+
1601
+ .
1602
+ (3.63)
1603
+ The second term in this equation contributes only during
1604
+ the time intervals (−T1, 0) and (T, T + T2) when Alice
1605
+ opens and closes the superposition. For large T, its con-
1606
+ tribution can be shown to be negligible compared with
1607
+ the first term. Therefore, we have
1608
+ ˆ
1609
+ AA(ω, xA) ≈ −(dθ)A
1610
+ iωqd sin θ
1611
+
1612
+ I
1613
+ (3.64)
1614
+ where
1615
+ I ≡
1616
+
1617
+
1618
+ −∞
1619
+ du eiωu F(τ)
1620
+ du/dτ .
1621
+ (3.65)
1622
+ To evaluate I, we approximate F by a step function in
1623
+ the τ-interval [0, T]. The corresponding interval, [u0, uT ],
1624
+ in u is
1625
+ u0 = −1
1626
+ a cos θ
1627
+ uT = 1
1628
+ 2a
1629
+
1630
+ eaT (1 − cos θ) − e−aT (1 + cos θ)
1631
+
1632
+ .
1633
+ (3.66)
1634
+ Noting that
1635
+ du
1636
+ dτ =
1637
+
1638
+ a2u2 + sin2 θ
1639
+ (3.67)
1640
+ we obtain
1641
+ I ≈
1642
+ uT
1643
+
1644
+ u0
1645
+ du
1646
+ eiωu
1647
+
1648
+ a2u2 + sin2 θ
1649
+ .
1650
+ (3.68)
1651
+ It can be seen that for large T, the dominant contribution
1652
+ to I will come from small angles, θ ≪ 1. For aT ≫ 1, the
1653
+ upper limit of the integral may then be approximated as
1654
+ uT ≈ 1
1655
+ 4aeaT θ2 − 1
1656
+ ae−aT
1657
+ for θ ≪ 1
1658
+
1659
+
1660
+ 0
1661
+ for θ2/4 < e−aT
1662
+ 1
1663
+ 4aθ2eaT
1664
+ for θ2/4 ≥ e−aT .
1665
+ (3.69)
1666
+ For aT ≫ 1, the contribution to I from θ2/4 < e−aT
1667
+ can be shown to make a negligible contribution to ⟨N⟩,
1668
+ eq. (3.56). Therefore, we may approximate I as
1669
+ I ∼ Θ(θ2 − 4e−aT )
1670
+ exp(aT )θ2/(4a)
1671
+
1672
+ −1/a
1673
+ du
1674
+ eiωu
1675
+
1676
+ a2u2 + sin2 θ
1677
+ (3.70)
1678
+ where
1679
+ Θ(x) ≡
1680
+
1681
+ 0
1682
+ for x < 0
1683
+ 1
1684
+ for x ≥ 0.
1685
+ (3.71)
1686
+ For 0 < ω < 4ae−aT /θ2, we may bound I by replacing
1687
+ eiωu by 1. The integral can then be evaluated explic-
1688
+ itly, and it can be shown that for aT ≫ 1, the con-
1689
+ tribution to ⟨N⟩ from this frequency range is negligi-
1690
+ ble. For ω > 4ae−aT /θ2, the integrand is oscillatory for
1691
+ u > exp(aT)θ2/(4a), and, for aT ≫ 1, we will make neg-
1692
+ ligible error in our estimate of ⟨N⟩ if we replace the upper
1693
+ limit of eq. (3.70) by ∞. We will also make a negligible
1694
+ error by replacing the lower limit by 0. Thus, for aT ≫ 1,
1695
+
1696
+ 13
1697
+ we may approximate I as
1698
+ I ∼ Θ(θ2−4e−aT )Θ(ω−4ae−aT /θ2)
1699
+
1700
+
1701
+ 0
1702
+ du
1703
+ eiωu
1704
+
1705
+ a2u2 + sin2 θ
1706
+ .
1707
+ (3.72)
1708
+ Evaluating the integral we obtain
1709
+ I ∼ 1
1710
+ aΘ(θ2 − 4e−aT )Θ(ω − 4ae−aT /θ2)
1711
+ �1
1712
+ 2iπI0(sin θω/a)
1713
+ +K0(sin θω/a) − 1
1714
+ 2iπLLL0(sin θω/a)
1715
+
1716
+ (3.73)
1717
+ where I0, K0 are Bessel functions and LLL0 is a Struve
1718
+ function. This expression is highly suppressed for ω > a/θ,
1719
+ so we can expand in θω/a and truncate the function above
1720
+ ω = a/θ to obtain,
1721
+ I ∼ −1
1722
+ aΘ(1−θω/a)Θ(θ2−4e−aT )Θ(ω−4ae−aT /θ2) ln (θω/a) .
1723
+ (3.74)
1724
+ Note that the restrictions ω < a/θ, and θ > 2e−aT/2 im-
1725
+ ply a frequency cutoff at ω ∼ aeaT/2/2. By eqs.(3.74) and
1726
+ (3.64), the frequency spectrum of ˆ
1727
+ AA goes as ω ln(ω/a)
1728
+ up to this cutoff, i.e., the spectrum is “hard” and becomes
1729
+ increasingly so for large T. This contrasts with the in-
1730
+ creasingly “soft” spectrum on the Rindler horizon, which
1731
+ goes as 1/ω down to a low frequency cutoff ∼ 1/V ∝ e−aT .
1732
+ Thus, the “soft horizon photons” from the Rindler per-
1733
+ spective are “hard” photons from the inertial perspective.
1734
+ From eq. (3.56) for ⟨N⟩ together with our expression
1735
+ eq. (3.64) for ˆ
1736
+ AA and the expression eq. (3.74) that we
1737
+ have just derived for I, we obtain
1738
+ ⟨N⟩ ∼
1739
+ �qd
1740
+ a
1741
+ �2 �
1742
+ dωdθ θ3ω3
1743
+
1744
+ ln ωθ
1745
+ a
1746
+ �2
1747
+ (3.75)
1748
+ where the region of ω-θ integration is determined by the Θ-
1749
+ functions appearing in eq. (3.74) as well as the geometrical
1750
+ restriction θ ≲ 1. We can break up this region into the
1751
+ portion with ω ≤ a and the portion with ω > a. Since
1752
+ the region with ω ≤ a and θ ≲ 1 is bounded and the
1753
+ integrand of eq. (3.75) is bounded in this region, the
1754
+ contribution to ⟨N⟩ from ω ≲ a is bounded by a constant
1755
+ that is independent of T. We may therefore discard this
1756
+ contribution. In the region ω > a, the third Θ-function
1757
+ in eq. (3.74) is redundant, and the integration region is
1758
+ a ≤ω≤ aeaT/2/2
1759
+ (3.76)
1760
+ 2e−aT/2 ≤θ≤ a
1761
+ ω .
1762
+ (3.77)
1763
+ For aT ≫ 1, we will make negligible error by replacing
1764
+ the lower limit of θ by 0. We thereby obtain
1765
+ ⟨N⟩ ∼
1766
+ �qd
1767
+ a
1768
+ �2 a exp(aT/2)/2
1769
+
1770
+ a
1771
+
1772
+ a/ω
1773
+
1774
+ 0
1775
+ dθ θ3ω3
1776
+
1777
+ ln ωθ
1778
+ a
1779
+ �2
1780
+ .
1781
+ (3.78)
1782
+ Making the change of variables from θ to
1783
+ x = ω
1784
+ a θ
1785
+ (3.79)
1786
+ we find that the θ-integral becomes
1787
+ a/ω
1788
+
1789
+ 0
1790
+ dθ θ3ω3
1791
+
1792
+ ln ωθ
1793
+ a
1794
+ �2
1795
+ = a
1796
+ ω a3
1797
+ 1
1798
+
1799
+ 0
1800
+ dx x3(ln x)2 ∼ a4
1801
+ ω .
1802
+ (3.80)
1803
+ Thus, we obtain
1804
+ ⟨N⟩ ∼
1805
+ �qd
1806
+ a
1807
+ �2
1808
+ a4
1809
+ a exp(aT/2)/2
1810
+
1811
+ a
1812
+
1813
+ ω
1814
+ ∼ a2q2d2 ln[exp(aT/2)]
1815
+ ∼ a3q2d2T.
1816
+ (3.81)
1817
+ This estimate agrees with eq. (3.23).
1818
+ Thus, we have succeeded—with considerable effort!—in
1819
+ our goal of deriving the decoherence of Alice’s superpo-
1820
+ sition by entirely conventional means. It is notable how
1821
+ much simpler the calculation of sec. 3.1 was compared to
1822
+ the calculation that we have just completed.
1823
+ 4.
1824
+ COSMOLOGICAL HORIZONS DECOHERE
1825
+ QUANTUM SUPERPOSITIONS
1826
+ In this section, we apply our analysis to de Sitter space-
1827
+ time.
1828
+ The de Sitter metric in a static patch is given
1829
+ by
1830
+ ds2 = −f(r)dt2 + f(r)−1dr2 + r2qABdxAdxB
1831
+ (4.1)
1832
+ where, in this section, xA are angular coordinates on the
1833
+ 2-sphere, qAB is the unit round metric on the 2-sphere,
1834
+ and
1835
+ f(r) = 1 − r2/R2
1836
+ H
1837
+ (4.2)
1838
+ where RH (the “Hubble radius”) is a constant.
1839
+ The
1840
+ coordinate singularity at r = RH corresponds to the
1841
+ “cosmological horizon,” which is a Killing horizon of the
1842
+ static Killing field (∂/∂t)a. The relation between “affine
1843
+ time,” V , and “Killing time,” v, on the future cosmological
1844
+ horizon is
1845
+ V = ev/RH.
1846
+ (4.3)
1847
+ The general analysis of sec. 2 applies to the decoherence
1848
+ of a static superposition in de Sitter spacetime. The esti-
1849
+ mates of the decoherence due to emission of soft photons
1850
+ and gravitons through the cosmological horizon when Al-
1851
+ ice keeps the superposition present for a time T can be
1852
+ made in exact parallel with the analysis of sec. 3 in the
1853
+ Rindler case and [14] in the black hole case. The only
1854
+ noteworthy new ingredient in de Sitter spacetime is that
1855
+
1856
+ 14
1857
+ the worldline r = 0 is an orbit of the static Killing field
1858
+ that is inertial, i.e., non-accelerating. We now estimate
1859
+ the decoherence of a spatial superposition created in Al-
1860
+ ice’s lab at r = 0 and thereby show that decoherence will
1861
+ occur even though Alice’s lab is not accelerating.
1862
+ By Gauss’ law, a point charge placed at r = 0 will give
1863
+ rise to a radial electric field EU on the future cosmological
1864
+ horizon given by
1865
+ EU ∼
1866
+ q
1867
+ R2
1868
+ H
1869
+ (4.4)
1870
+ where EU = Fabℓanb on the horizon with na = (∂/∂V )a
1871
+ tangent to the affinely parametrized null generators of
1872
+ the horizon and ℓa = (∂/∂U)a a radial null vector with
1873
+ naℓa = −1. The change in the electric field on the horizon
1874
+ resulting from a displacement of the charge to r = d ≪
1875
+ RH is
1876
+ ∆EU ∼ qd
1877
+ R3
1878
+ H
1879
+ .
1880
+ (4.5)
1881
+ By paralleling the steps that led to eq. (3.18) above, we
1882
+ find that the change in the tangential components of the
1883
+ vector potential at the horizon is
1884
+ |∆AA| ≡
1885
+
1886
+ R−2
1887
+ H qAB∆AA∆AB
1888
+ �1/2 ∼ qd
1889
+ R2
1890
+ H
1891
+ .
1892
+ (4.6)
1893
+ By paralleling the steps that led to eq. (3.23)—assuming
1894
+ that the electromagnetic field is initially in the de Sitter
1895
+ invariant vacuum (see footnote 7)—we obtain the estimate
1896
+ ⟨N⟩ ∼ q2d2
1897
+ R3
1898
+ H
1899
+ T
1900
+ (de Sitter, EM) .
1901
+ (4.7)
1902
+ Thus, restoring constants, the decoherence time due to
1903
+ the presence of the cosmological horizon is
1904
+ T_D ∼ ℏϵ₀R³_H/(q²d²)   (de Sitter, EM) .   (4.8)
1909
+ Since d ≪ RH, the decoherence time will be much larger
1910
+ than the Hubble time RH/c unless q is extremely large
1911
+ relative to the Planck charge qP ≡ √ϵ0ℏc. Nevertheless,
1912
+ we see that decoherence does occur despite the fact that
1913
+ Alice’s lab is inertial.
1914
+ A similar analysis applies in the gravitational case for
1915
+ a spatial superposition of a massive particle in Alice’s lab
1916
+ at r = 0. In parallel with the derivation given in sec. 3.1
1917
+ above, we find
1918
+ ⟨N⟩ ∼ m2d4
1919
+ R5
1920
+ H
1921
+ T
1922
+ (de Sitter, GR)
1923
+ (4.9)
1924
+ which leads to a decoherence time
1925
+ T^GR_D ∼ ℏR⁵_H/(Gm²d⁴)   (de Sitter, GR) .   (4.10)
1933
+ ACKNOWLEDGMENTS
1934
+ D.L.D. acknowledges support as a Fannie and John
1935
+ Hertz Foundation Fellow holding the Barbara Ann Cana-
1936
+ van Fellowship and as an Eckhardt Graduate Scholar
1937
+ in the Physical Sciences Division at the University of
1938
+ Chicago. This research was supported in part by NSF
1939
+ Grant No. 21-05878 to the University of Chicago.
1940
+ [1] S. Bose, A. Mazumdar, G. W. Morley, H. Ulbricht,
1941
+ M. Toroš, M. Paternostro, A. Geraci, P. Barker, M. S.
1942
+ Kim, and G. Milburn, Spin Entanglement Witness for
1943
+ Quantum Gravity, Phys. Rev. Lett. 119, 240401 (2017),
1944
+ arXiv:1707.06050 [quant-ph].
1945
+ [2] C. Marletto and V. Vedral, Gravitationally-induced en-
1946
+ tanglement between two massive particles is sufficient
1947
+ evidence of quantum effects in gravity, Phys. Rev. Lett.
1948
+ 119, 240402 (2017), arXiv:1707.06036 [quant-ph].
1949
+ [3] A. Belenchia, R. M. Wald, F. Giacomini, E. Castro-Ruiz,
1950
+ v. Brukner, and M. Aspelmeyer, Quantum Superposition
1951
+ of Massive Objects and the Quantization of Gravity, Phys.
1952
+ Rev. D 98, 126009 (2018), arXiv:1807.07015 [quant-ph].
1953
+ [4] M. Christodoulou and C. Rovelli, On the possibility of lab-
1954
+ oratory evidence for quantum superposition of geometries,
1955
+ Physics Letters B 792, 64 (2019).
1956
+ [5] F. Giacomini, E. Castro-Ruiz, and v. Brukner, Quantum
1957
+ mechanics and the covariance of physical laws in quan-
1958
+ tum reference frames, Nature Commun. 10, 494 (2019),
1959
+ arXiv:1712.07207 [quant-ph].
1960
+ [6] C. Gonzalez-Ballestero, M. Aspelmeyer, L. Novotny,
1961
+ R. Quidant, and O. Romero-Isart, Levitodynamics: Lev-
1962
+ itation and control of microscopic objects in vacuum,
1963
+ Science 374, 3027 (2021), arXiv:2111.05215 [quant-ph].
1964
+ [7] D. L. Danielson, G. Satishchandran, and R. M. Wald,
1965
+ Gravitationally mediated entanglement: Newtonian field
1966
+ versus gravitons, Phys. Rev. D 105, 086001 (2022),
1967
+ arXiv:2112.10798 [quant-ph].
1968
+ [8] D. Carney, Newton, entanglement, and the graviton, Phys.
1969
+ Rev. D 105, 024029 (2022), arXiv:2108.06320 [quant-ph].
1970
+ [9] M.
1971
+ Christodoulou,
1972
+ A.
1973
+ Di
1974
+ Biagio,
1975
+ M.
1976
+ Aspelmeyer,
1977
+ v. Brukner, C. Rovelli, and R. Howl, Locally mediated en-
1978
+ tanglement through gravity from first principles, (2022),
1979
+ arXiv:2202.03368 [quant-ph].
1980
+ [10] D. Carney, Y. Chen, A. Geraci, H. Müller, C. D. Panda,
1981
+ P. C. E. Stamp, and J. M. Taylor, Snowmass 2021
1982
+ White Paper: Tabletop experiments for infrared quan-
1983
+ tum gravity, in 2022 Snowmass Summer Study (2022)
1984
+ arXiv:2203.11846 [gr-qc].
1985
+ [11] T. Feng and V. Vedral, Amplification of gravitationally
1986
+ induced entanglement, Phys. Rev. D 106, 066013 (2022),
1987
+ arXiv:2202.09737 [quant-ph].
1988
+ [12] R. Zhou, R. J. Marshman, S. Bose, and A. Mazum-
1989
+ dar, Catapulting towards massive and large spatial quan-
1990
+ tum superposition, Phys. Rev. Res. 4, 043157 (2022),
1991
+ arXiv:2206.04088 [quant-ph].
1992
+
1993
+ 15
1994
+ [13] C. Overstreet, J. Curti, M. Kim, P. Asenbaum, M. A.
1995
+ Kasevich, and F. Giacomini, Inference of gravitational
1996
+ field superposition from quantum measurements, (2022),
1997
+ arXiv:2209.02214 [quant-ph].
1998
+ [14] D. L. Danielson, G. Satishchandran, and R. M. Wald,
1999
+ Black holes decohere quantum superpositions, Int. J. Mod.
2000
+ Phys. D 31, 2241003 (2022), arXiv:2205.06279 [hep-th].
2001
+ [15] B. S. Kay and R. M. Wald, Theorems on the Unique-
2002
+ ness and Thermal Properties of Stationary, Nonsingular,
2003
+ Quasifree States on Space-Times with a Bifurcate Killing
2004
+ Horizon, Phys. Rept. 207, 49 (1991).
2005
+ [16] S. W. Hawking and G. F. R. Ellis, The Large Scale Struc-
2006
+ ture of Space-Time, Cambridge Monographs on Mathe-
2007
+ matical Physics (Cambridge University Press, 2011).
2008
+ [17] S. W. Hawking, Black holes in general relativity, Commun.
2009
+ Math. Phys. 25, 152 (1972).
2010
+ [18] S. Alexakis, A. D. Ionescu, and S. Klainerman, Hawking’s
2011
+ local rigidity theorem without analyticity, Geometric and
2012
+ Functional Analysis 20, 845 (2010), arXiv:0902.1173 [gr-
2013
+ qc].
2014
+ [19] B. Allen, Vacuum States in de Sitter Space, Phys. Rev.
2015
+ D 32, 3136 (1985).
2016
+ [20] B. Allen and T. Jacobson, Vector Two Point Functions
2017
+ in Maximally Symmetric Spaces, Commun. Math. Phys.
2018
+ 103, 669 (1986).
2019
+ [21] B. Allen, The Graviton Propagator in De Sitter Space,
2020
+ Phys. Rev. D 34, 3670 (1986).
2021
+ [22] R. M. Wald, Quantum Field Theory in Curved Space-
2022
+ Time and Black Hole Thermodynamics, Chicago Lectures
2023
+ in Physics (University of Chicago Press, Chicago, IL,
2024
+ 1995).
2025
+ [23] C. Yang and D. Feldman, The S Matrix in the Heisenberg
2026
+ Representation, Phys. Rev. 79, 972 (1950).
2027
+ [24] W. G. Unruh and R. M. Wald, What happens when an
2028
+ accelerating observer detects a rindler particle, Phys. Rev.
2029
+ D 29, 1047 (1984).
2030
+ [25] E. T. Whittaker, On electric phenomena in gravitational
2031
+ fields, Proc. Roy. Soc. Lond. A, 116, 720 (1927).
2032
+ [26] H. Bondi and T. Gold, The field of a uniformly accel-
2033
+ erated charge, with special reference to the problem of
2034
+ gravitational acceleration, Proc. Roy. Soc. Lond. A 229,
2035
+ 416 (1955).
2036
+ [27] F. Rohrlich, The equations of motion of classical charges,
2037
+ Annals of Physics 13, 93 (1961).
2038
+ [28] D. G. Boulware, Radiation from a uniformly accelerated
2039
+ charge, Annals of Physics 124, 169 (1980).
2040
+ [29] H. Padmanabhan and T. Padmanabhan, Aspects of elec-
2041
+ trostatics in a weak gravitational field, Gen. Rel. Grav.
2042
+ 42, 1153 (2010), arXiv:0910.0926 [gr-qc].
2043
+ [30] E. Eriksen and Ø. Grøn, Electrodynamics of hyperboli-
2044
+ cally accelerated charges v. the field of a charge in the
2045
+ rindler space and the milne space, Annals of Physics 313,
2046
+ 147 (2004).
2047
+ [31] L. Bieri and D. Garfinkle, An electromagnetic analogue
2048
+ of gravitational wave memory, Class. Quant. Grav. 30,
2049
+ 195009 (2013), arXiv:1307.5098 [gr-qc].
2050
+ [32] C. Dappiaggi, V. Moretti, and N. Pinamonti, Hadamard
2051
+ States From Light-like Hypersurfaces (Springer, Cham,
2052
+ 2017) arXiv:1706.09666 [math-ph].
2053
+ [33] A. Ashtekar, Asymptotic Quantization: Based On 1984
2054
+ Naples Lectures, Monographs and Textbooks in Physical
2055
+ Science (Bibliopolis, Naples, Italy, 1987).
2056
+ [34] A. Strominger, Lectures on the Infrared Structure of Grav-
2057
+ ity and Gauge Theory (Princeton University Press, 2018)
2058
+ arXiv:1703.05448 [hep-th].
2059
+ [35] G. Satishchandran and R. M. Wald, Asymptotic behavior
2060
+ of massless fields and the memory effect, Phys. Rev. D
2061
+ 99, 084007 (2019), arXiv:1901.05942 [gr-qc].
2062
+ [36] E. Joos and H. D. Zeh, The emergence of classical proper-
2063
+ ties through interaction with the environment, Zeitschrift
2064
+ für Physik B Condensed Matter 59, 223 (1985).
2065
+ [37] M. R. Gallis and G. N. Fleming, Environmental and
2066
+ spontaneous localization, Phys. Rev. A 42, 38 (1990).
2067
+ [38] L. Diósi, Quantum master equation of a particle in a gas
2068
+ environment, Europhysics Letters 30, 63 (1995).
2069
+ [39] K. Hornberger and J. E. Sipe, Collisional decoherence
2070
+ reexamined, Phys. Rev. A 68, 012105 (2003).
2071
+
1dAyT4oBgHgl3EQfPfaV/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
3NA0T4oBgHgl3EQfNP9P/content/2301.02143v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b46f303c0fc990475fe91d53f87ab2344684af6c2cc932630ab2e0134ccda28
3
+ size 4296161
3NA0T4oBgHgl3EQfNP9P/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4fc374b1374e9cab45470eaad4a0db42c0839ba2739c259ba63b85392d6739db
3
+ size 3670061
3NFAT4oBgHgl3EQfEBxO/content/2301.08419v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4a2569df1bac0621cda7fe968294e0d41b2e66f1199c63ff8600f0c7327e6890
3
+ size 714369
3NFAT4oBgHgl3EQfEBxO/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ffd119dd3e4bf124c248198799b7f835c6197d111dfda66316be67764ad98c2e
3
+ size 3604525
3tFRT4oBgHgl3EQfojeI/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3741779a19a468e659dd0b120f02a4acdb41a0c70514f12def938946eead8b55
3
+ size 283986
4NE1T4oBgHgl3EQfAgLJ/content/2301.02841v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e2c5e52ca7e9b5a4c884da031d5ab6dd91f198418048b556edcc08f51cdf008
3
+ size 456256
4NE1T4oBgHgl3EQfAgLJ/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6099efa82abe5fc76b82aaebca286eff835b8633a7d96fcede38abb45ad39c88
3
+ size 149627
4dFIT4oBgHgl3EQf6yuB/content/2301.11395v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbb09a19f1add832cade81dab8c459cb4a858afb181a7d32429d513ddddee089
3
+ size 6626500
59E1T4oBgHgl3EQfmwQa/content/2301.03300v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09971f5cda51446ac034eccdebe4f6527f3548ed3e6e593ae9a62a65f1528aa0
3
+ size 1490636
59E1T4oBgHgl3EQfmwQa/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c37edbc394c7846d6e1a8da27dca93d9f7d96566d8c940a96232f1af171d8627
3
+ size 4653101
59E1T4oBgHgl3EQfmwQa/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05108be771636fd648287e3724b851dcdd3b89bb5e9f4515817f57b08a9534d5
3
+ size 156562
69AyT4oBgHgl3EQf2vl7/content/2301.00756v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c1ada22bafe201ffb11009139e594371e6642ff67651c36e8d1f6c972e2ff25
3
+ size 21767940
6dE4T4oBgHgl3EQfcQxJ/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:257110295baee25f12bd91bc1b7473f34fb1b240eabc0f8dd3635cc9a1d8951d
3
+ size 2752557
7NFAT4oBgHgl3EQfoB2V/content/tmp_files/2301.08632v1.pdf.txt ADDED
@@ -0,0 +1,1161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Generative Slate Recommendation with Reinforcement
2
+ Learning
3
+ Romain Deffayet
4
+ Naver Labs Europe
5
+ Meylan, France
6
+ University of Amsterdam
7
+ Amsterdam, The Netherlands
8
+ romain.deffayet@naverlabs.com
9
+ Thibaut Thonet
10
+ Naver Labs Europe
11
+ Meylan, France
12
+ thibaut.thonet@naverlabs.com
13
+ Jean-Michel Renders
14
+ Naver Labs Europe
15
+ Meylan, France
16
+ jean-michel.renders@naverlabs.com
17
+ Maarten de Rijke
18
+ University of Amsterdam
19
+ Amsterdam, The Netherlands
20
+ m.derijke@uva.nl
21
+ ABSTRACT
22
+ Recent research has employed reinforcement learning (RL) algo-
23
+ rithms to optimize long-term user engagement in recommender
24
+ systems, thereby avoiding common pitfalls such as user boredom
25
+ and filter bubbles. They capture the sequential and interactive na-
26
+ ture of recommendations, and thus offer a principled way to deal
27
+ with long-term rewards and avoid myopic behaviors. However, RL
28
+ approaches are intractable in the slate recommendation scenario
29
+ – where a list of items is recommended at each interaction turn –
30
+ due to the combinatorial action space. In that setting, an action
31
+ corresponds to a slate that may contain any combination of items.
32
+ While previous work has proposed well-chosen decompositions
33
+ of actions so as to ensure tractability, these rely on restrictive and
34
+ sometimes unrealistic assumptions. Instead, in this work we pro-
35
+ pose to encode slates in a continuous, low-dimensional latent space
36
+ learned by a variational auto-encoder. Then, the RL agent selects
37
+ continuous actions in this latent space, which are ultimately de-
38
+ coded into the corresponding slates. By doing so, we are able to
39
+ (i) relax assumptions required by previous work, and (ii) improve
40
+ the quality of the action selection by modeling full slates instead
41
+ of independent items, in particular by enabling diversity. Our ex-
42
+ periments performed on a wide array of simulated environments
43
+ confirm the effectiveness of our generative modeling of slates over
44
+ baselines in practical scenarios where the restrictive assumptions
45
+ underlying the baselines are lifted. Our findings suggest that repre-
46
+ sentation learning using generative models is a promising direction
47
+ towards generalizable RL-based slate recommendation.
48
+ CCS CONCEPTS
49
+ • Information systems → Recommender systems.
50
+ Permission to make digital or hard copies of all or part of this work for personal or
51
+ classroom use is granted without fee provided that copies are not made or distributed
52
+ for profit or commercial advantage and that copies bear this notice and the full citation
53
+ on the first page. Copyrights for components of this work owned by others than the
54
+ author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or
55
+ republish, to post on servers or to redistribute to lists, requires prior specific permission
56
+ and/or a fee. Request permissions from permissions@acm.org.
57
+ WSDM ’23, February 27-March 3, 2023, Singapore, Singapore
58
+ © 2023 Copyright held by the owner/author(s). Publication rights licensed to ACM.
59
+ ACM ISBN 978-1-4503-9407-9/23/02...$15.00
60
+ https://doi.org/10.1145/3539597.3570412
61
+ KEYWORDS
62
+ Slate recommendation, Reinforcement learning, Variational auto-
63
+ encoder
64
+ ACM Reference Format:
65
+ Romain Deffayet, Thibaut Thonet, Jean-Michel Renders, and Maarten de
66
+ Rijke. 2023. Generative Slate Recommendation with Reinforcement Learn-
67
+ ing. In Proceedings of the Sixteenth ACM International Conference on Web
68
+ Search and Data Mining (WSDM ’23), February 27-March 3, 2023, Singa-
69
+ pore, Singapore. ACM, New York, NY, USA, 9 pages. https://doi.org/10.1145/
70
+ 3539597.3570412
71
+ 1
72
+ INTRODUCTION
73
+ Ubiquitous in online services, recommender systems (RSs) play a
74
+ key role personalization by catering to users’ identified tastes. Ide-
75
+ ally, they also diversify their offerings and help users discover new
76
+ interests [19]. In the latter case, RSs take on an active role, which
77
+ means that recommendations influence future user behavior, and
78
+ therefore their effects on users must be explicitly controlled. Such
79
+ effects can be detrimental: users may get bored if too many simi-
80
+ lar recommendations are made, and it has been well-documented
81
+ that users can end up in so-called filter bubbles or echo chambers
82
+ [4, 13, 28]. From the perspective of the online platform or the con-
83
+ tent provider, user boredom leads to poor retention and conversion
84
+ rates [17], while filter bubbles raise fairness and ethical issues for
85
+ which providers can be held accountable [26]. Conversely, RSs can
86
+ also positively impact users, for example, when users get interested
87
+ in new, unexpected topics or when the RS offers a fair represen-
88
+ tation of available options [1]. It is natural, therefore, to balance
89
+ exploitation (i.e., sticking to the known interests of the user) and
90
+ exploration (i.e., further probing the user’s interests) so as to avoid
91
+ always recommending similar items, and encourage recommenda-
92
+ tions that boost future engagement. The reinforcement learning
93
+ (RL) literature has proposed models and algorithms that aim to
94
+ optimize long-term metrics by acknowledging the causal effect of
95
+ recommendations on users [8, 36].
96
+ In this work we consider the common scenario of slate recom-
97
+ mendation [8, 18, 31], which comes with specific challenges. At each
98
+ interaction turn, a slate recommender system recommends a list of
99
+ items from the collection, and the user interacts with zero, one or
100
+ several of those items. As a consequence, users may not examine
101
+ arXiv:2301.08632v1 [cs.IR] 20 Jan 2023
102
+
103
+ WSDM ’23, February 27-March 3, 2023, Singapore, Singapore
104
+ Romain Deffayet, Thibaut Thonet, Jean-Michel Renders, & Maarten de Rijke
105
+ all the recommended items, which leads to biases in the observed
106
+ interactions along with a complex interplay between items in the
107
+ same slate [27]. More importantly, the size of the action space, i.e.,
108
+ the number of possible slates, prohibits the use of off-the-shelf RL
109
+ approaches [12]. Indeed, as slate recommendation is a combinato-
110
+ rial problem, the evaluation of all actions by the RL agent through
111
+ trial and error is simply intractable: even with as few as 1, 000
112
+ items in the collection, the number of possible slates of size 10 is
113
+ approximately 9.6 × 1029. We propose to tackle this problem in
114
+ the context of a practical scenario, (S), which fits the second-stage
115
+ ranking phase [11] of many content recommendation platforms:
116
+ (S) The collection contains around a thousand items, and at each
117
+ turn of interaction the proposed model must select and rank
118
+ 10 items to be presented to the user.
119
+ All our tractability and feasibility statements in this paper must
120
+ therefore be understood through the lens of this scenario (S).
121
+ To reduce the prohibitively large size of the combinatorial action
122
+ space, previous studies have proposed to decompose slates in a
123
+ tractable manner [8, 18, 31] – but at the cost of restrictive assump-
124
+ tions, e.g., concerning mutual independence of items in the slate,
125
+ knowledge of the user click model, availability of high-quality item
126
+ embeddings, or that at most one item per slate is clicked.
127
+ In contrast, in this work we propose to first learn a continuous,
128
+ low-dimensional latent representation of actions (i.e., slates), and
129
+ then let the agent take actions within this latent space during its
130
+ training phase. In practice, we obtain the latent representations
131
+ by introducing a generative modeling of slates (GeMS) based on a
132
+ variational auto-encoder (VAE) pre-trained on a dataset of observed
133
+ slates and clicks, collected from a previous version of the recom-
134
+ mender system. Such a dataset is usually available in industrial
135
+ recommendation settings. Therefore, we do not rely on restrictive
136
+ assumptions, and the fact that we represent full slates enables the
137
+ agent to improve the quality of its recommendations, instead of
138
+ using individual item representations.
139
+ Our contributions can be summarized as follows:
140
+ • We propose GeMS, a novel way to represent actions in RL for slate
141
+ recommendation, by pre-training a VAE on slates and associated
142
+ clicks. Unlike previous methods, GeMS is free of overly restrictive
143
+ assumptions and only requires logged interaction data.
144
+ • We provide a unified terminology to classify existing slate recom-
145
+ mendation approaches based on their underlying assumptions.
146
+ • We show on a wide array of simulated environments that previ-
147
+ ous methods underperform when their underlying assumptions
148
+ are lifted (i.e., in practical settings), while GeMS allows us to re-
149
+ cover highly rewarding policies without restrictive assumptions.
150
+ • To support the reproducibility of this work, we publicly release
151
+ the code for our approach, baselines and simulator.1
152
+ 2
153
+ RELATED WORK
154
+ Long-term user engagement. Several studies have documented
155
+ the misalignment between short-term benefits and long-term user
156
+ engagement [1, 17], as well as the tendency of traditional recom-
157
+ mender systems to be detrimental to long-term outcomes [29]. Such
158
+ myopic behavior is known to cause boredom and decrease user re-
159
+ tention [1], which is prejudicial for both users and content providers.
160
+ 1https://github.com/naver/gems.
161
+ This behavior also raises concerns such as the rich-get-richer issue
162
+ [8] and feeding close-mindedness [29]. Some previous studies tried
163
+ to counter this effect by explicitly maximizing diversity [33] or
164
+ by finding metrics correlated with long-term outcomes [2, 7]. In
165
+ contrast, in our work we directly optimize long-term metrics by
166
+ using reinforcement learning algorithms [8, 16, 36].
167
+ Reinforcement learning for slate recommendation. The prob-
168
+ lem of slate recommendation with reinforcement learning (RL) has
169
+ been tackled in several previous studies, although the settings in
170
+ which solutions were tested vary and are sometimes not applicable
171
+ to our scenario (S). Chen et al. [8] and Bai et al. [3] assume a simple
172
+ user click model and independence of items within a slate in order
173
+ to reduce the problem to choosing individual items, which they
174
+ solve with the REINFORCE algorithm on a SoftMax policy. Ie et al.
175
+ [18] assume knowledge of the user’s click model and item relevance,
176
+ which allows them to perform combinatorial optimization for the
177
+ computation of Q-values. Sunehag et al. [31] take a continuous
178
+ action in the product space of item embeddings, i.e., one embed-
179
+ ding per slot in the slate, and pre-select nearest-neighbor items
180
+ for full-slate Q-function evaluation. Chen et al. [9] use properties
181
+ of the optimal Q-function to propose an elegant decomposition
182
+ of it and generate optimal slates autoregressively. We detail the
183
+ assumptions made by each of these approaches in Section 4, but
184
+ we had to discard [9] due to its prohibitively heavy computation: it
185
+ requires a number of neural network forward passes proportional
186
+ to the slate size times the number of items in the collection (i.e.,
187
+ 10,000 passes in scenario (S)), for each training or inference step.
188
+ Our proposed approach differs from previous work because we
189
+ do not manually decompose the slates using tractable heuristics
190
+ based on restrictive assumptions, but instead approximate the slate
191
+ generation process with a deep generative model. Our proposed
192
+ framework only has a single requirement, viz. the availability of
193
+ logged data with slates and associated clicks, as we will detail in
194
+ Section 4. The latter assumption is by no means restrictive as such
195
+ logged data is readily available in common industrial recommenda-
196
+ tion settings.
197
+ Latent action representations. While learning a latent repre-
198
+ sentation of states is very common in the RL literature [14, 30],
199
+ few studies have tackled the problem of latent action representa-
200
+ tion. Chandak et al. [6] train an action generation function in a
201
+ supervised manner, by learning to predict the action taken from
202
+ a pair of successive states. This is not directly applicable in our
203
+ case, because the true user state is not observable and successive
204
+ observations are simply clicks that appear to be too weak of a signal
205
+ to infer the slates leading to these clicks. Botteghi et al. [5] learn a
206
+ state-action world model and jointly train latent state and action
207
+ representations in a model-based fashion.
208
+ Learning a world model in our setting essentially amounts to the
209
+ latent modeling of slates and clicks (similar to our approach), while
210
+ also conditioning on an internal hidden state.2 The work by Zhou
211
+ et al. [35] is perhaps the closest work to ours, as it uses a variational
212
+ auto-encoder (VAE) to embed actions into a controllable latent space
213
+ before training an RL agent. However, it does not consider slates
214
+ but only simple, atomic actions. In contrast, Jiang et al. [20], Liu
215
+ 2We tried a similar method in pilot experiments, but the additional conditioning only
216
+ deteriorated the results, so we only present the condition-free method in this paper.
217
+
218
+ Generative Slate Recommendation with Reinforcement Learning
219
+ WSDM ’23, February 27-March 3, 2023, Singapore, Singapore
220
+ Figure 1: Our proposed framework for slate recommendation with reinforcement learning. We first pretrain our GeMS model on previously
221
+ collected logged data composed of slates and associated clicks (left), then we use the frozen decoder of GeMS to decode the RL agent’s low-
222
+ dimensional proto-action vector into a slate (right).
223
+ et al. [25] train VAEs to represent slates and their associated clicks,
224
+ but they do not investigate training an RL agent from the learned
225
+ latent representation.
226
+ To the best of our knowledge, we are the first to learn a latent
227
+ representation of slates for RL-based recommendation.
228
+ 3
229
+ METHOD
230
+ 3.1
231
+ Notations and problem definition
232
+ We consider a slate recommendation scenario in which a user inter-
233
+ acts with a recommender system (RS) throughout an episode of 𝑇
234
+ turns. At every turn 𝑡 ∈ {1, . . . ,𝑇 }, the system recommends a slate
235
+ 𝑎𝑡 = (𝑖1
236
+ 𝑡 , . . . ,𝑖𝑘
237
+ 𝑡 ) where (𝑖 𝑗
238
+ 𝑡 )1⩽𝑗⩽𝑘 are items from the collection I
239
+ and 𝑘 is the size of the slate set by the RS designer. The user can
240
+ click on zero, one or several items in the slate and the resulting
241
+ click vector 𝑐𝑡 = (𝑐1
242
+ 𝑡 , . . . ,𝑐𝑘
243
+ 𝑡 ),𝑐 𝑗
244
+ 𝑡 ∈ {0, 1} is returned to the RS.
245
+ The problem of maximizing the cumulative number of clicks
246
+ over an episode can be modeled as a partially observable Markov
247
+ decision process (POMDP) M𝑃 = (S, O, A, 𝑅,𝑇, Ω) defined by:
248
+ • A set of states S, which represent the unobservable state of the
249
+ user’s mind;
250
+ • A set of observations O accessible to the system. Here, obser-
251
+ vations are clicks from the previous interaction (𝑜𝑡 = 𝑐𝑡−1) and
252
+ therefore lie in the space of binary vectors of size 𝑘: O = {0, 1}𝑘;
253
+ • A set of actions A, which is the set of all possible slates composed
254
+ of items from the collection, i.e., |A| =
255
+ |I |!
256
+ ( |I |−𝑘)!;
257
+ • A reward function 𝑅 : S × A → R, which we set to 𝑅(𝑠𝑡,𝑎𝑡) =
258
+ 𝑟𝑡 = �𝑘
259
+ 𝑗=1 𝑐 𝑗
260
+ 𝑡 in order to reflect our long-term objective of maxi-
261
+ mizing the cumulative number of clicks; and
262
+ • A set of unknown transition and observation probabilities, re-
263
+ spectively 𝑇 : S × A × S → [0, 1] and Ω : S × A × O → [0, 1],
264
+ as well as a distribution over initial states 𝑆1 : S → [0, 1].
265
+ Due to the unobserved nature of the true user state in the POMDP, it
266
+ is common to train agents by relying on a proxy of the state inferred
267
+ from available observations. The function that provides such proxy
268
+ is traditionally referred to as the belief encoder [21]. We also define
269
+ the concepts of a policy 𝜋 : S × A → [0, 1] and trajectory 𝜏 =
270
+ (𝑜𝑡,𝑎𝑡,𝑟𝑡)1⩽𝑡⩽𝑇 . In the remainder, we write 𝜏 ∼ 𝜋 to signify that
271
+ we obtain a trajectory by first sampling an initial state 𝑠1 from 𝑆1
272
+ and then recursively sampling actions𝑇 −1 times from the policy 𝜋.
273
+ The goal can now be formulated as finding an optimal policy, i.e., a
274
+ policy maximizing the expected return 𝜋∗ ∈ arg max𝜋 E𝜏∼𝜋 [R(𝜏)]
275
+ with R(𝜏) = �𝑇
276
+ 𝑡=1 𝑟𝑡. Finally, given a state 𝑠 and action 𝑎, we define
277
+ the Q-function 𝑄𝜋 (𝑠,𝑎) = E𝜏∼𝜋,𝑠1=𝑠,𝑎1=𝑎 [R(𝜏)] and V-function
278
+ 𝑉 𝜋 (𝑠) = E𝑎∼𝜋 (𝑠) [𝑄𝜋 (𝑠,𝑎)].
279
+ 3.2
280
+ Overview of the framework
281
+ In our proposed framework, the interactions with the environment,
282
+ i.e., the user, can be described by the following repeated steps:
283
+ (1) The belief encoder summarizes the history of interactions with
284
+ the user into a state vector;
285
+ (2) The agent selects a proto-action based on this state; and
286
+ (3) The ranker (here resulting from a VAE model) decodes this
287
+ proto-action into a slate that is served to the user.
288
+ In the remainder of this section, we first detail our proposed gener-
289
+ ative modeling of slates (GeMS). GeMS is a deep generative model
290
+ that learns a low-dimensional latent space for slates and associated
291
+ clicks – thus constituting a convenient proto-action space for the RL
292
+ agent and allowing for tractable RL without resorting to restrictive
293
+ assumptions as in prior work [3, 8, 18, 31]. Then we describe how
294
+ GeMS is integrated as a ranker in our RL framework and we briefly
295
+ discuss the remaining RL components. This two-step process is
296
+ depicted in Figure 1.
297
+ 3.3
298
+ Generative Modeling of Slates (GeMS)
299
+ In order to instantiate our GeMS model, we propose to train a vari-
300
+ ational auto-encoder (VAE) on a precollected dataset D of logged
301
+ interactions, as illustrated in Figure 1 (left). A VAE aims to learn
302
+ a joint distribution over data samples (i.e., slates and clicks de-
303
+ noted as 𝑎 and 𝑐, respectively) and latent encodings (i.e., proto-
304
+ actions denoted as 𝑧) [22]. To do so, a parameterized distribution
305
+ 𝑝𝜃 (𝑎,𝑐,𝑧) is trained to maximize the marginal likelihood of the data
306
+ 𝑝𝜃 (𝑎,𝑐) =
307
+
308
+ 𝑧 𝑝𝜃 (𝑎,𝑐,𝑧)𝑑𝑧. In practice, due to the intractability of
309
+ this integral, a parameterized distribution 𝑞𝜙 (𝑧|𝑎,𝑐) is introduced
310
+ as a variational approximation of the true posterior 𝑝𝜃 (𝑧|𝑎,𝑐) and
311
+ the VAE is trained by maximizing the evidence lower bound (ELBO):
312
+ LELBO
313
+ 𝜃,𝜙
314
+ =E𝑎,𝑐∼D
315
+
316
+ E𝑧∼𝑞𝜙 (·|𝑎,𝑐) [log 𝑝𝜃 (𝑎,𝑐|𝑧)]−KL
317
+
318
+ 𝑞𝜙 (𝑧|𝑎,𝑐)∥𝑝(𝑧)
319
+ ��
320
+ ,
321
+ where 𝑝(𝑧) is the prior distribution over the latent space, KL is the
322
+ Kullback-Leibler divergence [24], and 𝑧 is a sample from a Gaussian
323
+
324
+ WSDM ’23, February 27-March 3, 2023, Singapore, Singapore
325
+ Romain Deffayet, Thibaut Thonet, Jean-Michel Renders, & Maarten de Rijke
326
+ distribution obtained using the reparameterization trick [22]. The
327
+ distributions 𝑞𝜙 (𝑧|𝑎,𝑐) and 𝑝𝜃 (𝑎,𝑐|𝑧) are usually referred to as the
328
+ encoder and the decoder, respectively.
329
+ The downstream performance of the RL agent we wish to ulti-
330
+ mately learn clearly depends on the upstream ability of the VAE
331
+ to properly reconstruct slates. However, as Liu et al. [25] observe,
332
+ an accurate reconstruction of slates may limit the agent’s capacity
333
+ to satisfy the user’s interests. Indeed, finding high-performance
334
+ continuous control policies requires smoothness and structure in
335
+ the latent space, which may be lacking if too much emphasis is
336
+ given to the reconstruction objective in comparison to the prior
337
+ matching objective enforced by the KL-divergence. Therefore, it
338
+ is necessary to balance reconstruction and controllability, which
339
+ is done by introducing an hyperparameter 𝛽 as weight for the KL
340
+ term in Eq. ??. Moreover, in order to promote additional structure
341
+ in the latent space, we add a click reconstruction term in the loss:
342
+ slates with similar short-term outcomes (i.e., clicks) are grouped
343
+ together during pre-training. Yet, we may want to avoid biasing
344
+ the learned representations towards click reconstruction too much,
345
+ as it may come at the cost of quality of the slate reconstruction.
346
+ Therefore, we introduce a hyperparameter 𝜆 to adjust this second
347
+ trade-off. We show the empirical impact of 𝛽 and 𝜆 in Section 6.3.
348
+ In our implementation, the prior 𝑝(𝑧) is set as a standard Gauss-
349
+ ian distribution N (0, I). The encoder 𝑞𝜙 (𝑧|𝑎,𝑐) is a Gaussian dis-
350
+ tribution with diagonal covariance N (𝜇𝜙 (𝑎,𝑐), diag(𝜎2
351
+ 𝜙 (𝑎,𝑐))), pa-
352
+ rameterized by a multi-layer perceptron (MLP). This MLP inputs
353
+ the concatenation of learnable item embeddings and associated
354
+ clicks over the whole slate, and outputs (𝜇𝜙 (𝑎,𝑐), log 𝜎𝜙 (𝑎,𝑐)). For
355
+ the decoder 𝑝𝜃 (𝑎,𝑐|𝑧), another MLP takes as input the latent sam-
356
+ ple 𝑧, and outputs the concatenation of reconstructed embeddings
357
+ e𝑗
358
+ 𝜃 (𝑧) and click probabilities 𝑝 𝑗,𝑐
359
+ 𝜃 (𝑐𝑗 |𝑧) for each slot 𝑗 in the slate.
360
+ We then derive logits for the item probabilities 𝑝 𝑗,𝑎
361
+ 𝜃 (𝑎𝑗 |𝑧) by taking
362
+ the dot-product of the reconstructed embedding e𝑗
363
+ 𝜃 (𝑧) with the
364
+ embeddings of all items in the collection. For collection items, we
365
+ use the current version of embeddings learned within the encoder,
366
+ but we prevent the gradient from back-propagating to them using
367
+ the stop-gradient operator to avoid potential degenerate solutions.
368
+ In summary, the VAE is pre-trained by maximizing the ELBO on
369
+ the task of reconstructing slates and corresponding clicks, i.e., by
370
+ minimizing $\mathcal{L}^{\text{GeMS}}_{\theta,\phi} = \mathbb{E}_{a,c \sim \mathcal{D}}[\mathcal{L}^{\text{GeMS}}_{\theta,\phi}(a,c)]$ with:
+ $$\mathcal{L}^{\text{GeMS}}_{\theta,\phi}(a,c) = \underbrace{-\sum_{j=1}^{k} \log p^{j,a}_{\theta}\big(a_j \mid z_\phi(a,c)\big)}_{\text{slate reconstruction}} \;\underbrace{-\,\lambda \sum_{j=1}^{k} \log p^{j,c}_{\theta}\big(c_j \mid z_\phi(a,c)\big)}_{\text{click reconstruction}} \;+\; \beta \underbrace{\sum_{i=1}^{d} \Big(\sigma^2_{\phi,i} + \mu^2_{\phi,i} - \log \sigma_{\phi,i} - 1\Big)}_{\text{KL-divergence}} \quad (1)$$
405
+ where $z_\phi(a,c) = \mu_\phi(a,c) + \mathrm{diag}(\sigma_\phi(a,c)) \cdot \epsilon$, for $\epsilon \sim \mathcal{N}(0, I)$. Here,
407
+ 𝑑 is the dimension of the latent space, and 𝛽 and 𝜆 are hyperparam-
408
+ eters controlling the respective weight of the KL term and the click
409
+ reconstruction term. Note that the KL term takes this simple form
410
+ due to the Gaussian assumption on 𝑞𝜙 (𝑧|𝑎,𝑐) and the N (0, I) prior.
411
+ 3.4
412
+ RL agent and belief encoder
413
+ After the pre-training step described in Section 3.3, the parameters
414
+ of GeMS are frozen and we use its decoder as the ranker in our
415
+ RL framework. The RL agent can then be trained to maximize the
416
+ discounted return by taking proto-actions within the VAE’s latent
417
+ space. To generate a slate (𝑖1, . . . ,𝑖𝑘) from the agent’s proto-action
418
+ 𝑧, we take for each slot 𝑗 ∈ {1, . . . ,𝑘} the most likely item according
419
+ to the decoder: $i_j = \arg\max_{i \in \mathcal{I}} p^{j,a}_{\theta}(i \mid z)$.
421
+ Since our focus within the RL framework is on the choice of the
422
+ ranker, we adopt a standard implementation of the belief encoder
423
+ and the agent: the former is modeled by a gated recurrent unit
424
+ (GRU) [10] taking as input the concatenation of item embeddings
425
+ and respective clicks from each slate, and the latter is a soft actor-
426
+ critic (SAC) [15] algorithm. We chose SAC because it is a well-
427
+ established RL algorithm, known for its strong performance and
428
+ data-efficiency in continuous control. Additionally, SAC adds an
429
+ entropy term incentivizing exploration which we have noticed
430
+ during our experiments to be important to attain high performance
431
+ in highly stochastic recommendation environments.
432
+ 4
433
+ BASELINES AND THEIR ASSUMPTIONS
434
+ We evaluate our proposed method against four main baselines
435
+ derived from prior work. In this section, we describe these baselines
436
+ as well as the assumptions on user behavior that they formulate in
437
+ order to make the combinatorial problem of slate recommendation
438
+ tractable. By doing so, we are able to compare the assumptions
439
+ made by these baselines and highlight the generality of our method
440
+ in Table 1. Note that we only report from previous studies the
441
+ mechanism used for slate generation, which is the topic of this
442
+ study, and ignore other design choices.
443
+ SoftMax. In [3, 8], the authors reduce the combinatorial problem
444
+ of slate optimization to the simpler problem of item optimization:
445
+ the policy network output is a softmax layer over all items in the
446
+ collection, and items are sampled with replacement to form slates.
447
+ Doing so requires the mild assumption that the Q-value of the slate
448
+ can be linearly decomposed into item-specific Q-values (DQ). But
449
+ more importantly, it also requires two strong assumptions, namely
450
+ users can click on at most one item per slate (1CL) and the returns
451
+ of items in the same slate are mutually independent (MI). Together,
452
+ these assumptions are restrictive, because their conjunction means
453
+ that the click probability of an item in the slate does not depend
454
+ on the item itself. Indeed, having dependent click probabilities
455
+ (to enforce the single click) and independent items in the slate is
456
+ compatible only if click probabilities do not depend on items.
457
+ SlateQ. Ie et al. [18] propose a model-based approach in which
458
+ the click behavior of the user is given, and Q-learning [34] is used
459
+ to plan and approximate users’ dynamic preferences. On top of
460
+ the earlier DQ and 1CL, it requires access to the true relevance and
461
+ click model (CM), which is an unfair advantage compared to other
462
+ methods. For computational efficiency reasons, we adopt the faster
463
+ variant referred to as QL-TT-TS in the original paper.
464
+ TopK. Even though, to the best of our knowledge, no work has
465
+ proposed this approach, we include it in our set of baselines as
466
+
467
+ Generative Slate Recommendation with Reinforcement Learning
468
+ WSDM ’23, February 27-March 3, 2023, Singapore, Singapore
469
+ Table 1: Comparison of assumptions made by prior work. Our
470
+ method only requires access to logged interaction data.
471
+ 1CL
472
+ DQ
473
+ MI
474
+ CM
475
+ SP
476
+ EIB
477
+ LD
478
+ SoftMax [3, 8]
479
+
480
+
481
+
482
+
483
+
484
+
485
+
486
+ SlateQ [18]
487
+
488
+
489
+
490
+
491
+
492
+
493
+
494
+ WkNN [31]
495
+
496
+
497
+
498
+
499
+
500
+
501
+
502
+ TopK
503
+
504
+
505
+
506
+
507
+
508
+
509
+
510
+ GeMS (Ours)
511
+
512
+
513
+
514
+
515
+
516
+
517
+
518
+ it is a natural way to deal with slate recommendation. The agent
519
+ takes continuous actions in the space of item embeddings, and we
520
+ generate slates by taking the 𝑘 items from the collection with the
521
+ closest embeddings to the action, according to a similarity metric
522
+ (the dot-product in practice). This method therefore assumes the
523
+ availability of logged data of past interactions (LD), in order to
524
+ pre-train item embeddings. In our experiments, we evaluate two
525
+ variants of this baseline: TopK (MF), where item embeddings are
526
+ learned by matrix factorization [23], and TopK (ideal), which uses
527
+ ideal item embeddings, i.e., the embeddings used internally by the
528
+ simulator (see Section 5.1). The latter version clearly has an unfair
529
+ advantage. Also, because ranking items this way assumes that the
530
+ most rewarding items should appear on top, it makes the sequential
531
+ presentation (SP) assumption from [31] that the true click model
532
+ is top-down and fading, i.e., if 𝑐(𝑖) indicates that item 𝑖 has been
533
+ clicked and 𝑙 ⩽ 𝑘 is the position of 𝑖 in slate 𝑎, then 𝑃(𝑐(𝑖)|𝑠,𝑎) =
534
+ 𝑃(𝑐(𝑖)|𝑠,𝑎⩽𝑙) ⩽ 𝑃(𝑐(𝑖)|𝑠, ˜𝑎⩽𝑙−1), where 𝑎⩽𝑙 = (𝑖1, . . . ,𝑖𝑙−1,𝑖) and
535
+ ˜𝑎⩽𝑙−1 = (𝑖1, . . . ,𝑖𝑙−2,𝑖).
536
+ WkNN. In [31], the authors propose a finer-grained and potentially
537
+ more capable variant of TopK referred to as Wolpertinger [12]: the
538
+ agent takes actions in the product-space of item embeddings over
539
+ slate slots, i.e., continuous actions of dimension 𝑘 ×𝑑, where 𝑑 is the
540
+ dimension of item embeddings. Then, for each slot in the slate, 𝑝
541
+ candidate items are selected by Euclidean distance with embeddings
542
+ of items from the collection, and every candidate item’s contribution
543
+ to the Q-value is evaluated in a greedy fashion. Besides LD and DQ,
544
+ WkNN requires two strong assumptions to ensure submodularity
545
+ of the Q-function: sequential presentation SP and execution is best
546
+ (EIB), i.e., recommendations that are risky on the short term are
547
+ never worth it. Formally, this translates as: P(𝑅(𝑠, 𝜋1(𝑠)) = 0) ⩾
548
+ P(𝑅(𝑠, 𝜋2(𝑠)) = 0) ⇒ 𝑉 𝜋1 (𝑠) ⩽ 𝑉 𝜋2 (𝑠) for any policies 𝜋1, 𝜋2.
549
+ Note that it partly defeats the purpose of long-term optimization.
550
+ In Table 1, we summarize the assumptions made by each baseline.
551
+ In comparison to prior work, our proposed framework has a single
552
+ assumption: the availability of logged data with slates and asso-
553
+ ciated clicks (LD), as Table 1 indicates. This assumption is by no
554
+ means restrictive as such logged data is readily available in common
555
+ industrial recommendation settings.
556
+ On top of these baselines, we also include a random policy and
557
+ a short-term oracle as reference points. The short-term oracle
558
+ has access to the true user and item embeddings, enabling it to
559
+ select the items with the highest relevance probability in each slate.
560
+ Therefore, at each turn of interaction, it gives an upper bound on
561
+ the immediate reward but it is unable to cope with boredom and
562
+ influence phenomena.
563
+ 5
564
+ EXPERIMENTAL SETUP
565
+ 5.1
566
+ Simulator
567
+ We design a simulator that allows us to observe the effect of lifting
568
+ the assumptions required by the baselines, and we experiment with
569
+ several simulator variants to ensure generalizability. We summarize
570
+ our main design choices below and refer the reader to our code
571
+ available online3 for a more detailed description.
572
+ Item and user embeddings. Following scenario (S), our simula-
573
+ tor includes 1, 000 items. We consider a cold-start situation where
574
+ users are generated on-the-fly for each new trajectory. Items and
575
+ users are randomly assigned embeddings of size 20, corresponding
576
+ to ten 2-dimensional topics: e = (e1, . . . , e10). Each 2-dimensional
577
+ vector e𝑡 is meant to capture the existence of subtopics within
578
+ topic 𝑡. The embedding of a user or item 𝑥 is generated using the
579
+ following process: (i) sample topic propensities 𝑤𝑡𝑥 ∼ U(0, 1) and
580
+ normalize such that �
581
+ 𝑡 𝑤𝑡𝑥 = 1; (ii) sample topic-specific compo-
582
+ nents 𝜖𝑡𝑥 ∼ N (0, 0.4 · I2) and rescale as e𝑡𝑥 = 𝑤𝑡𝑥 · min(|𝜖𝑡𝑥 |, 1);
583
+ and (iii) normalize the embedding e𝑥 = (e1𝑥, . . . , e10
584
+ 𝑥 ) such that
585
+ ∥e𝑥 ∥ = 1. Each item is associated to a main topic, defined as
586
+ 𝑡(𝑖) = arg max1⩽𝑡⩽10 ∥e𝑡
587
+ 𝑖 ∥.
588
+ To accommodate different types of content and platforms, we
589
+ derive two variants of item embeddings in the simulator: one with
590
+ embeddings obtained as described above, and one with embeddings
591
+ for which we square and re-normalize each component. In Section 6,
592
+ we highlight this difference in peakedness by referring to the former
593
+ as diffuse embeddings and the latter as focused embeddings.
594
+ Relevance computation. The relevance probability of item 𝑖 for
595
+ user 𝑢 is a monotonically increasing function of the dot-product
596
+ between their respective embeddings: rel(𝑖,𝑢) = 𝜎(e𝑖𝑇 e𝑢), where
597
+ 𝜎 is a sigmoid function.
598
+ Boredom and influence effects. User embeddings can be af-
599
+ fected by two mechanisms: boredom and influence. Each item 𝑖
600
+ clicked by user 𝑢 influences the user embedding in the next interac-
601
+ tion turn as: e𝑢 ← 𝜔e𝑢 +(1−𝜔)e𝑖, where we set 𝜔 = 0.9 in practice.
602
+ Additionally, if in the last 10 items clicked by user 𝑢 five have the
603
+ same main topic 𝑡𝑏, then 𝑢 gets bored with this topic, meaning we
604
+ put e𝑡𝑏
605
+ 𝑢 = 0 for 5 turns. These mechanisms have been defined to
606
+ penalize myopic behavior and encourage long-term strategies.
607
+ Click model. Users click on recommended items according to a
608
+ position-based model, i.e., the click probability is the product of
609
+ item-specific attractiveness and rank-specific examination probabil-
610
+ ities: P(𝑐|𝑖,𝑟) = 𝐴𝑖 × 𝐸𝑟. Specifically, we define for an item located
611
+ at rank 𝑟: 𝐸𝑟 = 𝜈𝜀𝑟 + (1 − 𝜈)𝜀𝑘+1−𝑟 with 𝜀 = 0.85. It is a mixture of
612
+ the terms 𝜀𝑟 and 𝜀𝑘+1−𝑟, which respectively capture the top-down
613
+ and bottom-up browsing behaviors. We use two variants of this
614
+ click model in our experiments: TopDown with 𝜈 = 1.0 and Mixed
615
+ with 𝜈 = 0.5. The attractiveness of an item is set to its relevance
616
+ in TopDown and Mixed. In addition, we consider a third variant
617
+ DivPen which also penalizes slates that lack diversity: 𝐴𝑖 is down-
618
+ weighted by a factor of 3 if more than 4 items from the slate have
619
+ the same main topic (as in Mixed, we also set 𝜈 = 0.5 for DivPen).
620
+ In summary, our experiments are performed on 6 simulator variants
621
+ defined by the choice of item embedding peakedness (diffuse item
622
+ 3https://github.com/naver/gems
623
+
624
+ WSDM ’23, February 27-March 3, 2023, Singapore, Singapore
625
+ Romain Deffayet, Thibaut Thonet, Jean-Michel Renders, & Maarten de Rijke
626
+ Table 2: Average cumulative number of clicks on the test set for our 6 simulated environments. Bold: best method; underlined: 2nd-best
627
+ method; †: statistically significantly better than all other methods. 95% confidence intervals are given in parentheses. Methods grouped under
628
+ “Disclosed env.” have access to privileged information about the environment and can therefore not be fairly compared with “Undisclosed
629
+ env.” methods.
630
+ Focused item embeddings
631
+ Diffuse item embeddings
632
+ Method
633
+ TopDown
634
+ Mixed
635
+ DivPen
636
+ TopDown
637
+ Mixed
638
+ DivPen
639
+ Disclosed
640
+ env.
641
+ 
642
+ 
643
+ Short-term oracle
644
+ SAC+TopK (ideal)
645
+ SlateQ
646
+ 107.7
647
+ 101.6
648
+ 85.4
649
+ 96.7
650
+ 94.6
651
+ 78.8
652
+ 429.0 (±5.9)
653
+ 384.1 (±13.5)
654
+ 386.3 (±15.5)
655
+ 373.9 (±25.0)
656
+ 371.9 (±36.4)
657
+ 341.3 (±55.3)
658
+ 206.5 (±4.1)
659
+ 202.7 (±3.4)
660
+ 119.0 (±3.9)
661
+ 209.5 (±5.4)
662
+ 192.7 (±5.1)
663
+ 117.8 (±5.8)
664
+ Undisclosed
665
+ env.
666
+ 
667
+ 
668
+ Random
669
+ REINFORCE+SoftMax
670
+ SAC+WkNN
671
+ SAC+TopK (MF)
672
+ SAC+GeMS (Ours)
673
+ 33.8 (±0.2)
674
+ 33.9 (±0.2)
675
+ 33.6 (±0.2)
676
+ 33.3 (±0.2)
677
+ 33.2 (±0.2)
678
+ 32.9 (±0.2)
679
+ 248.1 (±19.3)
680
+ 233.5 (±18.5)
681
+ 249.1 (±11.6)
682
+ 249.5 (±15.3)
683
+ 214.7 (±25.0)
684
+ 213.8 (±27.1)
685
+ 98.5 (±8.9)
686
+ 97.7 (±10.8)
687
+ 95.5 (±9.9)
688
+ 107.2 (±8.9)
689
+ 89.8 (±7.4)
690
+ 92.5 (±5.0)
691
+ 254.4 (±17.1)
692
+ 232.7 (±19.4)
693
+ 242.2 (±15.4)
694
+ 249.7 (±10.3)
695
+ 184.1 (±1.3)
696
+ 231.4 (±13.3)
697
+ 305.3†(±21.9)
698
+ 242.6 (±21.5)
699
+ 254.1 (±27.7)
700
+ 300.0†(±42.8)
701
+ 260.6†(±27.2)
702
+ 249.6 (±37.6)
703
+ embeddings or focused item embeddings) and the choice of click
704
+ model (TopDown, Mixed, or DivPen).
705
+ 5.2
706
+ Implementation and evaluation details
707
+ Our implementation aims to be as standard as possible, considering
708
+ the literature on RL, in order to ensure reproducibility. All base-
709
+ lines are paired with SAC [15], except SlateQ which is based on
710
+ Q-Learning [34], and SoftMax, which we pair with REINFORCE [32]
711
+ because it requires a discrete action space and a discretized variant
712
+ of SAC led to lower performance in our experiments. We implement
713
+ all agents using two-layer neural networks as function approxima-
714
+ tors, and use target networks for Q-functions in Slate-Q and SAC.
715
+ For hyperparameters common to baselines and our method, we
716
+ first performed a grid search over likely regions of the space on
717
+ baselines, and re-used the selected values for our method. For all
718
+ methods we use the Adam optimizer with learning rates of 0.001
719
+ for Q-networks and 0.003 for policy networks when applicable, as
720
+ well as a discount factor 𝛾 = 0.8 and a polyak averaging parameter
721
+ 𝜏 = 0.002. For the hyperparameters specific to our method (𝑑, 𝛽
722
+ and 𝜆), we perform a grid search on the TopDown environment
723
+ with focused item embeddings and select the combination with
724
+ the highest validation return. This combination is then re-used
725
+ on all other environments. The searched ranges were defined as
726
+ 𝑑 ∈ {16, 32}, 𝛽 ∈ {0.1, 0.2, 0.5, 1.0, 2.0} and 𝜆 ∈ {0.0, 0.2, 0.5, 1.0}.
727
+ For methods making the (LD) assumption, we generated a dataset
728
+ of 100K user trajectories (with 100 interaction turns each) from an
729
+ 𝜖-greedy oracle policy with 𝜖 = 0.5, i.e., each recommended item is
730
+ selected either uniformly randomly or by an oracle, with equal prob-
731
+ abilities. The VAE in GeMS is trained on this dataset for 10 epochs
732
+ with a batch size of 256 and a learning rate of 0.001. For approaches
733
+ requiring pre-trained item embeddings (TopK and WkNN), we learn
734
+ a simple matrix factorization model on the generated dataset by
735
+ considering as positive samples the pairs composed of the user in
736
+ the trajectory and each clicked item in their recommended slates.
737
+ In all of our experiments, we compare average cumulative re-
738
+ wards over 10 seeded runs, corresponding to ten initializations of
739
+ the agent’s parameters. In the case of GeMS, the seed also controls
740
+ the initialization of the VAE model during pre-training. We train
741
+ agents for 100K steps. Each step corresponds to a user trajectory,
742
+ composed of 100 interaction turns (i.e., 100 slates successively pre-
743
+ sented to the user) for a unique user. Every 1, 000 training steps, we
744
+ also evaluate the agents on 200 validation user trajectories. Finally,
745
+ the agents are tested by selecting the checkpoint with the highest
746
+ validation return and applying it on 500 test user trajectories. Con-
747
+ fidence intervals use Student’s 𝑡-distribution, and statistical tests
748
+ are Welch’s 𝑡-test. Both are based on a 95% confidence level.
749
+ 6
750
+ RESULTS
751
+ In our experiments, we investigate the following research ques-
752
+ tions: (RQ1) How does our slate recommendation framework based
753
+ on GeMS compare to previous methods when the underlying as-
754
+ sumptions of the latter are lifted? (RQ2) Does the proposed GeMS
755
+ framework effectively balance immediate and future rewards to
756
+ avoid boredom? (RQ3) How do the balancing hyperparameters 𝛽
757
+ and 𝜆 in GeMS impact the downstream RL performance?
758
+ 6.1
759
+ Comparison of our method against
760
+ baselines (RQ1)
761
+ In this section, we compare the performance of our method and
762
+ baselines on a wide array of simulated environments, corresponding
763
+ to the six environments described in Section 5.1.
764
+ Overview of the results. Table 2 shows the average test return
765
+ (i.e., cumulated reward or cumulated number of clicks) after train-
766
+ ing on 100K user trajectories. We group methods into two cate-
767
+ gories: Disclosed env., i.e., methods leveraging hidden environment
768
+ information, and Undisclosed env., i.e., methods that consider the
769
+ environment as a black-box and are therefore practically applicable.
770
+ A first observation we can draw, regardless of the specific environ-
771
+ ment used, is that the short-term oracle is easily beaten by most
772
+ approaches. Indeed, the simulator penalizes short-sighted recom-
773
+ mendations that lead to boredom: in these environments, diversity
774
+ is required to reach higher returns. We can also observe the superi-
775
+ ority of SAC+TopK (Ideal). This is not surprising, as this method
776
+ benefits from an unfair advantage – access to true item embed-
777
+ dings – but it suggests that practically applicable methods could be
778
+ augmented with domain knowledge to improve their performance.
779
+ However, despite having access to privileged information, SlateQ’s
780
+ performance is subpar, especially in DivPen environments. Its lower
781
+
782
+ Generative Slate Recommendation with Reinforcement Learning
783
+ WSDM ’23, February 27-March 3, 2023, Singapore, Singapore
784
+ (a) Short-term oracle.
785
+ (b) SAC+GeMS with 𝛾 = 0.
786
+ (c) SAC+GeMS with 𝛾 = 0.8.
787
+ Figure 2: Distribution of the relevance scores of items recommended by (a) a short-term oracle, (b) SAC+GeMS with 𝛾 = 0 and (c) SAC+GeMS
788
+ with 𝛾 = 0.8. Boredom penalizes item scores and is visualized by orange areas. The myopic approaches (left, center) lead to more boredom
789
+ than the long-term approach (right), and therefore to lower average item scores (solid red lines).
790
+ performance might be explained by its approximate optimization
791
+ strategy and restrictive single-click assumption.
792
+ Overall comparison of methods. The proposed SAC+GeMS com-
793
+ pares favorably to baselines across the range of environments we sim-
794
+ ulate. Out of the 6 tested environments, SAC+GeMS obtained the
795
+ best average results on all of them, among which 3 show a statisti-
796
+ cally significant improvement over all other methods. SAC+WkNN
797
+ performs very poorly: we hypothesize that the approach suffers
798
+ from the curse of dimensionality due to the larger action space
799
+ (200 dimensions in our experiments) and the assumption made
800
+ by the approach that candidate items need to be close to target
801
+ item embeddings according to the Euclidean distance. SAC+TopK
802
+ (MF) is more competitive, but the large difference with SAC+TopK
803
+ (ideal) suggests that TopK is very sensitive to the quality of item
804
+ embeddings. Despite its very restrictive assumptions and lack of the-
805
+ oretical guarantees in our setup, REINFORCE+SoftMax was a very
806
+ competitive baseline overall. However, while its best checkpoint
807
+ had high return, its training was unstable and failed to converge in
808
+ our experiments, which suggests it may be unreliable.
809
+ Comparisons across environments. The TopDown environ-
810
+ ment is the easiest for most methods, regardless of the type of
811
+ item embeddings. This is not surprising as all methods besides
812
+ Random either assume a top-down click model, sample items in
813
+ a top-down fashion or rely on data from a top-down logging pol-
814
+ icy. However, it is worth noting that other factors can dominate
815
+ the performance, such as sub-optimality of item embeddings for
816
+ SAC+TopK (MF). Conversely, DivPen was harder for most methods,
817
+ because it requires a strong additional constraint to obtain high
818
+ returns: intra-slate diversity must be high. SAC+GeMS was also af-
819
+ fected by these dynamics, but remained able to beat other methods
820
+ by generating diverse slates. Finally, the use of diffused item embed-
821
+ dings does not appear to cause lower returns for GeMS, compared
822
+ with focused ones, but is associated with larger confidence intervals
823
+ for SAC+GeMS: indeed, pivot items spanning multiple topics are
824
+ more likely to be attractive, at the expense of more fine-grained
825
+ strategies, making the training process uncertain.
826
+ 6.2
827
+ GeMS overcomes boredom to improve its
828
+ return (RQ2)
829
+ In Section 1 we highlighted that long-term optimization with RL
830
+ can penalize myopic behavior such as recommending only highly
831
+ relevant but similar items, which may lead to boredom. In this sec-
832
+ tion, we verify that SAC+GeMS is able to adapt its slate selection
833
+ to cope with boredom. We recall that in our simulated environ-
834
+ ments (detailed in Section 5.1), users get bored of a particular topic
835
+ whenever 5 of their latest 10 clicks were on items from that topic.
836
+ When a topic is saturated, its corresponding dimensions in the user
837
+ embedding are set to 0, which has the effect of diminishing the
838
+ attractiveness of future items presented to the user. It is therefore
839
+ necessary to avoid boredom in order to reach higher returns, even
840
+ if it comes at the cost of lower immediate rewards.
841
+ In this section, we compare three approaches on the TopDown
842
+ environment with focused item embeddings: (i) the short-term ora-
843
+ cle (STO) always maximizing the immediate reward, (ii) SAC+GeMS
844
+ with 𝛾 = 0.8 (i.e., our proposed method) where 𝛾 is the discount
845
+ factor of the RL algorithm, and (iii) SAC+GeMS with 𝛾 = 0 which
846
+ does not explicitly include future rewards in its policy gradient. In
847
+ this environment, SAC+GeMS𝛾=0.8 achieves an average test return
848
+ of 305.3, while SAC+GeMS𝛾=0 reaches 194.3, and STO only ob-
849
+ tains 107.7. These results suggest that long-term optimization is
850
+ indeed required to reach higher returns. It may seem surprising
851
+ that SAC+GeMS𝛾=0 gets better returns than STO, but its training
852
+ objective incentivizes average immediate rewards, which implicitly
853
+ encourages it to avoid low future rewards. However, adopting an
854
+ explicit mechanism to account for its causal effect on the user (i.e.,
855
+ setting 𝛾 = 0.8) allows SAC+GeMS to improve its decision-making.
856
+ In Figure 2, we plot the distribution of item scores (i.e., the dot-
857
+ product between internal user and item embeddings as defined in
858
+ Section 5.1) for the items recommended in slates by each of the
859
+ three methods, with the same seed for all three plots. The dashed
860
+ vertical line shows the score threshold of 0.28 needed to reach a
861
+ relevance probability of 0.5. Therefore, items on the left of this
862
+ line have a lower click probability while items on the right have a
863
+ higher click probability. The color indicates how many topics were
864
+ saturated when the agent recommended that particular item whose
865
+ score is plotted: one can see that when the user is bored of at least
866
+ one topic, items become less attractive as scores are reduced.
867
+ When no topic is saturated (i.e., yellow distribution), STO rec-
868
+
869
+ average score
870
+ ithreshold
871
+ 12
872
+ 1
873
+ Number of
874
+ 10
875
+ saturated topics
876
+ 0
877
+ 1
878
+ 8
879
+ 2
880
+ 1
881
+ PDF
882
+ 6
883
+ 4
884
+ 2
885
+ 00
886
+ 0.1
887
+ 0.2
888
+ 0.3
889
+ 0.4
890
+ 0.5
891
+ Scoreaverage score
892
+ ithreshold
893
+ 12
894
+ Number of
895
+ saturated topics
896
+ 10
897
+ 0
898
+ 1
899
+ 2
900
+ 8
901
+ PDF
902
+ 6
903
+ 4
904
+ 2
905
+ 00
906
+ 0.1
907
+ 0.2
908
+ 0.3
909
+ 0.4
910
+ 0.5
911
+ Scorethreshold .
912
+ average score
913
+ 18
914
+ Number of
915
+ 16
916
+ saturated topics
917
+ 14
918
+ 0
919
+ 1
920
+ 12
921
+ 2
922
+ PDF
923
+ 10
924
+ 8
925
+ 6
926
+ 4
927
+ 2
928
+ 00
929
+ 0.1
930
+ 0.2
931
+ 0.3
932
+ 0.4
933
+ 0.5
934
+ ScoreWSDM ’23, February 27-March 3, 2023, Singapore, Singapore
935
+ Romain Deffayet, Thibaut Thonet, Jean-Michel Renders, & Maarten de Rijke
936
+ (a) Impact of 𝛽 for 𝜆 = 0.5.
937
+ (b) Impact of 𝜆 for 𝛽 = 1.0.
938
+ Figure 3: Average cumulative number of clicks on the validation set obtained by SAC+GeMS with its best validation checkpoint, for different
939
+ values of 𝛽 and 𝜆 (defined in Section 3.3). We also display 95% confidence intervals.
940
+ ommends items with excellent scores (above the threshold and
941
+ up to 0.45): as a consequence, STO gets high immediate rewards.
942
+ However, by doing so it incurs a lot of boredom (large orange
943
+ areas). Overall, it leads to lower expected scores (solid red line)
944
+ and therefore fewer clicks. Conversely, SAC+GeMS𝛾=0.8 sacrifices
945
+ some immediate reward (yellow distribution shifted to the left) but
946
+ causes very little boredom (small orange area). Overall, by trading
947
+ off relevance and diversity, SAC+GeMS𝛾=0.8 yields good immediate
948
+ rewards while limiting boredom. It therefore gets higher average
949
+ scores. SAC+GeMS𝛾=0 exhibits an intermediate behavior due to its
950
+ limited capabilities: it recommends items of varying relevance, yet
951
+ leads to substantial boredom (larger orange area than for 𝛾 = 0.8).
952
+ 6.3
953
+ Balancing hyperparameters 𝛽 and 𝜆 (RQ3)
954
+ In Section 3.3, we suggested that the choice of 𝛽 and 𝜆 leads to trade-
955
+ offs that may impact the downstream performance of SAC+GeMS.
956
+ As a reminder, 𝛽 adjusts the importance of accurate reconstruction
957
+ versus smoothness and structure in the latent space (i.e., controlla-
958
+ bility), while 𝜆 weights the click reconstruction with respect to the
959
+ slate reconstruction. Next, we verify our intuition on the importance
960
+ of these trade-offs by reporting (in Figure 3) the best validation
961
+ return obtained for different values of said hyperparameters, on
962
+ the TopDown environment with focused item embeddings.
963
+ Figure 3a suggests that, indeed, there exists a “sweet spot” in the
964
+ selection of 𝛽. It confirms the intuition described in Section 3.3 and
965
+ the observation of Liu et al. [25]: 𝛽 must be appropriately balanced
966
+ in order to ensure high performance on the downstream RL task.
967
+ Specifically, we found that choosing 𝛽 = 1.0 leads to the highest
968
+ return overall, regardless of whether a latent dimension of 16 or
969
+ 32 is used.
970
+ The impact on the downstream performance of the trade-off
971
+ between slate and click reconstruction (Figure 3b) is less prominent
972
+ but can still be observed. It justifies our choice to add the click
973
+ reconstruction term in the loss (Eq. 1), even though clicks output by
974
+ GeMS’ decoder are not used during RL training. This also confirms
975
+ the importance of introducing and adjusting the hyperparameter 𝜆:
976
+ modeling clicks jointly with slates improves the final performance of
977
+ SAC+GeMS, but properly weighting the click reconstruction objective
978
+ with respect to the slate reconstruction objective is necessary.
979
+ 7
980
+ CONCLUSION
981
+ We have presented GeMS, a slate representation learning method
982
+ based on variational auto-encoders for slate recommendation with
983
+ reinforcement learning. This method has the notable advantage
984
+ of being flexible, allowing full-slate modeling and lightweight as-
985
+ sumptions, in contrast with existing approaches.
986
+ Findings and broader impact. Our experiments across a wide
987
+ array of environments demonstrate that GeMS compares favor-
988
+ ably against existing slate representation methods in practical set-
989
+ tings. Moreover, our empirical analysis highlights that it effectively
990
+ balances immediate and future rewards, and that the trade-offs
991
+ imposed by 𝛽 and 𝜆 significantly impact the RL downstream perfor-
992
+ mance, indicating that properly balancing these hyperparameters is
993
+ critical. Our work suggests that generative models are a promising
994
+ direction for representing rich actions such as slates.
995
+ Limitations. Our simulated experiments demonstrate the effec-
996
+ tiveness of GeMS for representing slates in an RL framework. How-
997
+ ever, it is well-known that online training of RL agents is too expen-
998
+ sive and risky, and that in practice agents must be trained offline, i.e.,
999
+ directly from logged data [8]. We did not address here the specific
1000
+ challenges of offline RL, as we wished to isolate the contribution of
1001
+ the slate representation to downstream performance.
1002
+ Future work. In future work, we will investigate how generative
1003
+ models can be leveraged in the offline setting, in different scenarios,
1004
+ or with even richer actions. We also plan to look into improvements
1005
+ of the architectures used for structured action representations, for
1006
+ example by using domain knowledge and user models.
1007
+ ACKNOWLEDGMENTS
1008
+ This research was (partially) funded by the Hybrid Intelligence
1009
+ Center, a 10-year program funded by the Dutch Ministry of Educa-
1010
+ tion, Culture and Science through the Netherlands Organisation for
1011
+ Scientific Research, https://hybrid-intelligence-centre.nl. All con-
1012
+ tent represents the opinion of the authors, which is not necessarily
1013
+ shared or endorsed by their respective employers and/or sponsors.
1014
+ REFERENCES
1015
+ [1] Ashton Anderson, Lucas Maystre, Ian Anderson, Rishabh Mehrotra, and Mounia
1016
+ Lalmas. 2020. Algorithmic Effects on the Diversity of Consumption on Spotify.
1017
+ In WWW ’20. 2155–2165.
1018
+ [2] Susan Athey, Raj Chetty, Guido W Imbens, and Hyunseung Kang. 2019. The
1019
+
1020
+ 325
1021
+ T↑
1022
+ 300
1023
+ ks
1024
+ click
1025
+ 275
1026
+ number of
1027
+ 250
1028
+ Latent dim
1029
+ Cumulative (
1030
+ 225
1031
+ 16
1032
+ 32
1033
+ 200
1034
+ T
1035
+ 175
1036
+ 150 -
1037
+ 0.0
1038
+ 0.2
1039
+ 0.5
1040
+ 1.0
1041
+ 2.0
1042
+ beta300
1043
+ 1
1044
+ T
1045
+ clicks
1046
+ T
1047
+ 250
1048
+ number of
1049
+ Latent dim
1050
+ Cumulative
1051
+ 200
1052
+ 16
1053
+ 32
1054
+ 150
1055
+ 100 -
1056
+ 0.0
1057
+ 0.2
1058
+ 0.5
1059
+ 1.0
1060
+ lambdaGenerative Slate Recommendation with Reinforcement Learning
1061
+ WSDM ’23, February 27-March 3, 2023, Singapore, Singapore
1062
+ Surrogate Index: Combining Short-Term Proxies to Estimate Long-Term Treatment
1063
+ Effects More Rapidly and Precisely. Technical Report. National Bureau of Economic
1064
+ Research.
1065
+ [3] Xueying Bai, Jian Guan, and Hongning Wang. 2019.
1066
+ A Model-Based Rein-
1067
+ forcement Learning with Adversarial Training for Online Recommendation.
1068
+ In NeurIPS ’19. 10734–10745.
1069
+ [4] Eytan Bakshy, Solomon Messing, and Lada Adamic. 2015. Exposure to Ideo-
1070
+ logically Diverse News and Opinion on Facebook. Science 348, 6239 (2015),
1071
+ 1130–1132.
1072
+ [5] Nicolò Botteghi, Mannes Poel, Beril Sirmaçek, and Christoph Brune. 2021. Low-
1073
+ Dimensional State and Action Representation Learning with MDP Homomor-
1074
+ phism Metrics. arXiv:2107.01677 (2021).
1075
+ [6] Yash Chandak, Georgios Theocharous, James Kostas, Scott Jordan, and Philip
1076
+ Thomas. 2019. Learning Action Representations for Reinforcement Learning. In
1077
+ ICML ’19. 941–950.
1078
+ [7] Praveen Chandar, Brian St. Thomas, Lucas Maystre, Vijay Pappu, Roberto Sanchis-
1079
+ Ojeda, Tiffany Wu, Ben Carterette, Mounia Lalmas, and Tony Jebara. 2022. Using
1080
+ Survival Models to Estimate User Engagement in Online Experiments. In WWW
1081
+ ’22. 3186–3195.
1082
+ [8] Minmin Chen, Alex Beutel, Paul Covington, Sagar Jain, Francois Belletti, and
1083
+ Ed H. Chi. 2019. Top-K Off-Policy Correction for a REINFORCE Recommender
1084
+ System. In WSDM ’19. 456–464.
1085
+ [9] Xinshi Chen, Shuang Li, Hui Li, Shaohua Jiang, Yuan Qi, and Le Song. 2019.
1086
+ Generative Adversarial User Model for Reinforcement Learning Based Recom-
1087
+ mendation System. In ICML ’19. 1052–1061.
1088
+ [10] Kyunghyun Cho, Bart van Merrienboer, Çaglar Gülçehre, Dzmitry Bahdanau,
1089
+ Fethi Bougares, Holger Schwenk, and Yoshua Bengio. 2014. Learning Phrase
1090
+ Representations using RNN Encoder-Decoder for Statistical Machine Translation.
1091
+ In EMNLP ’14. 1724–1734.
1092
+ [11] Van Dang, Michael Bendersky, and W. Bruce Croft. 2013. Two-Stage Learning to
1093
+ Rank for Information Retrieval. In ECIR ’13. 423–434.
1094
+ [12] Gabriel Dulac-Arnold, Richard Evans, Hado van Hasselt, Peter Sunehag, Timothy
1095
+ Lillicrap, Jonathan Hunt, Timothy Mann, Theophane Weber, Thomas Degris,
1096
+ and Ben Coppin. 2015. Deep Reinforcement Learning in Large Discrete Action
1097
+ Spaces. arXiv:1512.07679 (2015).
1098
+ [13] Seth R. Flaxman, Sharad Goel, and Justin M. Rao. 2016. Filter Bubbles, Echo
1099
+ Chambers, and Online News Consumption. Public Opinion Quarterly 80, S1
1100
+ (2016), 298–320.
1101
+ [14] David Ha and Jürgen Schmidhuber. 2018. Recurrent World Models Facilitate
1102
+ Policy Evolution. In NeurIPS ’18. 2455–2467.
1103
+ [15] Tuomas Haarnoja, Aurick Zhou, Pieter Abbeel, and Sergey Levine. 2018. Soft
1104
+ Actor-Critic: Off-Policy Maximum Entropy Deep Reinforcement Learning with a
1105
+ Stochastic Actor. In ICML ’18. 1856–1865.
1106
+ [16] Christian Hansen, Rishabh Mehrotra, Casper Hansen, Brian Brost, Lucas Maystre,
1107
+ and Mounia Lalmas. 2021. Shifting Consumption towards Diverse Content on
1108
+ Music Streaming Platforms. In WSDM ’21. 238–246.
1109
+ [17] Henning Hohnhold, Deirdre O’Brien, and Diane Tang. 2015. Focusing on the
1110
+ Long-Term: It’s Good for Users and Business. In KDD ’15. 1849–1858.
1111
+ [18] Eugene Ie, Vihan Jain, Jing Wang, Sanmit Narvekar, Ritesh Agarwal, Rui Wu,
1112
+ Heng-Tze Cheng, Tushar Chandra, and Craig Boutilier. 2019. SlateQ: A Tractable
1113
+ Decomposition for Reinforcement Learning with Recommendation Sets. In IJ-
1114
+ CAI ’19. 2592–2599.
1115
+ [19] Dietmar Jannach, Pearl Pu, Francesco Ricci, and Markus Zanker. 2021. Recom-
1116
+ mender Systems: Past, Present, Future. AI Mag. 42, 3 (2021), 3–6.
1117
+ [20] Ray Jiang, Sven Gowal, Yuqiu Qian, Timothy A. Mann, and Danilo J. Rezende.
1118
+ 2019. Beyond Greedy Ranking: Slate Optimization via List-CVAE. In ICLR ’19.
1119
+ [21] Leslie Pack Kaelbling, Michael L. Littman, and Anthony R. Cassandra. 1998.
1120
+ Planning and Acting in Partially Observable Stochastic Domains. Artificial
1121
+ Intelligence 101, 1 (1998), 99–134.
1122
+ [22] Diederik Kingma and Max Welling. 2014. Auto-Encoding Variational Bayes. In
1123
+ ICLR ’14.
1124
+ [23] Yehuda Koren, Robert M. Bell, and Chris Volinsky. 2009. Matrix Factorization
1125
+ Techniques for Recommender Systems. Computer 42, 8 (2009), 30–37.
1126
+ [24] Solomon Kullback and Richard A. Leibler. 1951. On Information and Sufficiency.
1127
+ The Annals of Mathematical Statistics 22, 1 (1951), 79–86.
1128
+ [25] Shuchang Liu, Fei Sun, Yingqiang Ge, Changhua Pei, and Yongfeng Zhang. 2021.
1129
+ Variation Control and Evaluation for Generative Slate Recommendations. In
1130
+ WWW ’21. 436–448.
1131
+ [26] Farzan Masrour, Tyler Wilson, Heng Yan, Pang-Ning Tan, and Abdol-Hossein
1132
+ Esfahanian. 2020. Bursting the Filter Bubble: Fairness-Aware Network Link
1133
+ Prediction. In AAAI ’20. 841–848.
1134
+ [27] James McInerney, Brian Brost, Praveen Chandar, Rishabh Mehrotra, and Benjamin
1135
+ Carterette. 2020. Counterfactual Evaluation of Slate Recommendations with
1136
+ Sequential Reward Interactions. In KDD ’20. 1779–1788.
1137
+ [28] Eli Pariser. 2011. The Filter Bubble: What the Internet Is Hiding from You. The
1138
+ Penguin Press.
1139
+ [29] Wilbert Samuel Rossi, Jan Willem Polderman, and Paolo Frasca. 2021. The Closed
1140
+ Loop between Opinion Formation and Personalised Recommendations. IEEE
1141
+ Transactions on Control of Network Systems (2021).
1142
+ [30] Adam Stooke, Kimin Lee, Pieter Abbeel, and Michael Laskin. 2021. Decoupling
1143
+ Representation Learning from Reinforcement Learning. In ICML ’21. 9870–9879.
1144
+ [31] Peter Sunehag, Richard Evans, Gabriel Dulac-Arnold, Yori Zwols, Daniel Visentin,
1145
+ and Ben Coppin. 2015.
1146
+ Deep Reinforcement Learning with Attention for
1147
+ Slate Markov Decision Processes with High-Dimensional States and Actions.
1148
+ arXiv:1512.01124 (2015).
1149
+ [32] Richard Sutton and Andrew Barto. 2018. Reinforcement Learning: An Introduction.
1150
+ MIT Press, 326–329.
1151
+ [33] Isaac Waller and Ashton Anderson. 2019. Generalists and Specialists: Using
1152
+ Community Embeddings to Quantify Activity Diversity in Online Platforms. In
1153
+ WWW ’19. 1954–1964.
1154
+ [34] Christopher Watkins and Peter Dayan. 1992. Q-learning. Machine Learning 8
1155
+ (1992), 279–292.
1156
+ [35] Wenxuan Zhou, Sujay Bajracharya, and David Held. 2020. PLAS: Latent Action
1157
+ Space for Offline Reinforcement Learning. In CoRL ’20. 1719–1735.
1158
+ [36] Lixin Zou, Long Xia, Zhuoye Ding, Jiaxing Song, Weidong Liu, and Dawei Yin.
1159
+ 2019. Reinforcement Learning to Optimize Long-Term User Engagement in
1160
+ Recommender Systems. In KDD ’19. 2810–2818.
1161
+
7NFAT4oBgHgl3EQfoB2V/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
7tE3T4oBgHgl3EQfRgnj/content/tmp_files/2301.04423v1.pdf.txt ADDED
@@ -0,0 +1,417 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Multi-Scanner Canine Cutaneous Squamous Cell
2
+ Carcinoma Histopathology Dataset
3
+ Frauke Wilm1,2, Marco Fragoso3, Christof A. Bertram4, Nikolas Stathonikos5,
4
+ Mathias Öttl1, Jingna Qiu2, Robert Klopfleisch3, Andreas Maier1,
5
+ Katharina Breininger2,†, Marc Aubreville6,†
6
+ 1Pattern Recognition Lab, Friedrich-Alexander-Universität Erlangen-Nünberg, Germany
7
+ 2Department AIBE, Friedrich-Alexander-Universität Erlangen-Nürnberg, Germany
8
+ 3Institute of Veterinary Pathology, Freie Universität Berlin, Germany
9
+ 4Institute of Pathology, University of Veterinary Medicine, Vienna, Austria
10
+ 5Pathology Department, University Medical Centre Utrecht, The Netherlands
11
+ 6Technische Hochschule Ingolstadt, Ingolstadt, Germany
12
+ †shared senior authors
13
+ frauke.wilm@fau.de
14
+ Abstract. In histopathology, scanner-induced domain shifts are known to impede
15
+ the performance of trained neural networks when tested on unseen data. Multi-
16
+ domain pre-training or dedicated domain-generalization techniques can help to
17
+ develop domain-agnostic algorithms. For this, multi-scanner datasets with a high
18
+ variety of slide scanning systems are highly desirable. We present a publicly
19
+ available multi-scanner dataset of canine cutaneous squamous cell carcinoma
20
+ histopathology images, composed of 44 samples digitized with five slide scanners.
21
+ This dataset provides local correspondences between images and thereby isolates
22
+ the scanner-induced domain shift from other inherent, e.g. morphology-induced
23
+ domain shifts. To highlight scanner differences, we present a detailed evaluation
24
+ of color distributions, sharpness, and contrast of the individual scanner subsets.
25
+ Additionally, to quantify the inherent scanner-induced domain shift, we train a
26
+ tumor segmentation network on each scanner subset and evaluate the performance
27
+ both in- and cross-domain. We achieve a class-averaged in-domain intersection
28
+ over union coefficient of up to 0.86 and observe a cross-domain performance
29
+ decrease of up to 0.38, which confirms the inherent domain shift of the presented
30
+ dataset and its negative impact on the performance of deep neural networks.
31
+ 1
32
+ Introduction
33
+ The digitization of histological specimens with dedicated slide scanning systems has
34
+ facilitated machine learning-based image analysis for histopathology. These algorithms
35
+ have since assisted pathologists in a variety of routine tasks, e.g. mitotic figure detection
36
+ [1], for which they even have been able to outperform trained experts in controlled
37
+ settings [1, 2]. Still, their performance is highly dependent on the quality and availabil-
38
+ ity of training data [3] and can deteriorate considerably on a test set where the image
39
+ characteristics differ from the training data [4]. Such differences commonly referred to
40
+ as “domain shift” can originate not only from different staining and tissue preparation
41
+ protocols of different pathology laboratories but also from the digitization of histolog-
42
+ 1
43
+ arXiv:2301.04423v1 [eess.IV] 11 Jan 2023
44
+
45
+ 2
46
+ Wilm et al.
47
+ ical specimens with different scanning systems. Especially from a clinical perspective,
48
+ domain-agnostic models are important for generating accurate and reliable predictions.
49
+ Previous work has shown that domain generalization techniques, e.g. domain-
50
+ adversarial training, can help to develop domain-agnostic models [5]. For this, a training
51
+ dataset composed of a wide range of different domains is highly desirable. So far, the
52
+ most extensive publicly available multi-scanner histopathology dataset is the training
53
+ set of the MICCAI MItosis DOmain Generalization (MIDOG) 2021 challenge [2]. The
54
+ dataset consists of 2 mm2-sized cropped regions of 200 breast cancer cases digitized
55
+ with four scanners. However, the cases were divided between the scanners, and perfor-
56
+ mance differences can therefore not solely be attributed to the slide scanner but also
57
+ to the case selection. The Mitos & Atypia dataset [6] is the only public multi-scanner
58
+ histopathology dataset with local image correspondences, i.e. the same case was digi-
59
+ tized with multiple slide scanners, however, with 16 cases and two scanners, its extent
60
+ is limited and it does not leave room for experiments with hold-out test scanners.
61
+ In this work, we present a canine cutaneous histopathology dataset, where each of
62
+ the 44 samples was digitized with five different slide scanning systems. This multi-
63
+ scanner dataset provides local image correspondences, useful for domain generalization
64
+ experiments. Accompanied by an annotation database of 1,243 polygon annotations
65
+ for seven histologic classes (tumor, epidermis, dermis, subcutis, bone, cartilage, and
66
+ a combined class of inflammation and necrosis), this is the first publicly available
67
+ multi-scanner segmentation dataset. For each scanner subset, we provide a detailed
68
+ evaluation of color distributions, sharpness, and contrast. To quantify the extent of
69
+ the scanner-induced domain shift, we performed a technical validation of the dataset
70
+ by training a baseline tumor segmentation algorithm on each single scanner domain
71
+ and then testing the algorithm across all scanners. For some scanners, we observed
72
+ a considerable performance decrease, which highlights the domain shift inherent in
73
+ the dataset. The whole slide images (WSIs) and annotation databases are publicly
74
+ available on Zenodo (https://doi.org/10.5281/zenodo.7418555), and code for
75
+ implementing the baseline architectures can be obtained from our GitHub repository
76
+ (https://github.com/DeepPathology/MultiScanner_SCC).
77
+ 2
78
+ Materials and methods
79
+ The dataset presented in this work extends the publicly available CATCH dataset [7], a
80
+ collection of 350 WSIs of seven of the most common canine cutaneous tumor subtypes
81
+ (50 WSIs per subtype). For the CATCH dataset, the specimens were digitized with the
82
+ Aperio ScanScope CS2 (Leica, Germany) at a resolution of 0.25 µm/pixel using a 40 ×
83
+ objective lens. Use of these samples was approved by the local governmental authorities
84
+ (State Office of Health and Social Affairs of Berlin, approval ID: StN 011/20). For the
85
+ multi-scanner dataset, we randomly selected one subtype (squamous cell carcinoma)
86
+ and digitized the samples with four additional slide scanners (see Figure 1):
87
+ • NanoZoomer S210 (Hamamatsu, Japan), 0.22 µm/pixel
88
+ • NanoZoomer 2.0-HT (Hamamatsu, Japan), 0.23 µm/pixel
89
+ • Pannoramic 1000 (3DHISTECH, Hungary), 0.25 µm/pixel
90
+ • Aperio GT 450 (Leica, Germany), 0.26 µm/pixel
91
+
92
+ Multi-Scanner Histopathology Dataset
93
+ 3
94
+ (a) CS2
95
+ (b) NZ210
96
+ (c) NZ2.0
97
+ (d) P1000
98
+ (e) GT450
99
+ Fig. 1. Exemplary region of interest of the multi-scanner dataset.
100
+ Due to severe scanning artifacts in at least one of the scans, six specimens were excluded
101
+ from the dataset, resulting in a total of 220 WSIs (44 samples digitized with five
102
+ scanners each). The CATCH annotation database provides annotations for the individual
103
+ tumor subtypes and six additional skin tissue classes (epidermis, dermis, subcutis,
104
+ bone, cartilage, and a combined class of inflammation and necrosis). We transferred all
105
+ annotations to the other scanners using the WSI registration algorithm by Marzahl et
106
+ al. [8] and visually validated them by overlaying the transformed polygon annotations
107
+ onto the scans. We provide public access to the WSIs on Zenodo (https://doi.
108
+ org/10.5281/zenodo.7418555), licensed under a Creative Commons Attribution
109
+ 4.0 International License. However, due to storage restrictions, we have converted them
110
+ to lower-resolution pyramidal TIFFs (4 µm/pixel), which has shown to be adequate for
111
+ training segmentation tasks on the CATCH dataset [7].
112
+ 2.1
113
+ Dataset validation
114
+ For each scanner subset, we evaluated the average RGB color distribution, sharpness,
115
+ and contrast. For sharpness estimation, we used the cumulative probability of blur
116
+ detection (CPBD) metric [9], which is a perceptual-based image sharpness metric. It is
117
+ computed via edge detection, followed by a blur estimation at the detected edges. The
118
+ CPBD metric then corresponds to the cumulative probability of blur detection, i.e. the
119
+ percentage of image edges that fall below a threshold of a perceptually noticeable blur.
120
+ For implementation details, we refer to [9]. For the analysis of RGB distributions and
121
+ contrast, we used Otsu’s adaptive thresholding to separate foreground tissue from white
122
+ background. For each image, we calculated the average intensities of the color channels
123
+ 𝐼𝑅, 𝐼𝐺, and 𝐼𝐵 in the detected tissue regions. Afterward, we converted the regions to
124
+ grayscale and computed the Michelson contrast [10] 𝐶𝑀 as a measure of global contrast.
125
+ 2.2
126
+ Technical validation
127
+ For technical validation of the dataset, we trained a segmentation model on each scan-
128
+ ner domain and tested the algorithm across all scanners. For model development, we
129
+ performed a slide-level split into training (N=30), validation (N=5), and test (N=9)
130
+ cases. We trained a UNet with a ResNet18 encoder pre-trained on ImageNet for the
131
+ segmentation into tumor, non-tumor, and background. For this, we combined all skin
132
+ tissue classes into one non-tumor class and used the automatically detected background
133
+
134
+ 4
135
+ Wilm et al.
136
+ areas to train the background class. We trained the networks on image patches sized
137
+ 512 × 512 pixels, extracted at a resolution of 4 µm/pixel. During each epoch, we sampled
138
+ 50 patches per WSI within the annotated polygons. Due to a high class imbalance, we
139
+ randomly sampled the polygons with a class-weighting of 10 % background and 45 %
140
+ each of tumor and non-tumor regions. For each scanner, we applied z-score normaliza-
141
+ tion with the training set statistics (mean and standard deviation) and performed data
142
+ augmentation using random flipping, affine transformations, and random lightning and
143
+ contrast change. We used the Adam optimizer and trained the networks with a com-
144
+ bination of cross-entropy and Dice loss. We trained the models with a batch size of
145
+ 8 and a cyclic learning rate of 10−4 for 100 epochs, after which we observed model
146
+ convergence. Model selection was guided by the highest intersection over union (mIoU)
147
+ on the validation set.
148
+ 3
149
+ Results
150
+ Figure 2 shows the RGB distribution of the non-background areas for the complete
151
+ dataset of 44 WSIs per scanner. The distributions match the exemplary patches in
152
+ Figure 1, where the patches of the Aperio CS2 and the NanoZoomer 210 appear redder,
153
+ which is reflected in a shift of the red pixel distributions to higher values. When looking
154
+ at the distributions of the Aperio GT450, all curves are densely located at the higher color
155
+ component values, which corresponds to the bright appearance of the patch in Figure 1d.
156
+ Table 1 summarizes the channel-wise color averages, sharpness, and contrast of the slide
157
+ scanning systems. These results further underline the visual impression of the patches
158
+ in Figure 1. When calculating the ratio of the red and the blue color channel 𝐼𝑅/𝐼𝐵, the
159
+ NZ210 results in a ratio of 1.12 and the NZ2.0 in a ratio of 1.04, which matches the much
160
+ redder appearance of the NZ210 patch and the bluer appearance of the NZ2.0 patch.
161
+ Overall, the CS2, NZ210, NZ2.0, and P1000 show comparable sharpness and contrast
162
+ values, while the Aperio 450 exhibits a slightly higher sharpness but a considerably lower
163
+ contrast. Figure 3 visualizes the mIoU when training the segmentation network on one
164
+ scanner, and testing it on all scanners. The results show high in-domain performance
165
+ (diagonal) with mIoU values between 0.82 for the P1000 and GT450, and 0.86 for
166
+ the NZ210. The cross-domain performance highlights the scanner-induced domain shift
167
+ inherent in our dataset. While the networks trained on the CS2 and the NZ210 generalize
168
+ considerably well, with performance decreases of up to 0.08 and 0.12 compared to the in-
169
+ domain mIoU, the highest cross-domain performance drop was observed when training
170
+ 0
171
+ 50
172
+ 100
173
+ 150
174
+ 200
175
+ 250
176
+ 0.000
177
+ 0.005
178
+ 0.010
179
+ 0.015
180
+ 0.020
181
+ 0.025
182
+ 0.030
183
+ 0.035
184
+ 0.040
185
+ Density
186
+ (a) CS2
187
+ 0
188
+ 50
189
+ 100
190
+ 150
191
+ 200
192
+ 250
193
+ 0.000
194
+ 0.005
195
+ 0.010
196
+ 0.015
197
+ 0.020
198
+ 0.025
199
+ 0.030
200
+ 0.035
201
+ 0.040
202
+ Density
203
+ (b) NZ210
204
+ 0
205
+ 50
206
+ 100
207
+ 150
208
+ 200
209
+ 250
210
+ 0.000
211
+ 0.005
212
+ 0.010
213
+ 0.015
214
+ 0.020
215
+ 0.025
216
+ 0.030
217
+ 0.035
218
+ 0.040
219
+ Density
220
+ (c) NZ2.0
221
+ 0
222
+ 50
223
+ 100
224
+ 150
225
+ 200
226
+ 250
227
+ 0.000
228
+ 0.005
229
+ 0.010
230
+ 0.015
231
+ 0.020
232
+ 0.025
233
+ 0.030
234
+ 0.035
235
+ 0.040
236
+ Density
237
+ (d) P1000
238
+ 0
239
+ 50
240
+ 100
241
+ 150
242
+ 200
243
+ 250
244
+ 0.000
245
+ 0.005
246
+ 0.010
247
+ 0.015
248
+ 0.020
249
+ 0.025
250
+ 0.030
251
+ 0.035
252
+ 0.040
253
+ Density
254
+ (e) GT450
255
+ Fig. 2. Kernel density estimation of RGB values per scanner.
256
+
257
+ Multi-Scanner Histopathology Dataset
258
+ 5
259
+ Tab. 1. Channel-wise color distributions 𝐼𝑅, 𝐼𝐺, and 𝐼𝐵, sharpness 𝑆𝐶𝑃𝐵𝐷 calculated as cumu-
260
+ lative probability of blur detection, and Michelson contrast 𝐶𝑀 of the scanners (𝜇 ± 𝜎).
261
+ .
262
+ 𝐼𝑅
263
+ 𝐼𝐺
264
+ 𝐼𝐵
265
+ 𝑆𝐶𝑃𝐵𝐷
266
+ 𝐶𝑀
267
+ CS2
268
+ 201.84 ± 19.46
269
+ 153.18 ± 35.41
270
+ 171.54 ± 30.02
271
+ 0.80 ± 0.02
272
+ 0.74 ± 0.12
273
+ NZ210
274
+ 218.88 ± 17.96
275
+ 172.64 ± 28.04
276
+ 195.26 ± 20.15
277
+ 0.82 ± 0.03
278
+ 0.81 ± 0.14
279
+ NZ2.0
280
+ 192.49 ± 21.63
281
+ 153.46 ± 36.72
282
+ 184.51 ± 23.90
283
+ 0.81 ± 0.02
284
+ 0.81 ± 0.13
285
+ P1000
286
+ 223.41 ± 18.60
287
+ 164.97 ± 41.15
288
+ 211.44 ± 21.64
289
+ 0.80 ± 0.02
290
+ 0.71 ± 0.14
291
+ GT450
292
+ 226.59 ± 12.99
293
+ 208.18 ± 20.88
294
+ 218.80 ± 15.92
295
+ 0.84 ± 0.04
296
+ 0.53 ± 0.15
297
+ on the P1000, with a decrease of up to 0.38. A visual inspection of segmentation results
298
+ showed that the network trained on the P1000 misclassified many background areas
299
+ of the other scanners. A reason might be the integrated tissue detection of the P1000,
300
+ which sets all pixels outside the tissue bounding box to (255, 255, 255) in order to
301
+ reduce scanning times. This artificially removes common artifacts, e.g. dust particles,
302
+ and the network might only look for high pixel values and not learn the morphological
303
+ characteristics of background areas.
304
+ CS2
305
+ NZ210
306
+ NZ2.0
307
+ P1000
308
+ GT450
309
+ test
310
+ CS2
311
+ NZ210
312
+ NZ2.0
313
+ P1000
314
+ GT450
315
+ train
316
+ 0.83
317
+ 0.83
318
+ 0.82
319
+ 0.75
320
+ 0.8
321
+ 0.79
322
+ 0.86
323
+ 0.84
324
+ 0.74
325
+ 0.81
326
+ 0.71
327
+ 0.7
328
+ 0.84
329
+ 0.82
330
+ 0.79
331
+ 0.6
332
+ 0.44
333
+ 0.65
334
+ 0.82
335
+ 0.55
336
+ 0.81
337
+ 0.71
338
+ 0.81
339
+ 0.7
340
+ 0.82
341
+ 0.45
342
+ 0.50
343
+ 0.55
344
+ 0.60
345
+ 0.65
346
+ 0.70
347
+ 0.75
348
+ 0.80
349
+ 0.85
350
+ Fig. 3. Scanner-wise performance of segmentation net-
351
+ works. Matrix entry 𝑚𝑖, 𝑗 is the mean intersection over
352
+ union (mIoU) when training on the scanning system
353
+ in row 𝑖 and testing on the scanning system in column
354
+ 𝑗. Diagonal elements indicate in-domain performance,
355
+ whereas off-diagonal elements represent cross-domain
356
+ performance.
357
+ 4
358
+ Discussion
359
+ Our experiments have demonstrated the negative impact of scanner-induced domain
360
+ shifts on the performance of deep neural networks, indicated by a considerable de-
361
+ crease in mIoU on unseen scanners. This confirms the observations of previous works
362
+ and supports the need for methods that can tackle this domain shift and adequate
363
+ datasets to evaluate their generalization capability. The presented dataset exceeds exist-
364
+ ing multi-scanner datasets in terms of sample size and scanning systems. Furthermore,
365
+ it provides local image correspondences, which isolate the scanner-induced from the
366
+ morphology-induced domain shift and allow the development of algorithms dependent
367
+ on these correspondences, e.g. WSI registration algorithms. We have implicitly shown
368
+ the eligibility of our dataset for this application by successfully transferring our anno-
369
+ tation database from the CS2 scanner to the remaining scanner using WSI registration.
370
+ The detailed evaluation of our scanner subsets has highlighted considerable differences
371
+
372
+ 6
373
+ Wilm et al.
374
+ regarding color distributions and contrasts present in clinically used scanners. Surpris-
375
+ ingly, even though our evaluations resulted in the lowest contrast value for the Aperio
376
+ GT450, this did not impede segmentation performance, shown by an in-domain mIoU
377
+ of 0.82, which is comparable to the in-domain mIoUs of the remaining scanners. In our
378
+ technical validation, we observed a large cross-domain performance decrease, especially
379
+ when training on the P1000 scanner. We assume that this can mainly be attributed to the
380
+ unique pre-processing steps of the scanner vendor, as the P1000 showed similar image
381
+ statistics to the CS2 but their average cross-domain performance differed considerably.
382
+ However, we also observed a cross-domain performance decrease for the remaining
383
+ scanners, which indicates that some of the learned feature representations did not gen-
384
+ eralize well across scanners. Future work could focus on a closer evaluation of which
385
+ scanner characteristics hinder the extraction of domain-agnostic features and should
386
+ therefore be disregarded, e.g. by using specific filters for data pre-processing or using
387
+ adversarial training to punish the extraction of these features.
388
+ Acknowledgement. F.W. gratefully acknowledges the financial support received by
389
+ Merck Healthcare KGaA and the technical support received by the Clinical Assay
390
+ Strategy 1 group at Merck Healthcare KGaA during sample digitization. K.B. gratefully
391
+ acknowledges support by d.hip campus - Bavarian aim in form of a faculty endowment.
392
+ References
393
+ 1. Aubreville M, Bertram CA, Marzahl C, Gurtner C, Dettwiler M, Schmidt A et al. Deep
394
+ learning algorithms out-perform veterinary pathologists in detecting the mitotically most
395
+ active tumor region. Sci Rep 10:16447 (2020), pp. 1–11.
396
+ 2. Aubreville M, Stathonikos N, Bertram CA, Klopleisch R, Hoeve N ter, Ciompi F et al. Mitosis
397
+ domain generalization in histopathology images–The MIDOG challenge. Med Image Anal
398
+ 84:102699 (2023).
399
+ 3. Deng S, Zhang X, Yan W, Chang EI, Fan Y, Lai M et al. Deep learning in digital pathology
400
+ image analysis: a survey. Front Med 14.4 (2020), pp. 470–487.
401
+ 4. Stacke K, Eilertsen G, Unger J, Lundström C. Measuring domain shift for deep learning in
402
+ histopathology. IEEE J Biomed Health Inform 25.2 (2020), pp. 325–336.
403
+ 5. Wilm F, Marzahl C, Breininger K, Aubreville M. Domain adversarial RetinaNet as a refer-
404
+ ence algorithm for the MItosis DOmain Generalization challenge. Biomedical Image Regis-
405
+ tration, Domain Generalisation and Out-of-Distribution Analysis: MICCAI 2021 Challenges.
406
+ Springer. 2022, pp. 5–13.
407
+ 6. Roux L, Racoceanu D, Capron F, Calvo J, Attieh E, Le Naour G et al. Mitos & Atypia. Image
408
+ Pervasive Access Lab (IPAL), Agency Sci., Technol. & Res. Inst. Infocom Res., Singapore,
409
+ Tech. Rep 1 (2014), pp. 1–8.
410
+ 7. Wilm F et al. CAnine CuTaneous Cancer Histology dataset (version 1). The Cancer Imaging
411
+ Archive (2022). https://doi.org/10.7937/TCIA.2M93-FX66.
412
+ 8. Marzahl C, Wilm F, F. DF, Tharun L, Perner S, Bertram CA et al. Robust quad-tree based
413
+ registration on whole slide images. Comput Pathol (2021). PMLR, 2021, pp. 181–190.
414
+ 9. Narvekar ND, Karam LJ. A no-reference image blur metric based on the cumulative proba-
415
+ bility of blur detection (CPBD). IEEE Trans Image Process 20.9 (2011), pp. 2678–2683.
416
+ 10. Michelson AA. Studies in optics. Courier Corporation, 1995.
417
+
7tE3T4oBgHgl3EQfRgnj/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,377 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf,len=376
2
+ page_content='Multi-Scanner Canine Cutaneous Squamous Cell Carcinoma Histopathology Dataset Frauke Wilm1,2, Marco Fragoso3, Christof A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
3
+ page_content=' Bertram4,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
4
+ page_content=' Nikolas Stathonikos5,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
5
+ page_content=' Mathias Öttl1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
6
+ page_content=' Jingna Qiu2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
7
+ page_content=' Robert Klopfleisch3,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
8
+ page_content=' Andreas Maier1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
9
+ page_content=' Katharina Breininger2,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
10
+ page_content='†,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
11
+ page_content=' Marc Aubreville6,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
12
+ page_content='† 1Pattern Recognition Lab,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
13
+ page_content=' Friedrich-Alexander-Universität Erlangen-Nünberg,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
14
+ page_content=' Germany 2Department AIBE,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
15
+ page_content=' Friedrich-Alexander-Universität Erlangen-Nürnberg,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
16
+ page_content=' Germany 3Institute of Veterinary Pathology,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
17
+ page_content=' Freie Universität Berlin,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
18
+ page_content=' Germany 4Institute of Pathology,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
19
+ page_content=' University of Veterinary Medicine,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
20
+ page_content=' Vienna,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
21
+ page_content=' Austria 5Pathology Department,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
22
+ page_content=' University Medical Centre Utrecht,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
23
+ page_content=' The Netherlands 6Technische Hochschule Ingolstadt,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
24
+ page_content=' Ingolstadt,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
25
+ page_content=' Germany †shared senior authors frauke.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
26
+ page_content='wilm@fau.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
27
+ page_content='de Abstract.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
28
+ page_content=' In histopathology, scanner-induced domain shifts are known to impede the performance of trained neural networks when tested on unseen data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
29
+ page_content=' Multi- domain pre-training or dedicated domain-generalization techniques can help to develop domain-agnostic algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
30
+ page_content=' For this, multi-scanner datasets with a high variety of slide scanning systems are highly desirable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
31
+ page_content=' We present a publicly available multi-scanner dataset of canine cutaneous squamous cell carcinoma histopathology images, composed of 44 samples digitized with five slide scanners.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
32
+ page_content=' This dataset provides local correspondences between images and thereby isolates the scanner-induced domain shift from other inherent, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
33
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
34
+ page_content=' morphology-induced domain shifts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
35
+ page_content=' To highlight scanner differences, we present a detailed evaluation of color distributions, sharpness, and contrast of the individual scanner subsets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
36
+ page_content=' Additionally, to quantify the inherent scanner-induced domain shift, we train a tumor segmentation network on each scanner subset and evaluate the performance both in- and cross-domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
37
+ page_content=' We achieve a class-averaged in-domain intersection over union coefficient of up to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
38
+ page_content='86 and observe a cross-domain performance decrease of up to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
39
+ page_content='38, which confirms the inherent domain shift of the presented dataset and its negative impact on the performance of deep neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
40
+ page_content=' 1 Introduction The digitization of histological specimens with dedicated slide scanning systems has facilitated machine learning-based image analysis for histopathology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
41
+ page_content=' These algorithms have since assisted pathologists in a variety of routine tasks, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
42
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
43
+ page_content=' mitotic figure detection [1], for which they even have been able to outperform trained experts in controlled settings [1, 2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
44
+ page_content=' Still, their performance is highly dependent on the quality and availabil- ity of training data [3] and can deteriorate considerably on a test set where the image characteristics differ from the training data [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
45
+ page_content=' Such differences commonly referred to as “domain shift” can originate not only from different staining and tissue preparation protocols of different pathology laboratories but also from the digitization of histolog- 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
46
+ page_content='04423v1 [eess.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
47
+ page_content='IV] 11 Jan 2023 2 Wilm et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
48
+ page_content=' ical specimens with different scanning systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
49
+ page_content=' Especially from a clinical perspective, domain-agnostic models are important for generating accurate and reliable predictions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
50
+ page_content=' Previous work has shown that domain generalization techniques, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
51
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
52
+ page_content=' domain- adversarial training, can help to develop domain-agnostic models [5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
53
+ page_content=' For this, a training dataset composed of a wide range of different domains is highly desirable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
54
+ page_content=' So far, the most extensive publicly available multi-scanner histopathology dataset is the training set of the MICCAI MItosis DOmain Generalization (MIDOG) 2021 challenge [2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
55
+ page_content=' The dataset consists of 2 mm2-sized cropped regions of 200 breast cancer cases digitized with four scanners.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
56
+ page_content=' However, the cases were divided between the scanners, and perfor- mance differences can therefore not solely be attributed to the slide scanner but also to the case selection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
57
+ page_content=' The Mitos & Atypia dataset [6] is the only public multi-scanner histopathology dataset with local image correspondences, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
58
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
59
+ page_content=' the same case was digi- tized with multiple slide scanners, however, with 16 cases and two scanners, its extent is limited and it does not leave room for experiments with hold-out test scanners.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
60
+ page_content=' In this work, we present a canine cutaneous histopathology dataset, where each of the 44 samples was digitized with five different slide scanning systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
61
+ page_content=' This multi- scanner dataset provides local image correspondences, useful for domain generalization experiments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
62
+ page_content=' Accompanied by an annotation database of 1,243 polygon annotations for seven histologic classes (tumor, epidermis, dermis, subcutis, bone, cartilage, and a combined class of inflammation and necrosis), this is the first publicly available multi-scanner segmentation dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
63
+ page_content=' For each scanner subset, we provide a detailed evaluation of color distributions, sharpness, and contrast.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
64
+ page_content=' To quantify the extent of the scanner-induced domain shift, we performed a technical validation of the dataset by training a baseline tumor segmentation algorithm on each single scanner domain and then testing the algorithm across all scanners.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
65
+ page_content=' For some scanners, we observed a considerable performance decrease, which highlights the domain shift inherent in the dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
66
+ page_content=' The whole slide images (WSIs) and annotation databases are publicly available on Zenodo (https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
67
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
68
+ page_content='5281/zenodo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
69
+ page_content='7418555), and code for implementing the baseline architectures can be obtained from our GitHub repository (https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
70
+ page_content='com/DeepPathology/MultiScanner_SCC).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
71
+ page_content=' 2 Materials and methods The dataset presented in this work extends the publicly available CATCH dataset [7], a collection of 350 WSIs of seven of the most common canine cutaneous tumor subtypes (50 WSIs per subtype).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
72
+ page_content=' For the CATCH dataset, the specimens were digitized with the Aperio ScanScope CS2 (Leica, Germany) at a resolution of 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
73
+ page_content='25 µm/pixel using a 40 × objective lens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
74
+ page_content=' Use of these samples was approved by the local governmental authorities (State Office of Health and Social Affairs of Berlin, approval ID: StN 011/20).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
75
+ page_content=' For the multi-scanner dataset, we randomly selected one subtype (squamous cell carcinoma) and digitized the samples with four additional slide scanners (see Figure 1): NanoZoomer S210 (Hamamatsu, Japan), 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
76
+ page_content='22 µm/pixel NanoZoomer 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
77
+ page_content='0-HT (Hamamatsu, Japan), 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
78
+ page_content='23 µm/pixel Pannoramic 1000 (3DHISTECH, Hungary), 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
79
+ page_content='25 µm/pixel Aperio GT 450 (Leica, Germany), 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
80
+ page_content='26 µm/pixel Multi-Scanner Histopathology Dataset 3 (a) CS2 (b) NZ210 (c) NZ2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
81
+ page_content='0 (d) P1000 (e) GT450 Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
82
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
83
+ page_content=' Exemplary region of interest of the multi-scanner dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
84
+ page_content=' Due to severe scanning artifacts in at least one of the scans, six specimens were excluded from the dataset, resulting in a total of 220 WSIs (44 samples digitized with five scanners each).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
85
+ page_content=' The CATCH annotation database provides annotations for the individual tumor subtypes and six additional skin tissue classes (epidermis, dermis, subcutis, bone, cartilage, and a combined class of inflammation and necrosis).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
86
+ page_content=' We transferred all annotations to the other scanners using the WSI registration algorithm by Marzahl et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
87
+ page_content=' [8] and visually validated them by overlaying the transformed polygon annotations onto the scans.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
88
+ page_content=' We provide public access to the WSIs on Zenodo (https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
89
+ page_content=' org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
90
+ page_content='5281/zenodo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
91
+ page_content='7418555), licensed under a Creative Commons Attribution 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
92
+ page_content='0 International License.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
93
+ page_content=' However, due to storage restrictions, we have converted them to lower-resolution pyramidal TIFFs (4 µm/pixel), which has shown to be adequate for training segmentation tasks on the CATCH dataset [7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
94
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
95
+ page_content='1 Dataset validation For each scanner subset, we evaluated the average RGB color distribution, sharpness, and contrast.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
96
+ page_content=' For sharpness estimation, we used the cumulative probability of blur detection (CPBD) metric [9], which is a perceptual-based image sharpness metric.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
97
+ page_content=' It is computed via edge detection, followed by a blur estimation at the detected edges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
98
+ page_content=' The CPBD metric then corresponds to the cumulative probability of blur detection, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
99
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
100
+ page_content=' the percentage of image edges that fall below a threshold of a perceptually noticeable blur.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
101
+ page_content=' For implementation details, we refer to [9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
102
+ page_content=' For the analysis of RGB distributions and contrast, we used Otsu’s adaptive thresholding to separate foreground tissue from white background.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
103
+ page_content=' For each image, we calculated the average intensities of the color channels 𝐼𝑅, 𝐼𝐺, and 𝐼𝐵 in the detected tissue regions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
104
+ page_content=' Afterward, we converted the regions to grayscale and computed the Michelson contrast [10] 𝐶𝑀 as a measure of global contrast.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
105
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
106
+ page_content='2 Technical validation For technical validation of the dataset, we trained a segmentation model on each scan- ner domain and tested the algorithm across all scanners.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
107
+ page_content=' For model development, we performed a slide-level split into training (N=30), validation (N=5), and test (N=9) cases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
108
+ page_content=' We trained a UNet with a ResNet18 encoder pre-trained on ImageNet for the segmentation into tumor, non-tumor, and background.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
109
+ page_content=' For this, we combined all skin tissue classes into one non-tumor class and used the automatically detected background 4 Wilm et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
110
+ page_content=' areas to train the background class.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
111
+ page_content=' We trained the networks on image patches sized 512 × 512 pixels, extracted at a resolution of 4 µm/pixel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
112
+ page_content=' During each epoch, we sampled 50 patches per WSI within the annotated polygons.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
113
+ page_content=' Due to a high class imbalance, we randomly sampled the polygons with a class-weighting of 10 % background and 45 % each of tumor and non-tumor regions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
114
+ page_content=' For each scanner, we applied z-score normaliza- tion with the training set statistics (mean and standard deviation) and performed data augmentation using random flipping, affine transformations, and random lightning and contrast change.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
115
+ page_content=' We used the Adam optimizer and trained the networks with a com- bination of cross-entropy and Dice loss.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
116
+ page_content=' We trained the models with a batch size of 8 and a cyclic learning rate of 10−4 for 100 epochs, after which we observed model convergence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
117
+ page_content=' Model selection was guided by the highest intersection over union (mIoU) on the validation set.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
118
+ page_content=' 3 Results Figure 2 shows the RGB distribution of the non-background areas for the complete dataset of 44 WSIs per scanner.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
119
+ page_content=' The distributions match the exemplary patches in Figure 1, where the patches of the Aperio CS2 and the NanoZoomer 210 appear redder, which is reflected in a shift of the red pixel distributions to higher values.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
120
+ page_content=' When looking at the distributions of the Aperio GT450, all curves are densely located at the higher color component values, which corresponds to the bright appearance of the patch in Figure 1d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
121
+ page_content=' Table 1 summarizes the channel-wise color averages, sharpness, and contrast of the slide scanning systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
122
+ page_content=' These results further underline the visual impression of the patches in Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
123
+ page_content=' When calculating the ratio of the red and the blue color channel 𝐼𝑅/𝐼𝐵, the NZ210 results in a ratio of 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
124
+ page_content='12 and the NZ2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
125
+ page_content='0 in a ratio of 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
126
+ page_content='04, which matches the much redder appearance of the NZ210 patch and the bluer appearance of the NZ2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
127
+ page_content='0 patch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
128
+ page_content=' Overall, the CS2, NZ210, NZ2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
129
+ page_content='0, and P1000 show comparable sharpness and contrast values, while the Aperio 450 exhibits a slightly higher sharpness but a considerably lower contrast.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
130
+ page_content=' Figure 3 visualizes the mIoU when training the segmentation network on one scanner, and testing it on all scanners.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
131
+ page_content=' The results show high in-domain performance (diagonal) with mIoU values between 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
132
+ page_content='82 for the P1000 and GT450, and 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
133
+ page_content='86 for the NZ210.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
134
+ page_content=' The cross-domain performance highlights the scanner-induced domain shift inherent in our dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
135
+ page_content=' While the networks trained on the CS2 and the NZ210 generalize considerably well, with performance decreases of up to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
136
+ page_content='08 and 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
137
+ page_content='12 compared to the in- domain mIoU, the highest cross-domain performance drop was observed when training 0 50 100 150 200 250 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
138
+ page_content='000 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
139
+ page_content='005 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
140
+ page_content='010 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
141
+ page_content='015 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
142
+ page_content='020 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
143
+ page_content='025 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
144
+ page_content='030 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
145
+ page_content='035 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
146
+ page_content='040 Density (a) CS2 0 50 100 150 200 250 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
147
+ page_content='000 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
148
+ page_content='005 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
149
+ page_content='010 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
150
+ page_content='015 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
151
+ page_content='020 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
152
+ page_content='025 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
153
+ page_content='030 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
154
+ page_content='035 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
155
+ page_content='040 Density (b) NZ210 0 50 100 150 200 250 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
156
+ page_content='000 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
157
+ page_content='005 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
158
+ page_content='010 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
159
+ page_content='015 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
160
+ page_content='020 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
161
+ page_content='025 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
162
+ page_content='030 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
163
+ page_content='035 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
164
+ page_content='040 Density (c) NZ2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
165
+ page_content='0 0 50 100 150 200 250 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
166
+ page_content='000 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
167
+ page_content='005 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
168
+ page_content='010 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
169
+ page_content='015 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
170
+ page_content='020 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
171
+ page_content='025 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
172
+ page_content='030 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
173
+ page_content='035 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
174
+ page_content='040 Density (d) P1000 0 50 100 150 200 250 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
175
+ page_content='000 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
176
+ page_content='005 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
177
+ page_content='010 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
178
+ page_content='015 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
179
+ page_content='020 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
180
+ page_content='025 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
181
+ page_content='030 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
182
+ page_content='035 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
183
+ page_content='040 Density (e) GT450 Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
184
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
185
+ page_content=' Kernel density estimation of RGB values per scanner.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
186
+ page_content=' Multi-Scanner Histopathology Dataset 5 Tab.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
187
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
188
+ page_content=' Channel-wise color distributions 𝐼𝑅, 𝐼𝐺, and 𝐼𝐵, sharpness 𝑆𝐶𝑃𝐵𝐷 calculated as cumu- lative probability of blur detection, and Michelson contrast 𝐶𝑀 of the scanners (𝜇 ± 𝜎).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
189
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
190
+ page_content=' 𝐼𝑅 𝐼𝐺 𝐼𝐵 𝑆𝐶𝑃𝐵𝐷 𝐶𝑀 CS2 201.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
191
+ page_content='84 ± 19.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
192
+ page_content='46 153.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
193
+ page_content='18 ± 35.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
194
+ page_content='41 171.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
195
+ page_content='54 ± 30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
196
+ page_content='02 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
197
+ page_content='80 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
198
+ page_content='02 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
199
+ page_content='74 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
200
+ page_content='12 NZ210 218.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
201
+ page_content='88 ± 17.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
202
+ page_content='96 172.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
203
+ page_content='64 ± 28.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
204
+ page_content='04 195.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
205
+ page_content='26 ± 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
206
+ page_content='15 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
207
+ page_content='82 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
208
+ page_content='03 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
209
+ page_content='81 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
210
+ page_content='14 NZ2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
211
+ page_content='0 192.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
212
+ page_content='49 ± 21.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
213
+ page_content='63 153.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
214
+ page_content='46 ± 36.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
215
+ page_content='72 184.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
216
+ page_content='51 ± 23.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
217
+ page_content='90 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
218
+ page_content='81 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
219
+ page_content='02 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
220
+ page_content='81 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
221
+ page_content='13 P1000 223.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
222
+ page_content='41 ± 18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
223
+ page_content='60 164.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
224
+ page_content='97 ± 41.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
225
+ page_content='15 211.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
226
+ page_content='44 ± 21.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
227
+ page_content='64 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
228
+ page_content='80 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
229
+ page_content='02 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
230
+ page_content='71 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
231
+ page_content='14 GT450 226.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
232
+ page_content='59 ± 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
233
+ page_content='99 208.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
234
+ page_content='18 ± 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
235
+ page_content='88 218.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
236
+ page_content='80 ± 15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
237
+ page_content='92 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
238
+ page_content='84 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
239
+ page_content='04 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
240
+ page_content='53 ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
241
+ page_content='15 on the P1000, with a decrease of up to 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
242
+ page_content='38.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
243
+ page_content=' A visual inspection of segmentation results showed that the network trained on the P1000 misclassified many background areas of the other scanners.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
244
+ page_content=' A reason might be the integrated tissue detection of the P1000, which sets all pixels outside the tissue bounding box to (255, 255, 255) in order to reduce scanning times.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
245
+ page_content=' This artificially removes common artifacts, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
246
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
247
+ page_content=' dust particles, and the network might only look for high pixel values and not learn the morphological characteristics of background areas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
248
+ page_content=' CS2 NZ210 NZ2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
249
+ page_content='0 P1000 GT450 test CS2 NZ210 NZ2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
250
+ page_content='0 P1000 GT450 train 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
251
+ page_content='83 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
252
+ page_content='83 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
253
+ page_content='82 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
254
+ page_content='75 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
255
+ page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
256
+ page_content='79 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
257
+ page_content='86 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
258
+ page_content='84 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
259
+ page_content='74 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
260
+ page_content='81 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
261
+ page_content='71 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
262
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
263
+ page_content='84 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
264
+ page_content='82 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
265
+ page_content='79 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
266
+ page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
267
+ page_content='44 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
268
+ page_content='65 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
269
+ page_content='82 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
270
+ page_content='55 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
271
+ page_content='81 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
272
+ page_content='71 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
273
+ page_content='81 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
274
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
275
+ page_content='82 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
276
+ page_content='45 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
277
+ page_content='50 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
278
+ page_content='55 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
279
+ page_content='60 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
280
+ page_content='65 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
281
+ page_content='70 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
282
+ page_content='75 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
283
+ page_content='80 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
284
+ page_content='85 Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
285
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
286
+ page_content=' Scanner-wise performance of segmentation net- works.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
287
+ page_content=' Matrix entry 𝑚𝑖, 𝑗 is the mean intersection over union (mIoU) when training on the scanning system in row 𝑖 and testing on the scanning system in column 𝑗.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
288
+ page_content=' Diagonal elements indicate in-domain performance, whereas off-diagonal elements represent cross-domain performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
289
+ page_content=' 4 Discussion Our experiments have demonstrated the negative impact of scanner-induced domain shifts on the performance of deep neural networks, indicated by a considerable de- crease in mIoU on unseen scanners.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
290
+ page_content=' This confirms the observations of previous works and supports the need for methods that can tackle this domain shift and adequate datasets to evaluate their generalization capability.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
291
+ page_content=' The presented dataset exceeds exist- ing multi-scanner datasets in terms of sample size and scanning systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
292
+ page_content=' Furthermore, it provides local image correspondences, which isolate the scanner-induced from the morphology-induced domain shift and allow the development of algorithms dependent on these correspondences, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
293
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
294
+ page_content=' WSI registration algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
295
+ page_content=' We have implicitly shown the eligibility of our dataset for this application by successfully transferring our anno- tation database from the CS2 scanner to the remaining scanner using WSI registration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
296
+ page_content=' The detailed evaluation of our scanner subsets has highlighted considerable differences 6 Wilm et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
297
+ page_content=' regarding color distributions and contrasts present in clinically used scanners.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
298
+ page_content=' Surpris- ingly, even though our evaluations resulted in the lowest contrast value for the Aperio GT450, this did not impede segmentation performance, shown by an in-domain mIoU of 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
299
+ page_content='82, which is comparable to the in-domain mIoUs of the remaining scanners.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
300
+ page_content=' In our technical validation, we observed a large cross-domain performance decrease, especially when training on the P1000 scanner.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
301
+ page_content=' We assume that this can mainly be attributed to the unique pre-processing steps of the scanner vendor, as the P1000 showed similar image statistics to the CS2 but their average cross-domain performance differed considerably.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
302
+ page_content=' However, we also observed a cross-domain performance decrease for the remaining scanners, which indicates that some of the learned feature representations did not gen- eralize well across scanners.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
303
+ page_content=' Future work could focus on a closer evaluation of which scanner characteristics hinder the extraction of domain-agnostic features and should therefore be disregarded, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
304
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
305
+ page_content=' by using specific filters for data pre-processing or using adversarial training to punish the extraction of these features.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
306
+ page_content=' Acknowledgement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
307
+ page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
308
+ page_content='W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
309
+ page_content=' gratefully acknowledges the financial support received by Merck Healthcare KGaA and the technical support received by the Clinical Assay Strategy 1 group at Merck Healthcare KGaA during sample digitization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
310
+ page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
311
+ page_content='B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
312
+ page_content=' gratefully acknowledges support by d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
313
+ page_content='hip campus - Bavarian aim in form of a faculty endowment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
314
+ page_content=' References 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
315
+ page_content=' Aubreville M, Bertram CA, Marzahl C, Gurtner C, Dettwiler M, Schmidt A et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
316
+ page_content=' Deep learning algorithms out-perform veterinary pathologists in detecting the mitotically most active tumor region.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
317
+ page_content=' Sci Rep 10:16447 (2020), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
318
+ page_content=' 1–11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
319
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
320
+ page_content=' Aubreville M, Stathonikos N, Bertram CA, Klopleisch R, Hoeve N ter, Ciompi F et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
321
+ page_content=' Mitosis domain generalization in histopathology images–The MIDOG challenge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
322
+ page_content=' Med Image Anal 84:102699 (2023).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
323
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
324
+ page_content=' Deng S, Zhang X, Yan W, Chang EI, Fan Y, Lai M et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
325
+ page_content=' Deep learning in digital pathology image analysis: a survey.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
326
+ page_content=' Front Med 14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
327
+ page_content='4 (2020), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
328
+ page_content=' 470–487.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
329
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
330
+ page_content=' Stacke K, Eilertsen G, Unger J, Lundström C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
331
+ page_content=' Measuring domain shift for deep learning in histopathology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
332
+ page_content=' IEEE J Biomed Health Inform 25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
333
+ page_content='2 (2020), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
334
+ page_content=' 325–336.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
335
+ page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
336
+ page_content=' Wilm F, Marzahl C, Breininger K, Aubreville M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
337
+ page_content=' Domain adversarial RetinaNet as a refer- ence algorithm for the MItosis DOmain Generalization challenge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
338
+ page_content=' Biomedical Image Regis- tration, Domain Generalisation and Out-of-Distribution Analysis: MICCAI 2021 Challenges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
339
+ page_content=' Springer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
340
+ page_content=' 2022, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
341
+ page_content=' 5–13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
342
+ page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
343
+ page_content=' Roux L, Racoceanu D, Capron F, Calvo J, Attieh E, Le Naour G et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
344
+ page_content=' Mitos & Atypia.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
345
+ page_content=' Image Pervasive Access Lab (IPAL), Agency Sci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
346
+ page_content=', Technol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
347
+ page_content=' & Res.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
348
+ page_content=' Inst.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
349
+ page_content=' Infocom Res.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
350
+ page_content=', Singapore, Tech.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
351
+ page_content=' Rep 1 (2014), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
352
+ page_content=' 1–8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
353
+ page_content=' 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
354
+ page_content=' Wilm F et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
355
+ page_content=' CAnine CuTaneous Cancer Histology dataset (version 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
356
+ page_content=' The Cancer Imaging Archive (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
357
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
358
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
359
+ page_content='7937/TCIA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
360
+ page_content='2M93-FX66.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
361
+ page_content=' 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
362
+ page_content=' Marzahl C, Wilm F, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
363
+ page_content=' DF, Tharun L, Perner S, Bertram CA et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
364
+ page_content=' Robust quad-tree based registration on whole slide images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
365
+ page_content=' Comput Pathol (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
366
+ page_content=' PMLR, 2021, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
367
+ page_content=' 181–190.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
368
+ page_content=' 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
369
+ page_content=' Narvekar ND, Karam LJ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
370
+ page_content=' A no-reference image blur metric based on the cumulative proba- bility of blur detection (CPBD).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
371
+ page_content=' IEEE Trans Image Process 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
372
+ page_content='9 (2011), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
373
+ page_content=' 2678–2683.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
374
+ page_content=' 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
375
+ page_content=' Michelson AA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
376
+ page_content=' Studies in optics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
377
+ page_content=' Courier Corporation, 1995.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/7tE3T4oBgHgl3EQfRgnj/content/2301.04423v1.pdf'}
7tE4T4oBgHgl3EQfcwyw/content/2301.05086v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8538092d60635e1a8f7b3e486ab3bbbed3b418e4f21092866ac011bdf3157036
3
+ size 779900
7tE4T4oBgHgl3EQfcwyw/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a7ce0ccaab7932e08fdd2f2339251473d64641ade50885b2ae0f6c38aa1ae15
3
+ size 495926
89E1T4oBgHgl3EQfCALf/content/2301.02860v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:59fa7dc33031521347545f9f3f732defeb315cae4c35dfda05bd9a3ef3abc616
3
+ size 628157
89E1T4oBgHgl3EQfCALf/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6661419a40d546814701ce5d3f95fed63ff744a900b329b205ec35f362c7f03b
3
+ size 2818093
89E1T4oBgHgl3EQfCALf/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b9d9dd77a89159e11e30c6bd3d2c47ff4f4fdcef8ff49aa27aad8a8734b1f62e
3
+ size 109704
8NE2T4oBgHgl3EQf8Ag-/content/tmp_files/2301.04214v1.pdf.txt ADDED
@@ -0,0 +1,572 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ CageCoach: Sharing-Oriented Redaction-Capable
2
+ Distributed Cryptographic File System
3
+ Jason Carpenter
4
+ CARPE415@umn.edu
5
+ University of Minnesota
6
+ Minneapolis, MN
7
+ Zhi-Li Zhang
8
+ zhzhang@cs.umn.edu
9
+ University of Minnesota
10
+ Minneapolis, MN
11
+ ABSTRACT
12
+ The modern data economy is built on sharing data. However,
13
+ sharing data can be an expensive and risky endeavour. Exist-
14
+ ing sharing systems like Distributed File Systems provide full
15
+ read, write, and execute Role-based Access Control (RBAC)
16
+ for sharing data, but can be expensive and difficult to scale.
17
+ Likewise such systems operate on a binary access model for
18
+ their data, either a user can read all the data or read none of
19
+ the data. This approach is not necessary for a more read-only
20
+ oriented data landscape, and one where data contains many
21
+ dimensions that represent a risk if overshared. In order to
22
+ encourage users to share data and smooth out the process
23
+ of accessing such data a new approach is needed. This new
24
+ approach must simplify the RBAC of older DFS approaches
25
+ to something more read-only and something that integrates
26
+ redaction for user protections.
27
+ To accomplish this we present CageCoach, a simple sharing-
28
+ oriented Distributed Cryptographic File System (DCFS). Cage-
29
+ Coach leverages the simplicity and speed of basic HTTP,
30
+ linked data concepts, and automatic redaction systems to
31
+ facilitate safe and easy sharing of user data. The implemen-
32
+ tation of CageCoach is available at https://github.umn.edu/
33
+ CARPE415/CageCoach.
34
+ 1
35
+ INTRODUCTION
36
+ User-generated data drives the modern world. Everything
37
+ from Uber driver rides and Google search queries to video
38
+ game experiences and Amazon purchase patterns feed user
39
+ data back into these systems to provide insights for improve-
40
+ ment. Additionally, users sharing their data as part of crowd
41
+ sourcing solutions has proven key to reverse engineering gig
42
+ working applications such as Uber[5, 16, 18], Lyft[16, 18],
43
+ and Shipt[3, 18]. Further these efforts help solve civic and
44
+ national needs such as with Atlanta’s Data Dashboard[13],
45
+ Minneapolis’s Opendata program[7], or the United State’s
46
+ Citizen Science initiative[8].
47
+ However, users providing their data to these initiatives
48
+ often comes with a level of risk and a loss of control over the
49
+ data they provide. Once a user has handed over information
50
+ the safety considerations, redaction approaches, and man-
51
+ agement decisions are out of their control. Further, should
52
+ any shared user data become dangerous to a user, the user
53
+ has no more sway to alleviate this risk other than ask the
54
+ current data holder to act, a practice often fruitless.
55
+ In order to further encourage users to share their data, a
56
+ new sharing oriented data hosting system is required. Such a
57
+ platform must be simple to implement, easy to request data
58
+ from, but still provide some assurances of privacy and safety
59
+ for users involved. Crucially it should remain in the user’s
60
+ control, and not be subject to control by others even those
61
+ hosting data such as on public hosting systems. The privacy
62
+ capability must be granular not just in who can access data
63
+ but what specific data is accessible. For example, for some
64
+ users, sharing their full name to everyone who asks is un-
65
+ reasonable. Thus they should be able to share with some a
66
+ partial redaction of their name. Existing works such as Dis-
67
+ tributed File Systems (DFS) are promising, but require exten-
68
+ sive implementation, Role-based Access Control (RBAC) en-
69
+ forcement, and do not implement granular redaction. Other
70
+ platforms like Google Drive, Dropbox, and Kaggle are great
71
+ for sharing bulk data but also do not provide granular redac-
72
+ tion and require trusting of the platform holders to not share
73
+ otherwise redacted user data.
74
+ In this work, we introduce CageCoach a sharing oriented
75
+ distributed cryptographic file system. CageCoach’s notable
76
+ features are:
77
+ • Simple Trustless DCFS built over HTTP GET/POST
78
+ • Customizable RBAC and Datatype Granular Redac-
79
+ tion Pipeline
80
+ • Easier sharing with Decentralized data access and
81
+ centralized user control
82
+ CageCoach streamlines the older RBAC based models of
83
+ DFSs and decentralizes the data hosting approaches of plat-
84
+ forms making for an overall simpler means of sharing data
85
+ with others while retaining granular privacy control for users.
86
+ This system is leverages simple HTTP GET/POST operations
87
+ to interact with symmetrically encrypted files hosted on any
88
+ HTTP platform to achieve decentralized hosting. These files
89
+ point back to their owners, represented by a controlling
90
+ server, that can facilitate redacted data access for a data re-
91
+ quester providing user control of data access. Finally, the
92
+ 1
93
+ arXiv:2301.04214v1 [cs.CR] 10 Jan 2023
94
+
95
+ ,
96
+ user’s controlling server applies user defined redaction oper-
97
+ ations from a suite of modules CageCoach provides to reduce
98
+ sensitive data leakage.
99
+ CageCoach’s code can be found at https://github.umn.edu/
100
+ CARPE415/CageCoach.
101
+ 2
102
+ RELATED WORK
103
+ Distributed File Systems (DFS) and cryptographic file sys-
104
+ tems (DCFS) have been around for a long time with some
105
+ works as early as 1993[1] and as recent as 2020[2]. These are
106
+ mature fields with well-developed and commercial products
107
+ we see every day, such as Dropbox, GoogleDrive, Hadoop,
108
+ Ceph, and others[22]. Despite this, the changing data land-
109
+ scape and changing usage behaviors with data invite re-
110
+ examinations of existing systems to better fit them for a new
111
+ era. The work must relevant in the current data landscape,
112
+ data redaction, is an old field but with a renewed interest in
113
+ the face of big data breaches, data privacy concerns, and ma-
114
+ chine learning for data protection. In this section we outline
115
+ these two related areas and contrast them with our proposed
116
+ system.
117
+ 2.1
118
+ Distributed & Cryptographic File
119
+ Systems
120
+ Distributed File Systems (DFS) are systems for maintaining
121
+ coherent file management across desperate hosting devices.
122
+ Examples include standard file hosting such as Google Drive,
123
+ Dropbox, and InRupt’s Solid[19]. Such systems have a long
124
+ history and continued relevance in the modern era. DFS also
125
+ manifest as cloud storage systems, albeit with looser file sys-
126
+ tem format adherence to mesh with the more diverse Internet
127
+ access environment. Extending DFSs into privacy and secu-
128
+ rity oriented spaces yields the Distributed Cryptographic
129
+ File System (DCFS) domain. Works such as UPSS[2] focus
130
+ on creating a sharing-oriented and protective DFS with full
131
+ RBAC and mutable verifiable histories of each file involved as
132
+ a check against malicious behavior. Further other works such
133
+ as [10, 12] aim to utilize the blockchain to achieve the same
134
+ RBAC with a more decentralized approach. Finally, other
135
+ approaches aim to refine key management in encryption for
136
+ DFS[14].These systems while powerful, rely on relatively
137
+ expensive RBAC and infrastructure or require significant
138
+ trust for the platform holders. In the former case, simplify-
139
+ ing the RBAC with the mostly read-only reality of user data
140
+ can lower RBAC complexity significantly. In the latter case,
141
+ hosting infrastructure is still necessary, but one must create
142
+ a trustless environment in order to retain control of one’s
143
+ data even on such hosting platforms.
144
+ Our work focuses on streamlining data sharing by creat-
145
+ ing a middlepoint between strong, rigid, and RBAC focused
146
+ approaches such as DCFSs and trust-oriented data platforms
147
+ and services like Uber, Kaggle, and Gridwise.
148
+ 2.2
149
+ Data Redaction
150
+ Data redaction is not a new field, but has gained vigor in the
151
+ last decade or so as the data economy has shaped. Redac-
152
+ tion provides the means for which sensitive data can be
153
+ made less sensitive and thus less dangerous in the event of
154
+ leaks, breaches, or theft. Likewise, redaction has its place in
155
+ academic publications when such publications may contain
156
+ in themselves dangerous or sensitive information[4]. Many
157
+ existing tools provide a user the quick means of redacting
158
+ a document such as [6] and [20]. A handful of commercial
159
+ products, such as [21], [15], and [17], apply machine learning
160
+ to identify and remove automatically sensitive data. Finally,
161
+ other work such as [11] highlight an interesting scenario
162
+ where redaction itself must be transparent enough such that
163
+ the redaction doesn’t mislead the information. These systems
164
+ as implemented are not part of a sharing pipeline and are
165
+ applied ad-hoc to data. A system such as the one outlined by
166
+ UPSS[2], envisions such technologies are part of a pipeline
167
+ of data requests but did not implement or specify beyond
168
+ such designs.
169
+ Our work applies the concepts behind these redaction
170
+ systems, but crucially, as part of a standard granular access
171
+ pipeline and not as a one-off and static redaction. This in
172
+ effect realizes some aspects of the UPSS[2] pipeline, but with-
173
+ out the more complex full RBAC suite.
174
+ 3
175
+ PROBLEM AND DESIGN GOALS
176
+ In order to build a system that encourages users to share their
177
+ data two primary problems and design considerations must
178
+ be achieved: Simplification of access control for accessing
179
+ and requesting data and automatic policy informed data
180
+ redaction. With these two aspects a sharing-oriented DFS
181
+ will lower the cost of sharing and accessing data and provide
182
+ a wide net of protections for users who choose to share.
183
+ 3.1
184
+ Simplify Access Control For Data
185
+ Existing DFS systems utilize a full suite of RBAC function-
186
+ ality to provide read, write, and execute functionality for
187
+ shared files. These provisions while useful, require signif-
188
+ icant infrastructure such as certificates and user profiles
189
+ registered within the computational structure of the data
190
+ host. This full suite of RBAC is necessary if the group of
191
+ users intended to read, write, and/or execute the shared data,
192
+ but costly if sharing (read only) is the intention. By removing
193
+ the write and execute assumptions of RBAC we can in turn
194
+ simplify the operating infrastructure required for accessing
195
+ data and making sharing a lower cost effort. This lower cost
196
+ is necessary for encouraging users to share their data, as it
197
+ 2
198
+
199
+ CageCoach: Sharing-Oriented Redaction-Capable Distributed Cryptographic File System
200
+ ,
201
+ will be easier to host for consumption, and for consumers of
202
+ data as it will be easier to access.
203
+ 3.2
204
+ Provide Integrated Automatic User
205
+ Data Redaction
206
+ Regardless of ease of access, users must be given some as-
207
+ surances of safety, privacy, and proper use for their data.
208
+ Traditional RBAC focuses on binary access models for data,
209
+ either a user can read all the data or none of the data in a typi-
210
+ cally hosted file. This approach is not adequate for data items
211
+ that contain core sensitive fields. For example, a typical sales
212
+ receipt is useful for inventory systems and market trending
213
+ services, as they provide insights into purchases and sales
214
+ trends, however, these same receipts may contain the pur-
215
+ chaser’s name, credit card information, and/or address and
216
+ location. Such fields are not important for the overall trend,
217
+ but present a security risk for the user. In a binary RBAC
218
+ model, such fields would available if the receipt is available.
219
+ A more granular approach to access is needed. Such an ap-
220
+ proach is outlined but not realized or specified by UPSS[2].
221
+ Such an approach would require that when a user’s data is re-
222
+ quested by another, a trusted middle system acquires the raw
223
+ full set of data, and then redacts and removes information
224
+ that is included in the data but not allowed for that partic-
225
+ ular user. For example, removing the name, address, and
226
+ credit fields from the sales receipt scenario. This approach
227
+ is required to provide granular and safer exposure of user’s
228
+ data for general consumption. Further, this process can be
229
+ handled by user-defined policy thus providing guidelines
230
+ for any user data added in the future thus lowering sharing
231
+ costs further.
232
+ 4
233
+ CAGECOACH SYSTEM
234
+ We realize the goals of a sharing-oriented DFS with our
235
+ system CageCoach. CageCoach simplifies the RBAC and
236
+ infrastructure of existing DFSs and integrates redaction tech-
237
+ nologies into a data request pipeline. All of this together
238
+ creates a simple and easy means for users to safely and eas-
239
+ ily share their data. CageCoach is organized around several
240
+ concepts and a flow, outlined in fig. 1. Requesters, who re-
241
+ quest user data. Data hosts, which host encrypted data files
242
+ and some attached meta data files. Finally, a Data Control
243
+ Server (DCS) which manages the owner’s data, processes
244
+ requests made by requesters, and redacts outgoing sensitive
245
+ data. CageCoach’s operational use-case is:
246
+ (1) A owner uploads some data (video, text, audio, etc) to
247
+ a hosting system after encrypting and creating a meta
248
+ file for the data.
249
+ (2) A requester sees this data and examines the meta file
250
+ (using GET for example) for information as to where
251
+ the owner’s DCS operates.
252
+ Figure 1: CageCoach System, providing a streamlined
253
+ means for requestors to ask for data and receive useful
254
+ but protected data.
255
+ (3) The requester sends a POST request to the owner’s
256
+ DCS server, asking to view the original data item.
257
+ (4) The DCS receives this request, verifies the requester’s
258
+ identity through asymmetric key phrase decryption,
259
+ and then uses GET to retrieve the remotely hosted
260
+ encrypted data file.
261
+ (5) The DCS decrypts the file with its own internal sym-
262
+ metric key and then applies a series of redaction oper-
263
+ ations on the data.
264
+ (6) The DCS forwards the remaining unredacted data to
265
+ the requester, completing the request and preventing
266
+ unnecessary or forbidden data from leaving encrypt-
267
+ ed/controlled space.
268
+ The details for how the RBAC is simplified and how the
269
+ redaction is integrated is detailed in the following sections.
270
+ 4.1
271
+ Simplifying RBAC Using HTTP And
272
+ Read-Only Assumptions
273
+ CageCoach simplifies the primary RBAC and infrastructure
274
+ of other DFSs by assuming that user data need only be read,
275
+ not written too or executed collaboratively. Additionally,
276
+ unlike UPSS[2], since there is no write permissions data
277
+ versions are no longer necessary thus can relax the assump-
278
+ tion UPSS makes for needing a transparent modifications
279
+ tree. With this simplification in mind, CageCoach utilizes
280
+ the most common means of read-only operation on the Inter-
281
+ net: HTTP GET. This means that user data can be hosted on
282
+ any system that facilitates HTTP GET, such as open source
283
+ systems like Apache2. The data that gets hosted is the user’s
284
+ encrypted file and a plain text meta data file. Using some con-
285
+ cepts of linked data, the meta data file points to the owner’s
286
+ DCS to actually facilitate the request for data among other
287
+ fields. The total definition for this meta data file is:
288
+ 3
289
+
290
+ (2)Directrequestertodataowner
291
+ B
292
+ (1)Requestaccesstodata
293
+ 000
294
+ (3) Downloads encrypted file
295
+ Data Host
296
+ (Dropbox,GDrive,Apache)
297
+ 000
298
+ 000
299
+ HTTP Data Control
300
+ Requester
301
+ Server (Dcs)
302
+ AccessControl(ACL)And
303
+ DataCensoringRules(DCR)
304
+ (5)Alloweddatais returned
305
+ (4)Decryptsandprocessesfile,
306
+ • owner-url: URL indicating where the owner’s DCS is. The
307
+ place where any request will be processed.
308
+ • meta-data: User filled info tags about the data, such as
309
+ what format it is, overall context. All of this information is
310
+ optional.
311
+ • description: A more textual description of the data, op-
312
+ tional if an owner wishes to provide more than just tags of
313
+ information.
314
+ • data-url: The URL indicating where the data this meta file
315
+ belongs to is. This is important for providing some backup if
316
+ the meta file is moved elsewhere or if it must live elsewhere
317
+ in hosting.
318
+ • data-hash-sha1: A sha1 of the encrypted file to provide a
319
+ minimal check for any requester that wishes to double check
320
+ the file they are asking about.
321
+ Despite our overall read-only approach, some computa-
322
+ tional efforts are still required. Namely the decryption of
323
+ the requested file and the granular redaction of information
324
+ within this file. The purpose of redirecting the requester
325
+ from the data host is to provide a centralized response by
326
+ the owner and the computational space for redaction poli-
327
+ cies. The requester will send an HTTP POST request to the
328
+ DCS indicated by the owner-url and receive a decrypted
329
+ and redacted data file. The DCS’s process is implemented as
330
+ a basic python HTTP server. The process involves several
331
+ steps: 1) Receive a POST request with the URL of the data
332
+ being requested and optionally an ID and asymmetrically
333
+ encrypted phrase to verify the requester’s identity. Cage-
334
+ Coach implements this with RSA public/private key pairs.
335
+ 2) Locate the data profile for the requested data on the DCS
336
+ server, itself a simple text file containing pointers to decrypt
337
+ and identify the requested data. Additionally, if the user is
338
+ registered with the DCS (registry comprised of a private key
339
+ for decrypting phrases, the plain text passphrase, and a id
340
+ name) it will load their profile. We implement this as simply
341
+ a separate json file containing each requester’s information.
342
+ Our approach assumes this registry happens outside of the
343
+ CageCoach architecture but can utilize it. 3) The DCS will
344
+ download the encrypted file from its host using HTTP GET.
345
+ After reception, the DCS will decrypt the data file and load
346
+ the redaction policies that match the specific data item (by
347
+ its name), the data type (json, mp3, etc), and finally the poli-
348
+ cies for the requester (if provided). CageCoach implements
349
+ this encryption with symmetric keys using pythons Fernet
350
+ library. 4) The DCS will apply these redaction operations,
351
+ gradually chipping away data until left with whatever is al-
352
+ lowed to pass. 5) The remaining data is sent to the requester
353
+ in the POST response. The specifics of how the redaction is
354
+ applied is outlined in the next section.
355
+ Figure 2: CageCoach Redaction Pipeline, providing a
356
+ generalized measure of privacy assurance.
357
+ 4.2
358
+ Access Control and Redaction
359
+ Pipelines
360
+ CageCoach’s read-only assumption for user data is not a
361
+ binary, like older models of RBAC based system, but granu-
362
+ lar. By using a series of redaction operations over requested
363
+ data, CageCoach can allow partial access to data. These op-
364
+ erations, dividable by datatype as outlined in Fig. 2, provide
365
+ for blurring faces in images, redacting text in jsons and csvs,
366
+ and muting specific words or background noises recognized
367
+ in audio. In the overall data request pipeline after a user
368
+ has requested data and the DCS has downloaded the target
369
+ data, it will apply these redaction operations according to
370
+ the specific user, datatype, and data item. This provides three
371
+ levels of granularity for controlling data flow outwards to re-
372
+ questers: by datatype (all jsons, csvs, mp3s, etc), by data item
373
+ (ex: specific files like example-1.json hosted on Google Drive
374
+ or example-2.json hosted on dropbox), and by requester id
375
+ (ex: John Doe can access the user’s name, but Jane Doe can
376
+ only see the user’s first name). However, such operations
377
+ that would be specific to an owner, such as blurring only
378
+ the owner’s face, require the owner provide their own data
379
+ to the redacting DCS. Our implementation we provide does
380
+ general redaction such as blurring all faces and removing a
381
+ handful of well known text fields such as social security and
382
+ street addresses. We do not implement an audio redaction
383
+ approach as there isn’t a general python capable pre-built
384
+ audio redaction library nor a common set of what "words"
385
+ should be auto removed, unlike faces in images. CageCoach
386
+ does support extensions to these operations to tailor to spe-
387
+ cific users. Our implementation uses the Haar cascade and
388
+ OpenCV2 [9] python libraries for blurring faces (illustrated
389
+ with the blurring of photo of American Union Army General
390
+ Benjamin Butler fig. 3), and python Pandas to redact textual
391
+ data (example of such in fig. 4).
392
+ 4
393
+
394
+ 010110
395
+ RedactionProcessesByDataTypes
396
+ 010001
397
+ 111011
398
+ Blob
399
+ Age:43
400
+ Name:"JohnDoe"
401
+ SSN:"999-99-9999"
402
+ File
403
+ CensorFields
404
+ Encrypted
405
+ 8871
406
+ File
407
+ Image
408
+ CensorFaces/Persons
409
+ 4)
410
+ Audio
411
+ CensorAudio SegmentsCageCoach: Sharing-Oriented Redaction-Capable Distributed Cryptographic File System
412
+ ,
413
+ Figure 3: CageCoach Redaction Pipeline example blur-
414
+ ring a specific image.
415
+ Figure 4: CageCoach Redaction Pipeline example
416
+ redacting specific text and fields.
417
+ 5
418
+ CONCLUSION
419
+ In this work, we introduced a new sharing oriented imple-
420
+ mentation of DCFS: CageCoach. CageCoach streamlines the
421
+ older RBAC heavy and trust-necessary hosting models of
422
+ DFS, while using the simpler HTTP GET/POST ecosystem to
423
+ facilitate easier data sharing. All of this is possible while still
424
+ respecting the privacy of users through granular customize-
425
+ able redaction pipelines that handle removal of sensitive user
426
+ information.
427
+ 6
428
+ LIMITATIONS AND FUTURE WORK
429
+ CageCoach has a set of drawbacks and limitations. Cage-
430
+ Coach is implemented as a demonstration of a new inter-
431
+ pretation of sharing-oriented DCFS and not intended for
432
+ industrial or commercial use. Future implementations would
433
+ need to provide better integration with hosting services like
434
+ Google and Dropbox, and provide tougher and more robust
435
+ security checks and infrastructure. Likewise future work
436
+ improvements would be needed to make the redaction oper-
437
+ ations more capable and workable on a wider set of diverse
438
+ data. Notably there are two non-implementation limitations
439
+ that stunt CageCoach and the broader goal of safe sharing
440
+ oriented DFS:
441
+ • No system can stop external data reconstruction.
442
+ No matter if a user is using CageCoach, Google Drive,
443
+ or any other hosting system, external actors with access to
444
+ pieces of separate data can always reassemble it together.
445
+ For example, an actor A has access to a subset of data 1, and
446
+ an actor B has access to another subset of data 1. These two
447
+ actors are not allowed access to either subset of data by the
448
+ policies of the user whose data it is. However, this does not
449
+ stop nor disincentivise actor A and B from simply sharing
450
+ with each other the user’s data. Each filling in the other’s gap
451
+ of missing data. No system can solve this if the requesting
452
+ actors are able to observe data.
453
+ • Leakage is still possible through indirect implicating
454
+ fields.
455
+ CageCoach’s redaction pipeline is quite rudimentary, in
456
+ some cases data may be leaked through a combination of un-
457
+ related fields. For example, with a street address, a malicious
458
+ user may be able to correctly guess a zip code when paired
459
+ with other information. This is due to CageCoach’s inability
460
+ to understand the connections between data.
461
+ CageCoach’s unique sharing-oriented DCFS structure pro-
462
+ vides several new areas of exploration. CageCoach itself can
463
+ be expanded to cover more datatypes, and work can be done
464
+ to integrate the ingress of user’s data to the data hosts that
465
+ CageCoach manages.
466
+ 6.1
467
+ Collective Redaction Rules For
468
+ Multi-Owner Data
469
+ Given our system’s usage of a redaction pipeline, one could
470
+ envision a scenario where data that is collected by one user,
471
+ but contains multiple other users’ data is pass around each
472
+ impacted user’s DCS for specific group based redaction. This
473
+ would facilitate greater granularity of redaction and a sense
474
+ of group ownership over data and its privacy implications.
475
+ 6.2
476
+ Enhanced ACL And Redaction
477
+ Through Impact Trees
478
+ A future work could examine how to enhance the redaction
479
+ rules to include field implications to provide greater coverage
480
+ of privacy in the event a user misses these concepts them-
481
+ selves. This would fill in the gaps that leaking implicating
482
+ fields create.
483
+ REFERENCES
484
+ [1] Matt Blaze. 1993. A Cryptographic File System for UNIX. In Proceedings
485
+ of the 1st ACM Conference on Computer and Communications Security
486
+ (Fairfax, Virginia, USA) (CCS ’93). Association for Computing Machin-
487
+ ery, New York, NY, USA, 9–16. https://doi.org/10.1145/168588.168590
488
+ 5
489
+
490
+ "Name":"John Doe"
491
+ "Age""24"
492
+ "Height": "4 feet, 2 inches"
493
+ 1
494
+ "Age":
495
+ "24"
496
+ "Height":
497
+ "4 feet, x",
498
+ [2] Arastoo Bozorgi, Mahya Soleimani Jadidi, and Jonathan Anderson.
499
+ 2020. Challenges in Designing a Distributed Cryptographic File System.
500
+ In Security Protocols XXVII, Jonathan Anderson, Frank Stajano, Bruce
501
+ Christianson, and Vashek Matyáš (Eds.). Springer International Pub-
502
+ lishing, Cham, 177–192. https://link.springer.com/chapter/10.1007/
503
+ 978-3-030-57043-9_17
504
+ [3] Dan Calacci and Alex Pentland. 2022. Bargaining with the Black-Box:
505
+ Designing and Deploying Worker-Centric Tools to Audit Algorithmic
506
+ Management. Proc. ACM Hum.-Comput. Interact. 6, CSCW2, Article
507
+ 428 (nov 2022), 24 pages. https://doi.org/10.1145/3570601
508
+ [4] Arturo Casadevall, Lynn Enquist, Michael Imperiale, Paul Keim,
509
+ Michael Osterholm, and David Relman. 2013. Redaction of Sensi-
510
+ tive Data in the Publication of Dual Use Research of Concern. mBio 5
511
+ (12 2013). https://doi.org/10.1128/mBio.00991-13
512
+ [5] Le Chen, Alan Mislove, and Christo Wilson. 2015. Peeking Beneath the
513
+ Hood of Uber. In Proceedings of the 2015 Internet Measurement Confer-
514
+ ence (Tokyo, Japan) (IMC ’15). Association for Computing Machinery,
515
+ New York, NY, USA, 495–508. https://doi.org/10.1145/2815675.2815681
516
+ [6] extract team. [n.d.]. Automated Data Redaction Software. https://www.
517
+ extractsystems.com/automated-data-redaction-software accessed on
518
+ Dec 2022.
519
+ [7] Minneapolis Government. [n.d.]. Minneapolis Open Data.
520
+ https:
521
+ //opendata.minneapolismn.gov/ accessed on Sun 18 Dec 2022.
522
+ [8] United States Government. [n.d.]. Citizen Science.
523
+ https://www.
524
+ citizenscience.gov/# accessed on Sun 18 Dec 2022.
525
+ [9] Olli-Pekka Heinisuo. [n.d.]. OpenCV on Wheels.
526
+ https://pypi.org/
527
+ project/opencv-python/ accessed on Dec 2022.
528
+ [10] Hsiao-Shan Huang, Tian-Sheuan Chang, and Jhih-Yi Wu. 2020. A Se-
529
+ cure File Sharing System Based on IPFS and Blockchain. In Proceedings
530
+ of the 2020 2nd International Electronics Communication Conference.
531
+ ACM. https://doi.org/10.1145/3409934.3409948
532
+ [11] Jinhua Ma, Xinyi Huang, Yi Mu, and Robert H. Deng. 2022. Authen-
533
+ ticated Data Redaction With Accountability and Transparency. IEEE
534
+ Transactions on Dependable and Secure Computing 19, 1 (2022), 149–160.
535
+ https://doi.org/10.1109/TDSC.2020.2998135
536
+ [12] Muqaddas Naz, Fahad A. Al-zahrani, Rabiya Khalid, Nadeem Javaid,
537
+ Ali Mustafa Qamar, Muhammad Khalil Afzal, and Muhammad Shafiq.
538
+ 2019. A Secure Data Sharing Platform Using Blockchain and Inter-
539
+ planetary File System. Sustainability 11, 24 (2019). https://doi.org/10.
540
+ 3390/su11247054
541
+ [13] Firaz Peer and Carl DiSalvo. 2022.
542
+ The Work of Infrastructural
543
+ Bricoleurs in Building Civic Data Dashboards.
544
+ Proc. ACM Hum.-
545
+ Comput. Interact. 6, CSCW1, Article 124 (apr 2022), 25 pages. https:
546
+ //doi.org/10.1145/3512971
547
+ [14] K. V. Pradeep, V. Vijayakumar, V. Subramaniyaswamy, and Arash H.
548
+ Lashkari. 2019. An Efficient Framework for Sharing a File in a Secure
549
+ Manner Using Asymmetric Key Distribution Management in Cloud
550
+ Environment. J. Comput. Netw. Commun. 2019 (jan 2019), 8. https:
551
+ //doi.org/10.1155/2019/9852472
552
+ [15] redacted.ai team. [n.d.]. Redacted.ai. https://redacted.ai/ accessed on
553
+ Dec 2022.
554
+ [16] Todd W. Schneider. 2020. Reverse Engineering Uber and Lyft Surge
555
+ Pricing in Chicago.
556
+ https://toddwschneider.com/posts/chicago-
557
+ ridehail-surge-pricing/ accessed on Dec 2022.
558
+ [17] Document.Redact team. [n.d.]. Document.Redact.
559
+ https://super.ai/
560
+ blog/redacting-information-from-documents-automatically accessed
561
+ on Dec 2022.
562
+ [18] Gridwise Team. [n.d.]. Gridwise. https://gridwise.io/ access on Mon
563
+ 12 Dec 2022.
564
+ [19] InRupt Team. [n.d.]. Solid. https://www.inrupt.com/solid
565
+ [20] Objective Team. [n.d.]. Objective Redact. https://www.objective.com/
566
+ products/objective-redact accessed on Dec 2022.
567
+ [21] DOMA Technologies. [n.d.]. DOMA. https://www.domaonline.com/
568
+ solutions/digitalservices/data-redaction/ accessed on Dec 2022.
569
+ [22] Mahmut Ünver and Atilla Erguzen. 2016. A STUDY ON DISTRIBUTED
570
+ FILE SYSTEMS: An example of NFS.
571
+ 6
572
+
8NE2T4oBgHgl3EQf8Ag-/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,364 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf,len=363
2
+ page_content='CageCoach: Sharing-Oriented Redaction-Capable Distributed Cryptographic File System Jason Carpenter CARPE415@umn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
3
+ page_content='edu University of Minnesota Minneapolis, MN Zhi-Li Zhang zhzhang@cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
4
+ page_content='umn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
5
+ page_content='edu University of Minnesota Minneapolis, MN ABSTRACT The modern data economy is built on sharing data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
6
+ page_content=' However, sharing data can be an expensive and risky endeavour.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
7
+ page_content=' Exist- ing sharing systems like Distributed File Systems provide full read, write, and execute Role-based Access Control (RBAC) for sharing data, but can be expensive and difficult to scale.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
8
+ page_content=' Likewise such systems operate on a binary access model for their data, either a user can read all the data or read none of the data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
9
+ page_content=' This approach is not necessary for a more read-only oriented data landscape, and one where data contains many dimensions that represent a risk if overshared.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
10
+ page_content=' In order to encourage users to share data and smooth out the process of accessing such data a new approach is needed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
11
+ page_content=' This new approach must simplify the RBAC of older DFS approaches to something more read-only and something that integrates redaction for user protections.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
12
+ page_content=' To accomplish this we present CageCoach, a simple sharing- oriented Distributed Cryptographic File System (DCFS).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
13
+ page_content=' Cage- Coach leverages the simplicity and speed of basic HTTP, linked data concepts, and automatic redaction systems to facilitate safe and easy sharing of user data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
14
+ page_content=' The implemen- tation of CageCoach is available at https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
15
+ page_content='umn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
16
+ page_content='edu/ CARPE415/CageCoach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
17
+ page_content=' 1 INTRODUCTION User-generated data drives the modern world.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
18
+ page_content=' Everything from Uber driver rides and Google search queries to video game experiences and Amazon purchase patterns feed user data back into these systems to provide insights for improve- ment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
19
+ page_content=' Additionally, users sharing their data as part of crowd sourcing solutions has proven key to reverse engineering gig working applications such as Uber[5, 16, 18], Lyft[16, 18], and Shipt[3, 18].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
20
+ page_content=' Further these efforts help solve civic and national needs such as with Atlanta’s Data Dashboard[13], Minneapolis’s Opendata program[7], or the United State’s Citizen Science initiative[8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
21
+ page_content=' However, users providing their data to these initiatives often comes with a level of risk and a loss of control over the data they provide.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
22
+ page_content=' Once a user has handed over information the safety considerations, redaction approaches, and man- agement decisions are out of their control.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
23
+ page_content=' Further, should any shared user data become dangerous to a user, the user has no more sway to alleviate this risk other than ask the current data holder to act, a practice often fruitless.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
24
+ page_content=' In order to further encourage users to share their data, a new sharing oriented data hosting system is required.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
25
+ page_content=' Such a platform must be simple to implement, easy to request data from, but still provide some assurances of privacy and safety for users involved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
26
+ page_content=' Crucially it should remain in the user’s control, and not be subject to control by others even those hosting data such as on public hosting systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
27
+ page_content=' The privacy capability must be granular not just in who can access data but what specific data is accessible.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
28
+ page_content=' For example, for some users, sharing their full name to everyone who asks is un- reasonable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
29
+ page_content=' Thus they should be able to share with some a partial redaction of their name.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
30
+ page_content=' Existing works such as Dis- tributed File Systems (DFS) are promising, but require exten- sive implementation, Role-based Access Control (RBAC) en- forcement, and do not implement granular redaction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
31
+ page_content=' Other platforms like Google Drive, Dropbox, and Kaggle are great for sharing bulk data but also do not provide granular redac- tion and require trusting of the platform holders to not share otherwise redacted user data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
32
+ page_content=' In this work, we introduce CageCoach a sharing oriented distributed cryptographic file system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
33
+ page_content=' CageCoach’s notable features are: Simple Trustless DCFS built over HTTP GET/POST Customizable RBAC and Datatype Granular Redac- tion Pipeline Easier sharing with Decentralized data access and centralized user control CageCoach streamlines the older RBAC based models of DFSs and decentralizes the data hosting approaches of plat- forms making for an overall simpler means of sharing data with others while retaining granular privacy control for users.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
34
+ page_content=' This system is leverages simple HTTP GET/POST operations to interact with symmetrically encrypted files hosted on any HTTP platform to achieve decentralized hosting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
35
+ page_content=' These files point back to their owners, represented by a controlling server, that can facilitate redacted data access for a data re- quester providing user control of data access.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
36
+ page_content=' Finally, the 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
37
+ page_content='04214v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
38
+ page_content='CR] 10 Jan 2023 , user’s controlling server applies user defined redaction oper- ations from a suite of modules CageCoach provides to reduce sensitive data leakage.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
39
+ page_content=' CageCoach’s code can be found at https://github.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
40
+ page_content='umn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
41
+ page_content='edu/ CARPE415/CageCoach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
42
+ page_content=' 2 RELATED WORK Distributed File Systems (DFS) and cryptographic file sys- tems (DCFS) have been around for a long time with some works as early as 1993[1] and as recent as 2020[2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
43
+ page_content=' These are mature fields with well-developed and commercial products we see every day, such as Dropbox, GoogleDrive, Hadoop, Ceph, and others[22].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
44
+ page_content=' Despite this, the changing data land- scape and changing usage behaviors with data invite re- examinations of existing systems to better fit them for a new era.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
45
+ page_content=' The work must relevant in the current data landscape, data redaction, is an old field but with a renewed interest in the face of big data breaches, data privacy concerns, and ma- chine learning for data protection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
46
+ page_content=' In this section we outline these two related areas and contrast them with our proposed system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
47
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
48
+ page_content='1 Distributed & Cryptographic File Systems Distributed File Systems (DFS) are systems for maintaining coherent file management across desperate hosting devices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
49
+ page_content=' Examples include standard file hosting such as Google Drive, Dropbox, and InRupt’s Solid[19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
50
+ page_content=' Such systems have a long history and continued relevance in the modern era.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
51
+ page_content=' DFS also manifest as cloud storage systems, albeit with looser file sys- tem format adherence to mesh with the more diverse Internet access environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
52
+ page_content=' Extending DFSs into privacy and secu- rity oriented spaces yields the Distributed Cryptographic File System (DCFS) domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
53
+ page_content=' Works such as UPSS[2] focus on creating a sharing-oriented and protective DFS with full RBAC and mutable verifiable histories of each file involved as a check against malicious behavior.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
54
+ page_content=' Further other works such as [10, 12] aim to utilize the blockchain to achieve the same RBAC with a more decentralized approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
55
+ page_content=' Finally, other approaches aim to refine key management in encryption for DFS[14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
56
+ page_content='These systems while powerful, rely on relatively expensive RBAC and infrastructure or require significant trust for the platform holders.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
57
+ page_content=' In the former case, simplify- ing the RBAC with the mostly read-only reality of user data can lower RBAC complexity significantly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
58
+ page_content=' In the latter case, hosting infrastructure is still necessary, but one must create a trustless environment in order to retain control of one’s data even on such hosting platforms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
59
+ page_content=' Our work focuses on streamlining data sharing by creat- ing a middlepoint between strong, rigid, and RBAC focused approaches such as DCFSs and trust-oriented data platforms and services like Uber, Kaggle, and Gridwise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
60
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
61
+ page_content='2 Data Redaction Data redaction is not a new field, but has gained vigor in the last decade or so as the data economy has shaped.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
62
+ page_content=' Redac- tion provides the means for which sensitive data can be made less sensitive and thus less dangerous in the event of leaks, breaches, or theft.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
63
+ page_content=' Likewise, redaction has its place in academic publications when such publications may contain in themselves dangerous or sensitive information[4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
64
+ page_content=' Many existing tools provide a user the quick means of redacting a document such as [6] and [20].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
65
+ page_content=' A handful of commercial products, such as [21], [15], and [17], apply machine learning to identify and remove automatically sensitive data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
66
+ page_content=' Finally, other work such as [11] highlight an interesting scenario where redaction itself must be transparent enough such that the redaction doesn’t mislead the information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
67
+ page_content=' These systems as implemented are not part of a sharing pipeline and are applied ad-hoc to data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
68
+ page_content=' A system such as the one outlined by UPSS[2], envisions such technologies are part of a pipeline of data requests but did not implement or specify beyond such designs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
69
+ page_content=' Our work applies the concepts behind these redaction systems, but crucially, as part of a standard granular access pipeline and not as a one-off and static redaction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
70
+ page_content=' This in effect realizes some aspects of the UPSS[2] pipeline, but with- out the more complex full RBAC suite.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
71
+ page_content=' 3 PROBLEM AND DESIGN GOALS In order to build a system that encourages users to share their data two primary problems and design considerations must be achieved: Simplification of access control for accessing and requesting data and automatic policy informed data redaction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
72
+ page_content=' With these two aspects a sharing-oriented DFS will lower the cost of sharing and accessing data and provide a wide net of protections for users who choose to share.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
73
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
74
+ page_content='1 Simplify Access Control For Data Existing DFS systems utilize a full suite of RBAC function- ality to provide read, write, and execute functionality for shared files.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
75
+ page_content=' These provisions while useful, require signif- icant infrastructure such as certificates and user profiles registered within the computational structure of the data host.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
76
+ page_content=' This full suite of RBAC is necessary if the group of users intended to read, write, and/or execute the shared data, but costly if sharing (read only) is the intention.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
77
+ page_content=' By removing the write and execute assumptions of RBAC we can in turn simplify the operating infrastructure required for accessing data and making sharing a lower cost effort.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
78
+ page_content=' This lower cost is necessary for encouraging users to share their data, as it 2 CageCoach: Sharing-Oriented Redaction-Capable Distributed Cryptographic File System , will be easier to host for consumption, and for consumers of data as it will be easier to access.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
79
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
80
+ page_content='2 Provide Integrated Automatic User Data Redaction Regardless of ease of access, users must be given some as- surances of safety, privacy, and proper use for their data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
81
+ page_content=' Traditional RBAC focuses on binary access models for data, either a user can read all the data or none of the data in a typi- cally hosted file.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
82
+ page_content=' This approach is not adequate for data items that contain core sensitive fields.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
83
+ page_content=' For example, a typical sales receipt is useful for inventory systems and market trending services, as they provide insights into purchases and sales trends, however, these same receipts may contain the pur- chaser’s name, credit card information, and/or address and location.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
84
+ page_content=' Such fields are not important for the overall trend, but present a security risk for the user.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
85
+ page_content=' In a binary RBAC model, such fields would available if the receipt is available.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
86
+ page_content=' A more granular approach to access is needed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
87
+ page_content=' Such an ap- proach is outlined but not realized or specified by UPSS[2].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
88
+ page_content=' Such an approach would require that when a user’s data is re- quested by another, a trusted middle system acquires the raw full set of data, and then redacts and removes information that is included in the data but not allowed for that partic- ular user.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
89
+ page_content=' For example, removing the name, address, and credit fields from the sales receipt scenario.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
90
+ page_content=' This approach is required to provide granular and safer exposure of user’s data for general consumption.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
91
+ page_content=' Further, this process can be handled by user-defined policy thus providing guidelines for any user data added in the future thus lowering sharing costs further.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
92
+ page_content=' 4 CAGECOACH SYSTEM We realize the goals of a sharing-oriented DFS with our system CageCoach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
93
+ page_content=' CageCoach simplifies the RBAC and infrastructure of existing DFSs and integrates redaction tech- nologies into a data request pipeline.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
94
+ page_content=' All of this together creates a simple and easy means for users to safely and eas- ily share their data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
95
+ page_content=' CageCoach is organized around several concepts and a flow, outlined in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
96
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
97
+ page_content=' Requesters, who re- quest user data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
98
+ page_content=' Data hosts, which host encrypted data files and some attached meta data files.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
99
+ page_content=' Finally, a Data Control Server (DCS) which manages the owner’s data, processes requests made by requesters, and redacts outgoing sensitive data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
100
+ page_content=' CageCoach’s operational use-case is: (1) A owner uploads some data (video, text, audio, etc) to a hosting system after encrypting and creating a meta file for the data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
101
+ page_content=' (2) A requester sees this data and examines the meta file (using GET for example) for information as to where the owner’s DCS operates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
102
+ page_content=' Figure 1: CageCoach System, providing a streamlined means for requestors to ask for data and receive useful but protected data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
103
+ page_content=' (3) The requester sends a POST request to the owner’s DCS server, asking to view the original data item.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
104
+ page_content=' (4) The DCS receives this request, verifies the requester’s identity through asymmetric key phrase decryption, and then uses GET to retrieve the remotely hosted encrypted data file.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
105
+ page_content=' (5) The DCS decrypts the file with its own internal sym- metric key and then applies a series of redaction oper- ations on the data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
106
+ page_content=' (6) The DCS forwards the remaining unredacted data to the requester, completing the request and preventing unnecessary or forbidden data from leaving encrypt- ed/controlled space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
107
+ page_content=' The details for how the RBAC is simplified and how the redaction is integrated is detailed in the following sections.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
108
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
109
+ page_content='1 Simplifying RBAC Using HTTP And Read-Only Assumptions CageCoach simplifies the primary RBAC and infrastructure of other DFSs by assuming that user data need only be read, not written too or executed collaboratively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
110
+ page_content=' Additionally, unlike UPSS[2], since there is no write permissions data versions are no longer necessary thus can relax the assump- tion UPSS makes for needing a transparent modifications tree.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
111
+ page_content=' With this simplification in mind, CageCoach utilizes the most common means of read-only operation on the Inter- net: HTTP GET.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
112
+ page_content=' This means that user data can be hosted on any system that facilitates HTTP GET, such as open source systems like Apache2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
113
+ page_content=' The data that gets hosted is the user’s encrypted file and a plain text meta data file.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
114
+ page_content=' Using some con- cepts of linked data, the meta data file points to the owner’s DCS to actually facilitate the request for data among other fields.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
115
+ page_content=' The total definition for this meta data file is: 3 (2)Directrequestertodataowner B (1)Requestaccesstodata 000 (3) Downloads encrypted file Data Host (Dropbox,GDrive,Apache) 000 000 HTTP Data Control Requester Server (Dcs) AccessControl(ACL)And DataCensoringRules(DCR) (5)Alloweddatais returned (4)Decryptsandprocessesfile, owner-url: URL indicating where the owner’s DCS is.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
116
+ page_content=' The place where any request will be processed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
117
+ page_content=' meta-data: User filled info tags about the data, such as what format it is, overall context.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
118
+ page_content=' All of this information is optional.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
119
+ page_content=' description: A more textual description of the data, op- tional if an owner wishes to provide more than just tags of information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
120
+ page_content=' data-url: The URL indicating where the data this meta file belongs to is.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
121
+ page_content=' This is important for providing some backup if the meta file is moved elsewhere or if it must live elsewhere in hosting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
122
+ page_content=' data-hash-sha1: A sha1 of the encrypted file to provide a minimal check for any requester that wishes to double check the file they are asking about.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
123
+ page_content=' Despite our overall read-only approach, some computa- tional efforts are still required.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
124
+ page_content=' Namely the decryption of the requested file and the granular redaction of information within this file.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
125
+ page_content=' The purpose of redirecting the requester from the data host is to provide a centralized response by the owner and the computational space for redaction poli- cies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
126
+ page_content=' The requester will send an HTTP POST request to the DCS indicated by the owner-url and receive a decrypted and redacted data file.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
127
+ page_content=' The DCS’s process is implemented as a basic python HTTP server.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
128
+ page_content=' The process involves several steps: 1) Receive a POST request with the URL of the data being requested and optionally an ID and asymmetrically encrypted phrase to verify the requester’s identity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
129
+ page_content=' Cage- Coach implements this with RSA public/private key pairs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
130
+ page_content=' 2) Locate the data profile for the requested data on the DCS server, itself a simple text file containing pointers to decrypt and identify the requested data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
131
+ page_content=' Additionally, if the user is registered with the DCS (registry comprised of a private key for decrypting phrases, the plain text passphrase, and a id name) it will load their profile.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
132
+ page_content=' We implement this as simply a separate json file containing each requester’s information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
133
+ page_content=' Our approach assumes this registry happens outside of the CageCoach architecture but can utilize it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
134
+ page_content=' 3) The DCS will download the encrypted file from its host using HTTP GET.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
135
+ page_content=' After reception, the DCS will decrypt the data file and load the redaction policies that match the specific data item (by its name), the data type (json, mp3, etc), and finally the poli- cies for the requester (if provided).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
136
+ page_content=' CageCoach implements this encryption with symmetric keys using pythons Fernet library.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
137
+ page_content=' 4) The DCS will apply these redaction operations, gradually chipping away data until left with whatever is al- lowed to pass.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
138
+ page_content=' 5) The remaining data is sent to the requester in the POST response.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
139
+ page_content=' The specifics of how the redaction is applied is outlined in the next section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
140
+ page_content=' Figure 2: CageCoach Redaction Pipeline, providing a generalized measure of privacy assurance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
141
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
142
+ page_content='2 Access Control and Redaction Pipelines CageCoach’s read-only assumption for user data is not a binary, like older models of RBAC based system, but granu- lar.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
143
+ page_content=' By using a series of redaction operations over requested data, CageCoach can allow partial access to data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
144
+ page_content=' These op- erations, dividable by datatype as outlined in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
145
+ page_content=' 2, provide for blurring faces in images, redacting text in jsons and csvs, and muting specific words or background noises recognized in audio.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
146
+ page_content=' In the overall data request pipeline after a user has requested data and the DCS has downloaded the target data, it will apply these redaction operations according to the specific user, datatype, and data item.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
147
+ page_content=' This provides three levels of granularity for controlling data flow outwards to re- questers: by datatype (all jsons, csvs, mp3s, etc), by data item (ex: specific files like example-1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
148
+ page_content='json hosted on Google Drive or example-2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
149
+ page_content='json hosted on dropbox), and by requester id (ex: John Doe can access the user’s name, but Jane Doe can only see the user’s first name).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
150
+ page_content=' However, such operations that would be specific to an owner, such as blurring only the owner’s face, require the owner provide their own data to the redacting DCS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
151
+ page_content=' Our implementation we provide does general redaction such as blurring all faces and removing a handful of well known text fields such as social security and street addresses.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
152
+ page_content=' We do not implement an audio redaction approach as there isn’t a general python capable pre-built audio redaction library nor a common set of what "words" should be auto removed, unlike faces in images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
153
+ page_content=' CageCoach does support extensions to these operations to tailor to spe- cific users.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
154
+ page_content=' Our implementation uses the Haar cascade and OpenCV2 [9] python libraries for blurring faces (illustrated with the blurring of photo of American Union Army General Benjamin Butler fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
155
+ page_content=' 3), and python Pandas to redact textual data (example of such in fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
156
+ page_content=' 4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
157
+ page_content=' 4 010110 RedactionProcessesByDataTypes 010001 111011 Blob Age:43 Name:"JohnDoe" SSN:"999-99-9999" File CensorFields Encrypted 8871 File Image CensorFaces/Persons 4) Audio CensorAudio SegmentsCageCoach: Sharing-Oriented Redaction-Capable Distributed Cryptographic File System , Figure 3: CageCoach Redaction Pipeline example blur- ring a specific image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
158
+ page_content=' Figure 4: CageCoach Redaction Pipeline example redacting specific text and fields.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
159
+ page_content=' 5 CONCLUSION In this work, we introduced a new sharing oriented imple- mentation of DCFS: CageCoach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
160
+ page_content=' CageCoach streamlines the older RBAC heavy and trust-necessary hosting models of DFS, while using the simpler HTTP GET/POST ecosystem to facilitate easier data sharing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
161
+ page_content=' All of this is possible while still respecting the privacy of users through granular customize- able redaction pipelines that handle removal of sensitive user information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
162
+ page_content=' 6 LIMITATIONS AND FUTURE WORK CageCoach has a set of drawbacks and limitations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
163
+ page_content=' Cage- Coach is implemented as a demonstration of a new inter- pretation of sharing-oriented DCFS and not intended for industrial or commercial use.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
164
+ page_content=' Future implementations would need to provide better integration with hosting services like Google and Dropbox, and provide tougher and more robust security checks and infrastructure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
165
+ page_content=' Likewise future work improvements would be needed to make the redaction oper- ations more capable and workable on a wider set of diverse data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
166
+ page_content=' Notably there are two non-implementation limitations that stunt CageCoach and the broader goal of safe sharing oriented DFS: No system can stop external data reconstruction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
167
+ page_content=' No matter if a user is using CageCoach, Google Drive, or any other hosting system, external actors with access to pieces of separate data can always reassemble it together.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
168
+ page_content=' For example, an actor A has access to a subset of data 1, and an actor B has access to another subset of data 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
169
+ page_content=' These two actors are not allowed access to either subset of data by the policies of the user whose data it is.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
170
+ page_content=' However, this does not stop nor disincentivise actor A and B from simply sharing with each other the user’s data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
171
+ page_content=' Each filling in the other’s gap of missing data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
172
+ page_content=' No system can solve this if the requesting actors are able to observe data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
173
+ page_content=' Leakage is still possible through indirect implicating fields.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
174
+ page_content=' CageCoach’s redaction pipeline is quite rudimentary, in some cases data may be leaked through a combination of un- related fields.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
175
+ page_content=' For example, with a street address, a malicious user may be able to correctly guess a zip code when paired with other information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
176
+ page_content=' This is due to CageCoach’s inability to understand the connections between data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
177
+ page_content=' CageCoach’s unique sharing-oriented DCFS structure pro- vides several new areas of exploration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
178
+ page_content=' CageCoach itself can be expanded to cover more datatypes, and work can be done to integrate the ingress of user’s data to the data hosts that CageCoach manages.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
179
+ page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
180
+ page_content='1 Collective Redaction Rules For Multi-Owner Data Given our system’s usage of a redaction pipeline, one could envision a scenario where data that is collected by one user, but contains multiple other users’ data is pass around each impacted user’s DCS for specific group based redaction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
181
+ page_content=' This would facilitate greater granularity of redaction and a sense of group ownership over data and its privacy implications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
182
+ page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
183
+ page_content='2 Enhanced ACL And Redaction Through Impact Trees A future work could examine how to enhance the redaction rules to include field implications to provide greater coverage of privacy in the event a user misses these concepts them- selves.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
184
+ page_content=' This would fill in the gaps that leaking implicating fields create.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
185
+ page_content=' REFERENCES [1] Matt Blaze.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
186
+ page_content=' 1993.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
187
+ page_content=' A Cryptographic File System for UNIX.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
188
+ page_content=' In Proceedings of the 1st ACM Conference on Computer and Communications Security (Fairfax, Virginia, USA) (CCS ’93).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
189
+ page_content=' Association for Computing Machin- ery, New York, NY, USA, 9–16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
190
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
191
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
192
+ page_content='1145/168588.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
193
+ page_content='168590 5 "Name":"John Doe" "Age""24" "Height": "4 feet, 2 inches" 1 "Age": "24" "Height": "4 feet, x", [2] Arastoo Bozorgi, Mahya Soleimani Jadidi, and Jonathan Anderson.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
194
+ page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
195
+ page_content=' Challenges in Designing a Distributed Cryptographic File System.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
196
+ page_content=' In Security Protocols XXVII, Jonathan Anderson, Frank Stajano, Bruce Christianson, and Vashek Matyáš (Eds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
197
+ page_content=').' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
198
+ page_content=' Springer International Pub- lishing, Cham, 177–192.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
199
+ page_content=' https://link.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
200
+ page_content='springer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
201
+ page_content='com/chapter/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
202
+ page_content='1007/ 978-3-030-57043-9_17 [3] Dan Calacci and Alex Pentland.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
203
+ page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
204
+ page_content=' Bargaining with the Black-Box: Designing and Deploying Worker-Centric Tools to Audit Algorithmic Management.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
205
+ page_content=' Proc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
206
+ page_content=' ACM Hum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
207
+ page_content='-Comput.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
208
+ page_content=' Interact.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
209
+ page_content=' 6, CSCW2, Article 428 (nov 2022), 24 pages.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
210
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
211
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
212
+ page_content='1145/3570601 [4] Arturo Casadevall, Lynn Enquist, Michael Imperiale, Paul Keim, Michael Osterholm, and David Relman.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
213
+ page_content=' 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
214
+ page_content=' Redaction of Sensi- tive Data in the Publication of Dual Use Research of Concern.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
215
+ page_content=' mBio 5 (12 2013).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
216
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
217
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
218
+ page_content='1128/mBio.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
219
+ page_content='00991-13 [5] Le Chen, Alan Mislove, and Christo Wilson.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
220
+ page_content=' 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
221
+ page_content=' Peeking Beneath the Hood of Uber.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
222
+ page_content=' In Proceedings of the 2015 Internet Measurement Confer- ence (Tokyo, Japan) (IMC ’15).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
223
+ page_content=' Association for Computing Machinery, New York, NY, USA, 495–508.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
224
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
225
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
226
+ page_content='1145/2815675.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
227
+ page_content='2815681 [6] extract team.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
228
+ page_content=' [n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
229
+ page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
230
+ page_content='].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
231
+ page_content=' Automated Data Redaction Software.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
232
+ page_content=' https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
233
+ page_content=' extractsystems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
234
+ page_content='com/automated-data-redaction-software accessed on Dec 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
235
+ page_content=' [7] Minneapolis Government.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
236
+ page_content=' [n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
237
+ page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
238
+ page_content='].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
239
+ page_content=' Minneapolis Open Data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
240
+ page_content=' https: //opendata.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
241
+ page_content='minneapolismn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
242
+ page_content='gov/ accessed on Sun 18 Dec 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
243
+ page_content=' [8] United States Government.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
244
+ page_content=' [n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
245
+ page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
246
+ page_content='].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
247
+ page_content=' Citizen Science.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
248
+ page_content=' https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
249
+ page_content=' citizenscience.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
250
+ page_content='gov/# accessed on Sun 18 Dec 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
251
+ page_content=' [9] Olli-Pekka Heinisuo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
252
+ page_content=' [n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
253
+ page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
254
+ page_content='].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
255
+ page_content=' OpenCV on Wheels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
256
+ page_content=' https://pypi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
257
+ page_content='org/ project/opencv-python/ accessed on Dec 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
258
+ page_content=' [10] Hsiao-Shan Huang, Tian-Sheuan Chang, and Jhih-Yi Wu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
259
+ page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
260
+ page_content=' A Se- cure File Sharing System Based on IPFS and Blockchain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
261
+ page_content=' In Proceedings of the 2020 2nd International Electronics Communication Conference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
262
+ page_content=' ACM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
263
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
264
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
265
+ page_content='1145/3409934.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
266
+ page_content='3409948 [11] Jinhua Ma, Xinyi Huang, Yi Mu, and Robert H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
267
+ page_content=' Deng.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
268
+ page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
269
+ page_content=' Authen- ticated Data Redaction With Accountability and Transparency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
270
+ page_content=' IEEE Transactions on Dependable and Secure Computing 19, 1 (2022), 149–160.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
271
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
272
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
273
+ page_content='1109/TDSC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
274
+ page_content='2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
275
+ page_content='2998135 [12] Muqaddas Naz, Fahad A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
276
+ page_content=' Al-zahrani, Rabiya Khalid, Nadeem Javaid, Ali Mustafa Qamar, Muhammad Khalil Afzal, and Muhammad Shafiq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
277
+ page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
278
+ page_content=' A Secure Data Sharing Platform Using Blockchain and Inter- planetary File System.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
279
+ page_content=' Sustainability 11, 24 (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
280
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
281
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
282
+ page_content=' 3390/su11247054 [13] Firaz Peer and Carl DiSalvo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
283
+ page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
284
+ page_content=' The Work of Infrastructural Bricoleurs in Building Civic Data Dashboards.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
285
+ page_content=' Proc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
286
+ page_content=' ACM Hum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
287
+ page_content='- Comput.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
288
+ page_content=' Interact.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
289
+ page_content=' 6, CSCW1, Article 124 (apr 2022), 25 pages.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
290
+ page_content=' https: //doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
291
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
292
+ page_content='1145/3512971 [14] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
293
+ page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
294
+ page_content=' Pradeep, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
295
+ page_content=' Vijayakumar, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
296
+ page_content=' Subramaniyaswamy, and Arash H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
297
+ page_content=' Lashkari.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
298
+ page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
299
+ page_content=' An Efficient Framework for Sharing a File in a Secure Manner Using Asymmetric Key Distribution Management in Cloud Environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
300
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
301
+ page_content=' Comput.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
302
+ page_content=' Netw.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
303
+ page_content=' Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
304
+ page_content=' 2019 (jan 2019), 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
305
+ page_content=' https: //doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
306
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
307
+ page_content='1155/2019/9852472 [15] redacted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
308
+ page_content='ai team.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
309
+ page_content=' [n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
310
+ page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
311
+ page_content='].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
312
+ page_content=' Redacted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
313
+ page_content='ai.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
314
+ page_content=' https://redacted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
315
+ page_content='ai/ accessed on Dec 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
316
+ page_content=' [16] Todd W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
317
+ page_content=' Schneider.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
318
+ page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
319
+ page_content=' Reverse Engineering Uber and Lyft Surge Pricing in Chicago.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
320
+ page_content=' https://toddwschneider.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
321
+ page_content='com/posts/chicago- ridehail-surge-pricing/ accessed on Dec 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
322
+ page_content=' [17] Document.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
323
+ page_content='Redact team.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
324
+ page_content=' [n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
325
+ page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
326
+ page_content='].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
327
+ page_content=' Document.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
328
+ page_content='Redact.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
329
+ page_content=' https://super.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
330
+ page_content='ai/ blog/redacting-information-from-documents-automatically accessed on Dec 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
331
+ page_content=' [18] Gridwise Team.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
332
+ page_content=' [n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
333
+ page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
334
+ page_content='].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
335
+ page_content=' Gridwise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
336
+ page_content=' https://gridwise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
337
+ page_content='io/ access on Mon 12 Dec 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
338
+ page_content=' [19] InRupt Team.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
339
+ page_content=' [n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
340
+ page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
341
+ page_content='].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
342
+ page_content=' Solid.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
343
+ page_content=' https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
344
+ page_content='inrupt.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
345
+ page_content='com/solid [20] Objective Team.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
346
+ page_content=' [n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
347
+ page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
348
+ page_content='].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
349
+ page_content=' Objective Redact.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
350
+ page_content=' https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
351
+ page_content='objective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
352
+ page_content='com/ products/objective-redact accessed on Dec 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
353
+ page_content=' [21] DOMA Technologies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
354
+ page_content=' [n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
355
+ page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
356
+ page_content='].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
357
+ page_content=' DOMA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
358
+ page_content=' https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
359
+ page_content='domaonline.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
360
+ page_content='com/ solutions/digitalservices/data-redaction/ accessed on Dec 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
361
+ page_content=' [22] Mahmut Ünver and Atilla Erguzen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
362
+ page_content=' 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
363
+ page_content=' A STUDY ON DISTRIBUTED FILE SYSTEMS: An example of NFS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
364
+ page_content=' 6' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/8NE2T4oBgHgl3EQf8Ag-/content/2301.04214v1.pdf'}
8dAyT4oBgHgl3EQf2_nF/content/2301.00762v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:24a5fd36587c123e79d7074f78d71b34c7ddc114e5a0cb60291557ac0d22274e
3
+ size 1000504
8dAyT4oBgHgl3EQf2_nF/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ba6efb93273aac7da94afd912b72fd784be0029fa68f5caa2419f5cd50e8ca7
3
+ size 1703981
8dAyT4oBgHgl3EQf2_nF/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da1fe2521963eac8bc7a8d2ef293ad541ea31a7a177079bae869ee9d654a716b
3
+ size 74467
99E3T4oBgHgl3EQfSQn_/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4902039652d2d82b61d75b48d7098a318491ce760e6c45f27bfcf65b577cf49
3
+ size 4653101
99FLT4oBgHgl3EQfCS7y/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d9cd467bffe8736bb495e3a442a586a45d1fb73b689efe9b6c8f4116f1ba7b4
3
+ size 313039
9NE1T4oBgHgl3EQfnwSt/content/2301.03313v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8103972662eeec7a7a434d6886ec95e51c375446a95ce6f2a6722828bb2c3eb3
3
+ size 1810879
9NE1T4oBgHgl3EQfnwSt/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bb23ceef43d34829fd7f458c3ed70aa8ba08096abdf6b42bcaef532187813cc
3
+ size 4325421
9NE1T4oBgHgl3EQfnwSt/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d4b8d1ebd1308fa2860e30540271ae3e08e1ec5db65f9a8b3dab914c8f1f212
3
+ size 175259
B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf ADDED
Binary file (97.1 kB). View file
 
B9AzT4oBgHgl3EQfGPvM/content/tmp_files/2301.01026v1.pdf.txt ADDED
@@ -0,0 +1,389 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.01026v1 [cs.LG] 3 Jan 2023
2
+ Continual Treatment Effect Estimation: Challenges and Opportunities
3
+ Zhixuan Chu1, Sheng Li2
4
+ 1Ant Group, Hangzhou, China
5
+ 2University of Virginia, Charlottesville, USA
6
+ chuzhixuan.czx@alibaba-inc.com, shengli@virginia.edu
7
+ Introduction
8
+ A further understanding of cause and effect within obser-
9
+ vational data is critical across many domains, such as eco-
10
+ nomics, health care, public policy, web mining, online ad-
11
+ vertising, and marketing campaigns. Although significant
12
+ advances have been made to overcome the challenges in
13
+ causal effect estimation with observational data, such as
14
+ missing counterfactual outcomes and selection bias between
15
+ treatment and control groups, the existing methods mainly
16
+ focus on source-specific and stationary observational data.
17
+ Such learning strategies assume that all observational data
18
+ are already available during the training phase and from only
19
+ one source.
20
+ Along with the fast-growing segments of industrial appli-
21
+ cations, this assumption is unsubstantial in practice. Taking
22
+ Alipay as an example, which is one of the world’s largest
23
+ mobile payment platforms and offers financial services to
24
+ billion-scale users, a tremendous amount of data containing
25
+ much privacy-related information is produced daily and col-
26
+ lected from different sources. In conclusion, the following
27
+ two points are summed up. The first one is based on the
28
+ characteristics of observational data, which are incremen-
29
+ tally available from non-stationary data distributions. For
30
+ instance, the electronic financial records for one marketing
31
+ campaign are growing every day and they may be collected
32
+ from different cities or even other countries. This character-
33
+ istic implies that one cannot have access to all observational
34
+ data at a one-time point and from one single source. The sec-
35
+ ond reason is based on the realistic consideration of accessi-
36
+ bility. For example, when new observational data are avail-
37
+ able, if we want to refine the model previously trained by
38
+ original data, maybe the original training data are no longer
39
+ accessible due to a variety of reasons, e.g., legacy data may
40
+ be unrecorded, proprietary, the sensitivity of financial data,
41
+ too large to store, or subject to privacy constraint of personal
42
+ information (Zhang et al. 2020). This practical concern of
43
+ accessibility is ubiquitous in various academic and indus-
44
+ trial applications. That’s what it boiled down to: in the era
45
+ of big data, we face new challenges in causal inference with
46
+ observational data, i.e., the extensibility for incrementally
47
+ available observational data, the adaptability for extra do-
48
+ Copyright © 2023, Association for the Advancement of Artificial
49
+ Intelligence (www.aaai.org). All rights reserved.
50
+ main adaptation problem except for the imbalance between
51
+ treatment and control groups, and the accessibility for an
52
+ enormous amount of data.
53
+ In this position paper, we formally define the problem of
54
+ continual treatment effect estimation, describe its research
55
+ challenges, and then present possible solutions to this prob-
56
+ lem. Moreover, we will discuss future research directions on
57
+ this topic.
58
+ Related Work
59
+ Instead of randomized controlled trials, observational data
60
+ is obtained by the researcher simply observing the subjects
61
+ without any interference. That means that the researchers
62
+ have no control over the treatment assignments, and they just
63
+ observe the subjects and record data based on their obser-
64
+ vations (Yao et al. 2021). Therefore, from the observational
65
+ data, directly estimating the treatment effect is challenging
66
+ due to the missing counterfactual outcomes and the exis-
67
+ tence of confounders. Recently, powerful machine learning
68
+ methods such as tree-based methods (Athey and Imbens
69
+ 2016; Wager and Athey 2018), representation learning
70
+ (Li and Fu
71
+ 2017;
72
+ Shalit, Johansson, and Sontag
73
+ 2017;
74
+ Yao et al. 2018; Chu, Rathbun, and Li 2022), meta-learning
75
+ (K¨unzel et al. 2019; Nie and Wager 2021), generative mod-
76
+ els (Louizos et al. 2017; Yoon, Jordon, and van der Schaar
77
+ 2018) have achieved prominent progress in treatment effect
78
+ estimation task.
79
+ In addition, the combination of causal inference and
80
+ other research fields also exhibits complementary strengths,
81
+ such as computer vision (Tang et al. 2020; Liu et al. 2022a),
82
+ graph learning (Ma et al. 2022; Chu, Rathbun, and Li 2021),
83
+ natural language processing (Feder et al. 2022; Liu et al.
84
+ 2022b), and so on. The involved causal analysis helps to im-
85
+ prove the model’s capability of discovering and resolving
86
+ the underlying system beyond the statistical relationships
87
+ learned from observational data.
88
+ Problem Definition
89
+ Suppose that the observational data contain n units collected
90
+ from d different domains and the d-th dataset Dd contains
91
+ the data {(x, y, t)|x ∈ X, y ∈ Y, t ∈ T } collected from d-th
92
+ domain, which contains nd units. Let X denote all observed
93
+ variables, Y denote the outcomes in the observational data,
94
+
95
+ and T be a binary variable. Let D1:d = {D1, D2, ..., Dd}
96
+ be the set of combination of d datasets, separately collected
97
+ from d different domains. For d datasets {D1, D2, ..., Dd},
98
+ they have the commonly observed variables, but due to the
99
+ fact that they are collected from different domains, they have
100
+ different distributions with respect to X, Y , and T in each
101
+ dataset. Each unit in the observational data received one of
102
+ two treatments. Let ti denote the treatment assignment for
103
+ unit i; i = 1, ..., n. For binary treatments, ti = 1 is for
104
+ the treatment group and ti = 0 for the control group. The
105
+ outcome for unit i is denoted by yi
106
+ t when treatment t is ap-
107
+ plied to unit i. For observational data, only one of the poten-
108
+ tial outcomes is observed. The observed outcome is called
109
+ the factual outcome, and the remaining unobserved poten-
110
+ tial outcomes are called counterfactual outcomes.
111
+ This task can follow the potential outcome frame-
112
+ work for estimating treatment effects
113
+ (Rubin 1974;
114
+ Splawa-Neyman, Dabrowska, and Speed 1990). The indi-
115
+ vidual treatment effect (ITE) for unit i is the difference be-
116
+ tween the potential treated and control outcomes and is de-
117
+ fined as
118
+ ITEi = yi
119
+ 1 − yi
120
+ 0.
121
+ (1)
122
+ The average treatment effect (ATE) is the difference be-
123
+ tween the mean potential treated and control outcomes,
124
+ which is defined as
125
+ ATE = 1
126
+ n
127
+ n
128
+
129
+ i=1
130
+ (yi
131
+ 1 − yi
132
+ 0).
133
+ (2)
134
+ The success of the potential outcome framework is based
135
+ on the following assumptions (Imbens and Rubin 2015),
136
+ which ensure that the treatment effect can be identified.
137
+ Assumption 1 Stable Unit Treatment Value Assumption
138
+ (SUTVA): The potential outcomes for any unit do not vary
139
+ with the treatments assigned to other units, and, for each
140
+ unit, there are no different forms or versions of each treat-
141
+ ment level, which lead to different potential outcomes.
142
+ Assumption 2 Consistency: The potential outcome of treat-
143
+ ment t is equal to the observed outcome if the actual treat-
144
+ ment received is t.
145
+ Assumption 3 Positivity: For any value of x, treatment as-
146
+ signment is not deterministic, i.e.,P(T = t|X = x) > 0, for
147
+ all t and x.
148
+ Assumption 4 Ignorability: Given covariates, treatment
149
+ assignment is independent of the potential outcomes, i.e.,
150
+ (y1, y0) ⊥⊥ t|x.
151
+ Our goal is to develop a novel continual causal inference
152
+ framework to estimate the causal effect for all available data,
153
+ including new data Dd and the previous data D1:(d−1), with-
154
+ out having access to previous data D1:(d−1).
155
+ Research Challenges
156
+ Existing causal effect inference methods, however, are un-
157
+ able to deal with the aforementioned new challenges, i.e.,
158
+ extensibility, adaptability, and accessibility. Although it is
159
+ possible to adapt existing causal inference methods to cater
160
+ to these issues, these adjusted methods still have inevitable
161
+ defects. Three straightforward adaptation strategies are de-
162
+ scribed as follows:
163
+ 1. If we directly apply the model previously trained based
164
+ on original data to new observational data, the perfor-
165
+ mance on new tasks will be very poor due to the domain
166
+ shift issues among different data sources;
167
+ 2. Suppose we utilize newly available data to re-train the
168
+ previously learned model for adapting changes in the
169
+ data distribution. In that case, old knowledge will be
170
+ completely or partially overwritten by the new one,
171
+ which can result in severe performance degradation on
172
+ old tasks. This is the well-known catastrophic forgetting
173
+ problem (McCloskey and Cohen 1989; French 1999);
174
+ 3. To overcome the catastrophic forgetting problem, we
175
+ may rely on the storage of old data and combine the old
176
+ and new data together, and then re-train the model from
177
+ scratch. However, this strategy is memory inefficient and
178
+ time-consuming, and it brings practical concerns such as
179
+ copyright or privacy issues when storing data for a long
180
+ time (Samet, Miri, and Granger 2013).
181
+ Any of these three strategies, in combination with the exist-
182
+ ing causal effect inference methods, is deficient.
183
+ Potential Solution
184
+ To address the continual treatment effect estimation prob-
185
+ lem, we propose a Continual Causal Effect Representation
186
+ Learning framework (CERL) for estimating causal effect
187
+ with incrementally available observational data. Instead of
188
+ having access to all previous observational data, we only
189
+ store a limited subset of feature representations learned from
190
+ previous data. Combining selective and balanced represen-
191
+ tation learning, feature representation distillation, and fea-
192
+ ture transformation, our framework preserves the knowl-
193
+ edge learned from previous data and updates the knowledge
194
+ by leveraging new data so that it can achieve the continual
195
+ causal effect estimation for incrementally new data without
196
+ compromising the estimation capability for previous data.
197
+ Framework Overview. To estimate the incrementally
198
+ available observational data, the framework of CERL is
199
+ mainly composed of two components: (1) the baseline
200
+ causal effect learning model is only for the first available
201
+ observational data, and thus we don’t need to consider the
202
+ domain shift issue among different data sources. This com-
203
+ ponent is equivalent to the traditional causal effect estima-
204
+ tion problem; (2) the continual causal effect learning model
205
+ is for the sequentially available observational data, where
206
+ we need to handle more complex issues, such as knowledge
207
+ transfer, catastrophic forgetting, global representation bal-
208
+ ance, and memory constraint.
209
+ Baseline Causal Effect Learning Model. We first train
210
+ the baseline causal effect learning model for the initial obser-
211
+ vational dataset and then bring in subsequent datasets. The
212
+ task on the initial dataset can be converted to a traditional
213
+ causal effect estimation problem. Owing to the success
214
+ of deep learning for counterfactual inference, we propose
215
+
216
+ to learn the selective and balanced feature representations
217
+ (Shalit, Johansson, and Sontag 2017; Chu, Rathbun, and Li
218
+ 2020) for units in treatment and control groups and then in-
219
+ fer the potential outcomes based on learned representation
220
+ space.
221
+ Sustainability of Model Learning. We have built the
222
+ baseline model for causal effect estimation with observa-
223
+ tional data from a single source. To avoid catastrophic for-
224
+ getting when learning new data, we propose to preserve a
225
+ subset of lower-dimensional feature representations rather
226
+ than all original covariates. We also can adjust the number
227
+ of preserved feature representations according to the mem-
228
+ ory constraint.
229
+ Continual Causal Effect Learning. We have stored mem-
230
+ ory and the baseline model. To continually estimate the
231
+ causal effect for incrementally available observational data,
232
+ we incorporate feature representation distillation and feature
233
+ representation transformation (Chu et al. 2023) to estimate
234
+ the causal effect for all seen data based on a balanced global
235
+ feature representation space.
236
+ Research Opportunities
237
+ Although significant advances have been made to over-
238
+ come the challenges in causal effect estimation from an aca-
239
+ demic perspective, industrial applications based on obser-
240
+ vational data are always more complicated and harder. Un-
241
+ like source-specific and stationary observational data, most
242
+ real-world data are incrementally available and from non-
243
+ stationary data distributions. Significantly, we also face the
244
+ realistic consideration of accessibility. This work is the first
245
+ attempt to investigate the continual lifelong causal effect in-
246
+ ference problem and propose the corresponding evaluation
247
+ criteria. However, constructing the comprehensive analyt-
248
+ ical tools and the theoretical framework derived from this
249
+ brand-new problem requires non-trivial efforts. Specifically,
250
+ there are several potential directions for continual causal in-
251
+ ference:
252
+ • In addition to the distribution shift of the covariates
253
+ among different domains, there are other potential tech-
254
+ nical issues for continual effect estimation: for example,
255
+ perhaps we do not initially observe all the necessary con-
256
+ founding variables and may get access to increasingly
257
+ more confounders.
258
+ • Compared with homogeneous treatment effects (the
259
+ magnitude and direction of the treatment effect are the
260
+ same for all patients, regardless of any other patient char-
261
+ acteristics), heterogeneous causal effects could differ for
262
+ different individuals. This could be another candidate
263
+ to consider for the continual treatment effect estimation
264
+ model.
265
+ • The basic assumptions for traditional causal effect esti-
266
+ mation may not be completely applicable. New assump-
267
+ tions may be supplemented, or previous assumptions
268
+ need to be relaxed.
269
+ • There exists a natural connection with continual domain
270
+ adaptation among different times or domains (“contin-
271
+ ual” causal inference) and between treatment and control
272
+ groups (continual “causal inference”).
273
+ • Compared to traditional causal effect estimation tasks
274
+ based on a small amount of medical data, the continual
275
+ causal inference method will face big data computing or
276
+ cloud computing due to its objective task.
277
+ • With the increasing public concern over privacy leakage
278
+ in data, federated learning, which collaboratively trains
279
+ the machine learning model without directly sharing the
280
+ raw data among the data holders, may become a potential
281
+ solution for continual causal inference.
282
+ References
283
+ Athey, S.; and Imbens, G. 2016. Recursive partitioning for
284
+ heterogeneous causal effects. Proceedings of the National
285
+ Academy of Sciences, 113(27): 7353–7360.
286
+ Chu, Z.; Li, R.; Rathbun, S. L.; and Li, S. 2023. Continual
287
+ Causal Inference with Incremental Observational Data. In
288
+ The 39th IEEE International Conference on Data Engineer-
289
+ ing.
290
+ Chu, Z.; Rathbun, S. L.; and Li, S. 2020. Matching in se-
291
+ lective and balanced representation space for treatment ef-
292
+ fects estimation. In Proceedings of the 29th ACM Interna-
293
+ tional Conference on Information & Knowledge Manage-
294
+ ment, 205–214.
295
+ Chu, Z.; Rathbun, S. L.; and Li, S. 2021. Graph infomax
296
+ adversarial learning for treatment effect estimation with net-
297
+ worked observational data. In Proceedings of the 27th ACM
298
+ SIGKDD Conference on Knowledge Discovery & Data Min-
299
+ ing, 176–184.
300
+ Chu, Z.; Rathbun, S. L.; and Li, S. 2022. Learning Info-
301
+ max and Domain-Independent Representations for Causal
302
+ Effect Inference with Real-World Data. In Proceedings of
303
+ the 2022 SIAM International Conference on Data Mining
304
+ (SDM), 433–441. SIAM.
305
+ Feder, A.; Keith, K. A.; Manzoor, E.; Pryzant, R.; Sridhar,
306
+ D.; Wood-Doughty, Z.; Eisenstein, J.; Grimmer, J.; Reichart,
307
+ R.; Roberts, M. E.; et al. 2022. Causal inference in natural
308
+ language processing: Estimation, prediction, interpretation
309
+ and beyond. Transactions of the Association for Computa-
310
+ tional Linguistics, 10: 1138–1158.
311
+ French, R. M. 1999. Catastrophic forgetting in connectionist
312
+ networks. Trends in cognitive sciences, 3(4): 128–135.
313
+ Imbens, G. W.; and Rubin, D. B. 2015. Causal inference
314
+ in statistics, social, and biomedical sciences.
315
+ Cambridge
316
+ University Press.
317
+ Künzel, S. R.; Sekhon, J. S.; Bickel, P. J.; and Yu, B.
318
+ 2019. Metalearners for estimating heterogeneous treatment
319
+ effects using machine learning. Proceedings of the national
320
+ academy of sciences, 116(10): 4156–4165.
321
+ Li, S.; and Fu, Y. 2017. Matching on balanced nonlinear
322
+ representations for treatment effects estimation. Advances
323
+ in Neural Information Processing Systems, 30.
324
+ Liu, B.; Wang, D.; Yang, X.; Zhou, Y.; Yao, R.; Shao, Z.; and
325
+ Zhao, J. 2022a. Show, Deconfound and Tell: Image Caption-
326
+ ing With Causal Inference. In Proceedings of the IEEE/CVF
327
+ Conference on Computer Vision and Pattern Recognition,
328
+ 18041–18050.
329
+
330
+ Liu, J.; Wei, W.; Chu, Z.; Gao, X.; Zhang, J.; Yan, T.; and
331
+ Kang, Y. 2022b. Incorporating Causal Analysis into Diversi-
332
+ fied and Logical Response Generation. In Proceedings of the
333
+ 29th International Conference on Computational Linguis-
334
+ tics. International Committee on Computational Linguistics.
335
+ Louizos, C.; Shalit, U.; Mooij, J. M.; Sontag, D.; Zemel, R.;
336
+ and Welling, M. 2017. Causal effect inference with deep
337
+ latent-variable models. In Advances in Neural Information
338
+ Processing Systems, 6446–6456.
339
+ Ma, J.; Wan, M.; Yang, L.; Li, J.; Hecht, B.; and Teevan, J.
340
+ 2022. Learning causal effects on hypergraphs. In Proceed-
341
+ ings of the 28th ACM SIGKDD Conference on Knowledge
342
+ Discovery and Data Mining, 1202–1212.
343
+ McCloskey, M.; and Cohen, N. J. 1989. Catastrophic inter-
344
+ ference in connectionist networks: The sequential learning
345
+ problem. In Psychology of learning and motivation, vol-
346
+ ume 24, 109–165. Elsevier.
347
+ Nie, X.; and Wager, S. 2021.
348
+ Quasi-oracle estimation of
349
+ heterogeneous treatment effects. Biometrika, 108(2): 299–
350
+ 319.
351
+ Rubin, D. B. 1974. Estimating causal effects of treatments
352
+ in randomized and nonrandomized studies. Journal of edu-
353
+ cational Psychology, 66(5): 688.
354
+ Samet, S.; Miri, A.; and Granger, E. 2013.
355
+ Incremental
356
+ learning of privacy-preserving Bayesian networks. Applied
357
+ Soft Computing, 13(8): 3657–3667.
358
+ Shalit, U.; Johansson, F. D.; and Sontag, D. 2017. Estimat-
359
+ ing individual treatment effect: generalization bounds and
360
+ algorithms. In International Conference on Machine Learn-
361
+ ing, 3076–3085. PMLR.
362
+ Splawa-Neyman, J.; Dabrowska, D. M.; and Speed, T. 1990.
363
+ On the application of probability theory to agricultural ex-
364
+ periments. Essay on principles. Section 9. Statistical Sci-
365
+ ence, 465–472.
366
+ Tang, K.; Niu, Y.; Huang, J.; Shi, J.; and Zhang, H. 2020.
367
+ Unbiased scene graph generation from biased training. In
368
+ Proceedings of the IEEE/CVF conference on computer vi-
369
+ sion and pattern recognition, 3716–3725.
370
+ Wager, S.; and Athey, S. 2018. Estimation and inference of
371
+ heterogeneous treatment effects using random forests. Jour-
372
+ nal of the American Statistical Association, 113(523): 1228–
373
+ 1242.
374
+ Yao, L.; Chu, Z.; Li, S.; Li, Y.; Gao, J.; and Zhang, A. 2021.
375
+ A survey on causal inference. ACM Transactions on Knowl-
376
+ edge Discovery from Data (TKDD), 15(5): 1–46.
377
+ Yao, L.; Li, S.; Li, Y.; Huai, M.; Gao, J.; and Zhang, A. 2018.
378
+ Representation learning for treatment effect estimation from
379
+ observational data. Advances in Neural Information Pro-
380
+ cessing Systems, 31.
381
+ Yoon, J.; Jordon, J.; and van der Schaar, M. 2018. GANITE:
382
+ Estimation of individualized treatment effects using genera-
383
+ tive adversarial nets. In International Conference on Learn-
384
+ ing Representations.
385
+ Zhang, J.; Zhang, J.; Ghosh, S.; Li, D.; Tasci, S.; Heck, L.;
386
+ Zhang, H.; and Kuo, C.-C. J. 2020. Class-incremental learn-
387
+ ing via deep model consolidation. In The IEEE Winter Con-
388
+ ference on Applications of Computer Vision, 1131–1140.
389
+
B9AzT4oBgHgl3EQfGPvM/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,420 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf,len=419
2
+ page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
3
+ page_content='01026v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
4
+ page_content='LG] 3 Jan 2023 Continual Treatment Effect Estimation: Challenges and Opportunities Zhixuan Chu1, Sheng Li2 1Ant Group, Hangzhou, China 2University of Virginia, Charlottesville, USA chuzhixuan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
5
+ page_content='czx@alibaba-inc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
6
+ page_content='com, shengli@virginia.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
7
+ page_content='edu Introduction A further understanding of cause and effect within obser- vational data is critical across many domains, such as eco- nomics, health care, public policy, web mining, online ad- vertising, and marketing campaigns.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
8
+ page_content=' Although significant advances have been made to overcome the challenges in causal effect estimation with observational data, such as missing counterfactual outcomes and selection bias between treatment and control groups, the existing methods mainly focus on source-specific and stationary observational data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
9
+ page_content=' Such learning strategies assume that all observational data are already available during the training phase and from only one source.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
10
+ page_content=' Along with the fast-growing segments of industrial appli- cations, this assumption is unsubstantial in practice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
11
+ page_content=' Taking Alipay as an example, which is one of the world’s largest mobile payment platforms and offers financial services to billion-scale users, a tremendous amount of data containing much privacy-related information is produced daily and col- lected from different sources.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
12
+ page_content=' In conclusion, the following two points are summed up.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
13
+ page_content=' The first one is based on the characteristics of observational data, which are incremen- tally available from non-stationary data distributions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
14
+ page_content=' For instance, the electronic financial records for one marketing campaign are growing every day and they may be collected from different cities or even other countries.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
15
+ page_content=' This character- istic implies that one cannot have access to all observational data at a one-time point and from one single source.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
16
+ page_content=' The sec- ond reason is based on the realistic consideration of accessi- bility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
17
+ page_content=' For example, when new observational data are avail- able, if we want to refine the model previously trained by original data, maybe the original training data are no longer accessible due to a variety of reasons, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
18
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
19
+ page_content=', legacy data may be unrecorded, proprietary, the sensitivity of financial data, too large to store, or subject to privacy constraint of personal information (Zhang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
20
+ page_content=' 2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
21
+ page_content=' This practical concern of accessibility is ubiquitous in various academic and indus- trial applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
22
+ page_content=' That’s what it boiled down to: in the era of big data, we face new challenges in causal inference with observational data, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
23
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
24
+ page_content=', the extensibility for incrementally available observational data, the adaptability for extra do- Copyright © 2023, Association for the Advancement of Artificial Intelligence (www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
25
+ page_content='aaai.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
26
+ page_content='org).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
27
+ page_content=' All rights reserved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
28
+ page_content=' main adaptation problem except for the imbalance between treatment and control groups, and the accessibility for an enormous amount of data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
29
+ page_content=' In this position paper, we formally define the problem of continual treatment effect estimation, describe its research challenges, and then present possible solutions to this prob- lem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
30
+ page_content=' Moreover, we will discuss future research directions on this topic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
31
+ page_content=' Related Work Instead of randomized controlled trials, observational data is obtained by the researcher simply observing the subjects without any interference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
32
+ page_content=' That means that the researchers have no control over the treatment assignments, and they just observe the subjects and record data based on their obser- vations (Yao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
33
+ page_content=' 2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
34
+ page_content=' Therefore, from the observational data, directly estimating the treatment effect is challenging due to the missing counterfactual outcomes and the exis- tence of confounders.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
35
+ page_content=' Recently, powerful machine learning methods such as tree-based methods (Athey and Imbens 2016;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
36
+ page_content=' Wager and Athey 2018), representation learning (Li and Fu 2017;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
37
+ page_content=' Shalit, Johansson, and Sontag 2017;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
38
+ page_content=' Yao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
39
+ page_content=' 2018;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
40
+ page_content=' Chu, Rathbun, and Li 2022), meta-learning (K¨unzel et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
41
+ page_content=' 2019;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
42
+ page_content=' Nie and Wager 2021), generative mod- els (Louizos et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
43
+ page_content=' 2017;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
44
+ page_content=' Yoon, Jordon, and van der Schaar 2018) have achieved prominent progress in treatment effect estimation task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
45
+ page_content=' In addition, the combination of causal inference and other research fields also exhibits complementary strengths, such as computer vision (Tang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
46
+ page_content=' 2020;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
47
+ page_content=' Liu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
48
+ page_content=' 2022a), graph learning (Ma et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
49
+ page_content=' 2022;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
50
+ page_content=' Chu, Rathbun, and Li 2021), natural language processing (Feder et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
51
+ page_content=' 2022;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
52
+ page_content=' Liu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
53
+ page_content=' 2022b), and so on.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
54
+ page_content=' The involved causal analysis helps to im- prove the model’s capability of discovering and resolving the underlying system beyond the statistical relationships learned from observational data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
55
+ page_content=' Problem Definition Suppose that the observational data contain n units collected from d different domains and the d-th dataset Dd contains the data {(x, y, t)|x ∈ X, y ∈ Y, t ∈ T } collected from d-th domain, which contains nd units.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
56
+ page_content=' Let X denote all observed variables, Y denote the outcomes in the observational data, and T be a binary variable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
57
+ page_content=' Let D1:d = {D1, D2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
58
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
59
+ page_content=', Dd} be the set of combination of d datasets, separately collected from d different domains.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
60
+ page_content=' For d datasets {D1, D2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
61
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
62
+ page_content=', Dd}, they have the commonly observed variables, but due to the fact that they are collected from different domains, they have different distributions with respect to X, Y , and T in each dataset.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
63
+ page_content=' Each unit in the observational data received one of two treatments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
64
+ page_content=' Let ti denote the treatment assignment for unit i;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
65
+ page_content=' i = 1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
66
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
67
+ page_content=', n.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
68
+ page_content=' For binary treatments, ti = 1 is for the treatment group and ti = 0 for the control group.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
69
+ page_content=' The outcome for unit i is denoted by yi t when treatment t is ap- plied to unit i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
70
+ page_content=' For observational data, only one of the poten- tial outcomes is observed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
71
+ page_content=' The observed outcome is called the factual outcome, and the remaining unobserved poten- tial outcomes are called counterfactual outcomes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
72
+ page_content=' This task can follow the potential outcome frame- work for estimating treatment effects (Rubin 1974;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
73
+ page_content=' Splawa-Neyman, Dabrowska, and Speed 1990).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
74
+ page_content=' The indi- vidual treatment effect (ITE) for unit i is the difference be- tween the potential treated and control outcomes and is de- fined as ITEi = yi 1 − yi 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
75
+ page_content=' (1) The average treatment effect (ATE) is the difference be- tween the mean potential treated and control outcomes, which is defined as ATE = 1 n n � i=1 (yi 1 − yi 0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
76
+ page_content=' (2) The success of the potential outcome framework is based on the following assumptions (Imbens and Rubin 2015), which ensure that the treatment effect can be identified.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
77
+ page_content=' Assumption 1 Stable Unit Treatment Value Assumption (SUTVA): The potential outcomes for any unit do not vary with the treatments assigned to other units, and, for each unit, there are no different forms or versions of each treat- ment level, which lead to different potential outcomes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
78
+ page_content=' Assumption 2 Consistency: The potential outcome of treat- ment t is equal to the observed outcome if the actual treat- ment received is t.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
79
+ page_content=' Assumption 3 Positivity: For any value of x, treatment as- signment is not deterministic, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
80
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
81
+ page_content=',P(T = t|X = x) > 0, for all t and x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
82
+ page_content=' Assumption 4 Ignorability: Given covariates, treatment assignment is independent of the potential outcomes, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
83
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
84
+ page_content=', (y1, y0) ⊥⊥ t|x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
85
+ page_content=' Our goal is to develop a novel continual causal inference framework to estimate the causal effect for all available data, including new data Dd and the previous data D1:(d−1), with- out having access to previous data D1:(d−1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
86
+ page_content=' Research Challenges Existing causal effect inference methods, however, are un- able to deal with the aforementioned new challenges, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
87
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
88
+ page_content=', extensibility, adaptability, and accessibility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
89
+ page_content=' Although it is possible to adapt existing causal inference methods to cater to these issues, these adjusted methods still have inevitable defects.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
90
+ page_content=' Three straightforward adaptation strategies are de- scribed as follows: 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
91
+ page_content=' If we directly apply the model previously trained based on original data to new observational data, the perfor- mance on new tasks will be very poor due to the domain shift issues among different data sources;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
92
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
93
+ page_content=' Suppose we utilize newly available data to re-train the previously learned model for adapting changes in the data distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
94
+ page_content=' In that case, old knowledge will be completely or partially overwritten by the new one, which can result in severe performance degradation on old tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
95
+ page_content=' This is the well-known catastrophic forgetting problem (McCloskey and Cohen 1989;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
96
+ page_content=' French 1999);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
97
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
98
+ page_content=' To overcome the catastrophic forgetting problem, we may rely on the storage of old data and combine the old and new data together, and then re-train the model from scratch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
99
+ page_content=' However, this strategy is memory inefficient and time-consuming, and it brings practical concerns such as copyright or privacy issues when storing data for a long time (Samet, Miri, and Granger 2013).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
100
+ page_content=' Any of these three strategies, in combination with the exist- ing causal effect inference methods, is deficient.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
101
+ page_content=' Potential Solution To address the continual treatment effect estimation prob- lem, we propose a Continual Causal Effect Representation Learning framework (CERL) for estimating causal effect with incrementally available observational data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
102
+ page_content=' Instead of having access to all previous observational data, we only store a limited subset of feature representations learned from previous data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
103
+ page_content=' Combining selective and balanced represen- tation learning, feature representation distillation, and fea- ture transformation, our framework preserves the knowl- edge learned from previous data and updates the knowledge by leveraging new data so that it can achieve the continual causal effect estimation for incrementally new data without compromising the estimation capability for previous data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
104
+ page_content=' Framework Overview.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
105
+ page_content=' To estimate the incrementally available observational data, the framework of CERL is mainly composed of two components: (1) the baseline causal effect learning model is only for the first available observational data, and thus we don’t need to consider the domain shift issue among different data sources.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
106
+ page_content=' This com- ponent is equivalent to the traditional causal effect estima- tion problem;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
107
+ page_content=' (2) the continual causal effect learning model is for the sequentially available observational data, where we need to handle more complex issues, such as knowledge transfer, catastrophic forgetting, global representation bal- ance, and memory constraint.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
108
+ page_content=' Baseline Causal Effect Learning Model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
109
+ page_content=' We first train the baseline causal effect learning model for the initial obser- vational dataset and then bring in subsequent datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
110
+ page_content=' The task on the initial dataset can be converted to a traditional causal effect estimation problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
111
+ page_content=' Owing to the success of deep learning for counterfactual inference, we propose to learn the selective and balanced feature representations (Shalit, Johansson, and Sontag 2017;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
112
+ page_content=' Chu, Rathbun, and Li 2020) for units in treatment and control groups and then in- fer the potential outcomes based on learned representation space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
113
+ page_content=' Sustainability of Model Learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
114
+ page_content=' We have built the baseline model for causal effect estimation with observa- tional data from a single source.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
115
+ page_content=' To avoid catastrophic for- getting when learning new data, we propose to preserve a subset of lower-dimensional feature representations rather than all original covariates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
116
+ page_content=' We also can adjust the number of preserved feature representations according to the mem- ory constraint.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
117
+ page_content=' Continual Causal Effect Learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
118
+ page_content=' We have stored mem- ory and the baseline model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
119
+ page_content=' To continually estimate the causal effect for incrementally available observational data, we incorporate feature representation distillation and feature representation transformation (Chu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
120
+ page_content=' 2023) to estimate the causal effect for all seen data based on a balanced global feature representation space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
121
+ page_content=' Research Opportunities Although significant advances have been made to over- come the challenges in causal effect estimation from an aca- demic perspective, industrial applications based on obser- vational data are always more complicated and harder.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
122
+ page_content=' Un- like source-specific and stationary observational data, most real-world data are incrementally available and from non- stationary data distributions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
123
+ page_content=' Significantly, we also face the realistic consideration of accessibility.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
124
+ page_content=' This work is the first attempt to investigate the continual lifelong causal effect in- ference problem and propose the corresponding evaluation criteria.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
125
+ page_content=' However, constructing the comprehensive analyt- ical tools and the theoretical framework derived from this brand-new problem requires non-trivial efforts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
126
+ page_content=' Specifically, there are several potential directions for continual causal in- ference: In addition to the distribution shift of the covariates among different domains, there are other potential tech- nical issues for continual effect estimation: for example, perhaps we do not initially observe all the necessary con- founding variables and may get access to increasingly more confounders.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
127
+ page_content=' Compared with homogeneous treatment effects (the magnitude and direction of the treatment effect are the same for all patients, regardless of any other patient char- acteristics), heterogeneous causal effects could differ for different individuals.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
128
+ page_content=' This could be another candidate to consider for the continual treatment effect estimation model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
129
+ page_content=' The basic assumptions for traditional causal effect esti- mation may not be completely applicable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
130
+ page_content=' New assump- tions may be supplemented, or previous assumptions need to be relaxed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
131
+ page_content=' There exists a natural connection with continual domain adaptation among different times or domains (“contin- ual” causal inference) and between treatment and control groups (continual “causal inference”).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
132
+ page_content=' Compared to traditional causal effect estimation tasks based on a small amount of medical data, the continual causal inference method will face big data computing or cloud computing due to its objective task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
133
+ page_content=' With the increasing public concern over privacy leakage in data, federated learning, which collaboratively trains the machine learning model without directly sharing the raw data among the data holders, may become a potential solution for continual causal inference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
134
+ page_content=' References Athey, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
135
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
136
+ page_content=' and Imbens, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
137
+ page_content=' 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
138
+ page_content=' Recursive partitioning for heterogeneous causal effects.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
139
+ page_content=' Proceedings of the National Academy of Sciences, 113(27): 7353–7360.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
140
+ page_content=' Chu, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
141
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
142
+ page_content=' Li, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
143
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
144
+ page_content=' Rathbun, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
145
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
146
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
147
+ page_content=' and Li, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
148
+ page_content=' 2023.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
149
+ page_content=' Continual Causal Inference with Incremental Observational Data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
150
+ page_content=' In The 39th IEEE International Conference on Data Engineer- ing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
151
+ page_content=' Chu, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
152
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
153
+ page_content=' Rathbun, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
154
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
155
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
156
+ page_content=' and Li, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
157
+ page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
158
+ page_content=' Matching in se- lective and balanced representation space for treatment ef- fects estimation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
159
+ page_content=' In Proceedings of the 29th ACM Interna- tional Conference on Information & Knowledge Manage- ment, 205–214.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
160
+ page_content=' Chu, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
161
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
162
+ page_content=' Rathbun, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
163
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
164
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
165
+ page_content=' and Li, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
166
+ page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
167
+ page_content=' Graph infomax adversarial learning for treatment effect estimation with net- worked observational data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
168
+ page_content=' In Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Min- ing, 176–184.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
169
+ page_content=' Chu, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
170
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
171
+ page_content=' Rathbun, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
172
+ page_content=' L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
173
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
174
+ page_content=' and Li, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
175
+ page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
176
+ page_content=' Learning Info- max and Domain-Independent Representations for Causal Effect Inference with Real-World Data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
177
+ page_content=' In Proceedings of the 2022 SIAM International Conference on Data Mining (SDM), 433–441.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
178
+ page_content=' SIAM.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
179
+ page_content=' Feder, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
180
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
181
+ page_content=' Keith, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
182
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
183
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
184
+ page_content=' Manzoor, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
185
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
186
+ page_content=' Pryzant, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
187
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
188
+ page_content=' Sridhar, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
189
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
190
+ page_content=' Wood-Doughty, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
191
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
192
+ page_content=' Eisenstein, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
193
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
194
+ page_content=' Grimmer, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
195
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
196
+ page_content=' Reichart, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
197
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
198
+ page_content=' Roberts, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
199
+ page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
200
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
201
+ page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
202
+ page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
203
+ page_content=' Causal inference in natural language processing: Estimation, prediction, interpretation and beyond.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
204
+ page_content=' Transactions of the Association for Computa- tional Linguistics, 10: 1138–1158.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
205
+ page_content=' French, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
206
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
207
+ page_content=' 1999.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
208
+ page_content=' Catastrophic forgetting in connectionist networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
209
+ page_content=' Trends in cognitive sciences, 3(4): 128–135.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
210
+ page_content=' Imbens, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
211
+ page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
212
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
213
+ page_content=' and Rubin, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
214
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
215
+ page_content=' 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
216
+ page_content=' Causal inference in statistics, social, and biomedical sciences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
217
+ page_content=' Cambridge University Press.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
218
+ page_content=' K¨unzel, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
219
+ page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
220
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
221
+ page_content=' Sekhon, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
222
+ page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
223
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
224
+ page_content=' Bickel, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
225
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
226
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
227
+ page_content=' and Yu, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
228
+ page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
229
+ page_content=' Metalearners for estimating heterogeneous treatment effects using machine learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
230
+ page_content=' Proceedings of the national academy of sciences, 116(10): 4156–4165.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
231
+ page_content=' Li, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
232
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
233
+ page_content=' and Fu, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
234
+ page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
235
+ page_content=' Matching on balanced nonlinear representations for treatment effects estimation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
236
+ page_content=' Advances in Neural Information Processing Systems, 30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
237
+ page_content=' Liu, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
238
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
239
+ page_content=' Wang, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
240
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
241
+ page_content=' Yang, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
242
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
243
+ page_content=' Zhou, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
244
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
245
+ page_content=' Yao, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
246
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
247
+ page_content=' Shao, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
248
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
249
+ page_content=' and Zhao, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
250
+ page_content=' 2022a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
251
+ page_content=' Show, Deconfound and Tell: Image Caption- ing With Causal Inference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
252
+ page_content=' In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 18041–18050.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
253
+ page_content=' Liu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
254
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
255
+ page_content=' Wei, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
256
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
257
+ page_content=' Chu, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
258
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
259
+ page_content=' Gao, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
260
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
261
+ page_content=' Zhang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
262
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
263
+ page_content=' Yan, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
264
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
265
+ page_content=' and Kang, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
266
+ page_content=' 2022b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
267
+ page_content=' Incorporating Causal Analysis into Diversi- fied and Logical Response Generation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
268
+ page_content=' In Proceedings of the 29th International Conference on Computational Linguis- tics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
269
+ page_content=' International Committee on Computational Linguistics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
270
+ page_content=' Louizos, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
271
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
272
+ page_content=' Shalit, U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
273
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
274
+ page_content=' Mooij, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
275
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
276
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
277
+ page_content=' Sontag, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
278
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
279
+ page_content=' Zemel, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
280
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
281
+ page_content=' and Welling, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
282
+ page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
283
+ page_content=' Causal effect inference with deep latent-variable models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
284
+ page_content=' In Advances in Neural Information Processing Systems, 6446–6456.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
285
+ page_content=' Ma, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
286
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
287
+ page_content=' Wan, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
288
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
289
+ page_content=' Yang, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
290
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
291
+ page_content=' Li, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
292
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
293
+ page_content=' Hecht, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
294
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
295
+ page_content=' and Teevan, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
296
+ page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
297
+ page_content=' Learning causal effects on hypergraphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
298
+ page_content=' In Proceed- ings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, 1202–1212.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
299
+ page_content=' McCloskey, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
300
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
301
+ page_content=' and Cohen, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
302
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
303
+ page_content=' 1989.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
304
+ page_content=' Catastrophic inter- ference in connectionist networks: The sequential learning problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
305
+ page_content=' In Psychology of learning and motivation, vol- ume 24, 109–165.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
306
+ page_content=' Elsevier.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
307
+ page_content=' Nie, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
308
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
309
+ page_content=' and Wager, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
310
+ page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
311
+ page_content=' Quasi-oracle estimation of heterogeneous treatment effects.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
312
+ page_content=' Biometrika, 108(2): 299– 319.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
313
+ page_content=' Rubin, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
314
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
315
+ page_content=' 1974.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
316
+ page_content=' Estimating causal effects of treatments in randomized and nonrandomized studies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
317
+ page_content=' Journal of edu- cational Psychology, 66(5): 688.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
318
+ page_content=' Samet, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
319
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
320
+ page_content=' Miri, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
321
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
322
+ page_content=' and Granger, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
323
+ page_content=' 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
324
+ page_content=' Incremental learning of privacy-preserving Bayesian networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
325
+ page_content=' Applied Soft Computing, 13(8): 3657–3667.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
326
+ page_content=' Shalit, U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
327
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
328
+ page_content=' Johansson, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
329
+ page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
330
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
331
+ page_content=' and Sontag, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
332
+ page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
333
+ page_content=' Estimat- ing individual treatment effect: generalization bounds and algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
334
+ page_content=' In International Conference on Machine Learn- ing, 3076–3085.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
335
+ page_content=' PMLR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
336
+ page_content=' Splawa-Neyman, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
337
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
338
+ page_content=' Dabrowska, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
339
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
340
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
341
+ page_content=' and Speed, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
342
+ page_content=' 1990.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
343
+ page_content=' On the application of probability theory to agricultural ex- periments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
344
+ page_content=' Essay on principles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
345
+ page_content=' Section 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
346
+ page_content=' Statistical Sci- ence, 465–472.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
347
+ page_content=' Tang, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
348
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
349
+ page_content=' Niu, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
350
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
351
+ page_content=' Huang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
352
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
353
+ page_content=' Shi, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
354
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
355
+ page_content=' and Zhang, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
356
+ page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
357
+ page_content=' Unbiased scene graph generation from biased training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
358
+ page_content=' In Proceedings of the IEEE/CVF conference on computer vi- sion and pattern recognition, 3716–3725.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
359
+ page_content=' Wager, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
360
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
361
+ page_content=' and Athey, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
362
+ page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
363
+ page_content=' Estimation and inference of heterogeneous treatment effects using random forests.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
364
+ page_content=' Jour- nal of the American Statistical Association, 113(523): 1228– 1242.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
365
+ page_content=' Yao, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
366
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
367
+ page_content=' Chu, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
368
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
369
+ page_content=' Li, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
370
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
371
+ page_content=' Li, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
372
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
373
+ page_content=' Gao, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
374
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
375
+ page_content=' and Zhang, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
376
+ page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
377
+ page_content=' A survey on causal inference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
378
+ page_content=' ACM Transactions on Knowl- edge Discovery from Data (TKDD), 15(5): 1–46.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
379
+ page_content=' Yao, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
380
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
381
+ page_content=' Li, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
382
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
383
+ page_content=' Li, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
384
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
385
+ page_content=' Huai, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
386
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
387
+ page_content=' Gao, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
388
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
389
+ page_content=' and Zhang, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
390
+ page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
391
+ page_content=' Representation learning for treatment effect estimation from observational data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
392
+ page_content=' Advances in Neural Information Pro- cessing Systems, 31.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
393
+ page_content=' Yoon, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
394
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
395
+ page_content=' Jordon, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
396
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
397
+ page_content=' and van der Schaar, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
398
+ page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
399
+ page_content=' GANITE: Estimation of individualized treatment effects using genera- tive adversarial nets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
400
+ page_content=' In International Conference on Learn- ing Representations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
401
+ page_content=' Zhang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
402
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
403
+ page_content=' Zhang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
404
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
405
+ page_content=' Ghosh, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
406
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
407
+ page_content=' Li, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
408
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
409
+ page_content=' Tasci, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
410
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
411
+ page_content=' Heck, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
412
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
413
+ page_content=' Zhang, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
414
+ page_content=';' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
415
+ page_content=' and Kuo, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
416
+ page_content='-C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
417
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
418
+ page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
419
+ page_content=' Class-incremental learn- ing via deep model consolidation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
420
+ page_content=' In The IEEE Winter Con- ference on Applications of Computer Vision, 1131–1140.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/B9AzT4oBgHgl3EQfGPvM/content/2301.01026v1.pdf'}
BNE2T4oBgHgl3EQfRgdU/content/2301.03781v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1220b70ad537ec8defd19e887407f52a6ffc30c8577a45f0663ad5914a71aba4
3
+ size 480268
BNE2T4oBgHgl3EQfRgdU/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98987d7a075f92d8e05015b6396fc55f4c6156c265e6014730d6b9ddb7e3be13
3
+ size 3342381
BNE2T4oBgHgl3EQfRgdU/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f70fbaf7f96944b6eff90d14413a156ab7fdbb69eb6b5726b706b6bfb73765a8
3
+ size 120578
C9E5T4oBgHgl3EQfUA9Q/content/2301.05540v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ecec16d3a2646c9d610d00fa2d6a216c7a51380939bca04fb14eac7804d6db5
3
+ size 379527
C9E5T4oBgHgl3EQfUA9Q/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46723147ce724afc971c42363587bd1bf8154495599911a91f10ab77cb774f4b
3
+ size 189765
GdE4T4oBgHgl3EQfHgxG/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b076287d4b44ee752a3c31dc02441660f1b9db26e76ac4fe494134b5cfa3042
3
+ size 3014701